Diffstat (limited to 'kernel')
-rw-r--r--   kernel/audit.c              | 31
-rw-r--r--   kernel/audit.h              |  2
-rw-r--r--   kernel/audit_tree.c         |  2
-rw-r--r--   kernel/audit_watch.c        |  2
-rw-r--r--   kernel/auditfilter.c        | 10
-rw-r--r--   kernel/cgroup.c             | 11
-rw-r--r--   kernel/cpuset.c             | 10
-rw-r--r--   kernel/events/core.c        | 12
-rw-r--r--   kernel/futex.c              | 53
-rw-r--r--   kernel/irq/irqdomain.c      |  1
-rw-r--r--   kernel/irq/manage.c         |  3
-rw-r--r--   kernel/profile.c            |  4
-rw-r--r--   kernel/sched/clock.c        |  4
-rw-r--r--   kernel/sched/core.c         |  9
-rw-r--r--   kernel/sched/cpudeadline.c  |  4
-rw-r--r--   kernel/sched/deadline.c     | 10
-rw-r--r--   kernel/sched/fair.c         |  8
-rw-r--r--   kernel/sched/rt.c           |  8
-rw-r--r--   kernel/stop_machine.c       |  2
-rw-r--r--   kernel/trace/trace_events.c | 16
-rw-r--r--   kernel/trace/trace_export.c |  7
-rw-r--r--   kernel/tracepoint.c         |  7
22 files changed, 139 insertions(+), 77 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 34c5a2310fbf..3392d3e0254a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@ struct audit_buffer {
 
 struct audit_reply {
     __u32 portid;
-    pid_t pid;
+    struct net *net;
     struct sk_buff *skb;
 };
 
@@ -500,7 +500,7 @@ int audit_send_list(void *_dest)
 {
     struct audit_netlink_list *dest = _dest;
     struct sk_buff *skb;
-    struct net *net = get_net_ns_by_pid(dest->pid);
+    struct net *net = dest->net;
     struct audit_net *aunet = net_generic(net, audit_net_id);
 
     /* wait for parent to finish and send an ACK */
@@ -510,6 +510,7 @@ int audit_send_list(void *_dest)
     while ((skb = __skb_dequeue(&dest->q)) != NULL)
         netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
 
+    put_net(net);
     kfree(dest);
 
     return 0;
@@ -543,7 +544,7 @@ out_kfree_skb:
 static int audit_send_reply_thread(void *arg)
 {
     struct audit_reply *reply = (struct audit_reply *)arg;
-    struct net *net = get_net_ns_by_pid(reply->pid);
+    struct net *net = reply->net;
     struct audit_net *aunet = net_generic(net, audit_net_id);
 
     mutex_lock(&audit_cmd_mutex);
@@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg)
     /* Ignore failure. It'll only happen if the sender goes away,
        because our timeout is set to infinite. */
     netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
+    put_net(net);
     kfree(reply);
     return 0;
 }
 /**
  * audit_send_reply - send an audit reply message via netlink
- * @portid: netlink port to which to send reply
+ * @request_skb: skb of request we are replying to (used to target the reply)
  * @seq: sequence number
  * @type: audit message type
  * @done: done (last) flag
@@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg)
  * Allocates an skb, builds the netlink message, and sends it to the port id.
  * No failure notifications.
  */
-static void audit_send_reply(__u32 portid, int seq, int type, int done,
+static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
                  int multi, const void *payload, int size)
 {
+    u32 portid = NETLINK_CB(request_skb).portid;
+    struct net *net = sock_net(NETLINK_CB(request_skb).sk);
     struct sk_buff *skb;
     struct task_struct *tsk;
     struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
@@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done,
     if (!skb)
         goto out;
 
+    reply->net = get_net(net);
     reply->portid = portid;
-    reply->pid = task_pid_vnr(current);
     reply->skb = skb;
 
     tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
@@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb)
 
     seq = nlmsg_hdr(skb)->nlmsg_seq;
 
-    audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
-             &af, sizeof(af));
+    audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af));
 
     return 0;
 }
@@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         s.backlog = skb_queue_len(&audit_skb_queue);
         s.version = AUDIT_VERSION_LATEST;
         s.backlog_wait_time = audit_backlog_wait_time;
-        audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
-                 &s, sizeof(s));
+        audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
         break;
     }
     case AUDIT_SET: {
@@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        seq, data, nlmsg_len(nlh));
         break;
     case AUDIT_LIST_RULES:
-        err = audit_list_rules_send(NETLINK_CB(skb).portid, seq);
+        err = audit_list_rules_send(skb, seq);
         break;
     case AUDIT_TRIM:
         audit_trim_trees();
@@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
             memcpy(sig_data->ctx, ctx, len);
             security_release_secctx(ctx, len);
         }
-        audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
-                 0, 0, sig_data, sizeof(*sig_data) + len);
+        audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
+                 sig_data, sizeof(*sig_data) + len);
         kfree(sig_data);
         break;
     case AUDIT_TTY_GET: {
@@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         s.log_passwd = tsk->signal->audit_tty_log_passwd;
         spin_unlock(&tsk->sighand->siglock);
 
-        audit_send_reply(NETLINK_CB(skb).portid, seq,
-                 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
+        audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
         break;
     }
     case AUDIT_TTY_SET: {
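The kernel/audit.c change above stops resolving a network namespace by PID inside the reply kthread and instead pins the requesting socket's namespace with get_net() while the request is still being handled, dropping it with put_net() once the reply has been sent. As a rough userspace analogy of that hand-off (plain C11 atomics plus pthreads; the `struct ns` object and all names here are invented for illustration, this is not kernel code), the pattern is: take a reference before handing work to an asynchronous worker, and let the worker drop it when done.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a refcounted, long-lived object such as a struct net. */
struct ns {
    atomic_int refcount;
    int id;
};

static struct ns *ns_get(struct ns *n)
{
    atomic_fetch_add(&n->refcount, 1);      /* like get_net() */
    return n;
}

static void ns_put(struct ns *n)
{
    if (atomic_fetch_sub(&n->refcount, 1) == 1)  /* like put_net() */
        free(n);
}

struct reply {
    struct ns *net;     /* a pinned reference, not a pid to look up later */
    int seq;
};

static void *reply_thread(void *arg)
{
    struct reply *r = arg;

    printf("sending reply %d in namespace %d\n", r->seq, r->net->id);
    ns_put(r->net);     /* worker drops the reference it was handed */
    free(r);
    return NULL;
}

int main(void)
{
    struct ns *net = malloc(sizeof(*net));
    struct reply *r = malloc(sizeof(*r));
    pthread_t t;

    if (!net || !r)
        return 1;
    atomic_init(&net->refcount, 1);
    net->id = 42;

    r->net = ns_get(net);   /* pin while the request context still exists */
    r->seq = 1;
    pthread_create(&t, NULL, reply_thread, r);

    ns_put(net);            /* the requester's own reference can go away safely */
    pthread_join(t, NULL);
    return 0;
}

The worker owns exactly one reference for the lifetime of the deferred work, so the object cannot disappear between the request and the asynchronous reply.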
diff --git a/kernel/audit.h b/kernel/audit.h
index 57cc64d67718..8df132214606 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -247,7 +247,7 @@ extern void audit_panic(const char *message);
 
 struct audit_netlink_list {
     __u32 portid;
-    pid_t pid;
+    struct net *net;
     struct sk_buff_head q;
 };
 
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 67ccf0e7cca9..135944a7b28a 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -916,7 +916,7 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
                    struct fsnotify_mark *inode_mark,
                    struct fsnotify_mark *vfsmount_mark,
                    u32 mask, void *data, int data_type,
-                   const unsigned char *file_name)
+                   const unsigned char *file_name, u32 cookie)
 {
     return 0;
 }
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 2596fac5dcb4..70b4554d2fbe 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -471,7 +471,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
                     struct fsnotify_mark *inode_mark,
                     struct fsnotify_mark *vfsmount_mark,
                     u32 mask, void *data, int data_type,
-                    const unsigned char *dname)
+                    const unsigned char *dname, u32 cookie)
 {
     struct inode *inode;
     struct audit_parent *parent;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 14a78cca384e..92062fd6cc8c 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -29,6 +29,8 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/security.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
 #include "audit.h"
 
 /*
@@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data,
 
 /**
  * audit_list_rules_send - list the audit rules
- * @portid: target portid for netlink audit messages
+ * @request_skb: skb of request we are replying to (used to target the reply)
  * @seq: netlink audit message sequence (serial) number
  */
-int audit_list_rules_send(__u32 portid, int seq)
+int audit_list_rules_send(struct sk_buff *request_skb, int seq)
 {
+    u32 portid = NETLINK_CB(request_skb).portid;
+    struct net *net = sock_net(NETLINK_CB(request_skb).sk);
     struct task_struct *tsk;
     struct audit_netlink_list *dest;
     int err = 0;
@@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq)
     dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
     if (!dest)
         return -ENOMEM;
+    dest->net = get_net(net);
     dest->portid = portid;
-    dest->pid = task_pid_vnr(current);
     skb_queue_head_init(&dest->q);
 
     mutex_lock(&audit_filter_mutex);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 105f273b6f86..0c753ddd223b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4112,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
     err = percpu_ref_init(&css->refcnt, css_release);
     if (err)
-        goto err_free;
+        goto err_free_css;
 
     init_css(css, ss, cgrp);
 
     err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
     if (err)
-        goto err_free;
+        goto err_free_percpu_ref;
 
     err = online_css(css);
     if (err)
-        goto err_free;
+        goto err_clear_dir;
 
     dget(cgrp->dentry);
     css_get(css->parent);
@@ -4138,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
     return 0;
 
-err_free:
+err_clear_dir:
+    cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+err_free_percpu_ref:
     percpu_ref_cancel_init(&css->refcnt);
+err_free_css:
     ss->css_free(css);
     return err;
 }
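The kernel/cgroup.c hunks replace a single catch-all err_free label with one label per setup stage, so each failure path unwinds exactly what has already been initialized. A minimal sketch of that staged-unwind idiom (the stages and helpers below are made up for illustration, not cgroup APIs):

#include <stdio.h>
#include <stdlib.h>

/* Three make-believe setup stages; each can fail. */
static int init_ref(int **ref)      { *ref = malloc(sizeof(int)); return *ref ? 0 : -1; }
static int populate_dir(FILE **dir) { *dir = tmpfile(); return *dir ? 0 : -1; }
static int go_online(void)          { return 0; /* pretend success */ }

static int create_object(void)
{
    int *ref;
    FILE *dir;
    int err;

    err = init_ref(&ref);
    if (err)
        goto err_out;       /* nothing to undo yet */

    err = populate_dir(&dir);
    if (err)
        goto err_free_ref;  /* undo stage 1 only */

    err = go_online();
    if (err)
        goto err_close_dir; /* undo stages 2 and 1 */

    printf("object created\n");
    fclose(dir);            /* cleanup here only to keep the example leak-free */
    free(ref);
    return 0;

err_close_dir:
    fclose(dir);
err_free_ref:
    free(ref);
err_out:
    return err;
}

int main(void)
{
    return create_object() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Each label sits just above the teardown for the stage that precedes it, so control falls through the remaining teardown steps in reverse order of construction.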
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4410ac6a55f1..e6b1b66afe52 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  * Temporarilly set tasks mems_allowed to target nodes of migration,
  * so that the migration code can allocate pages on these nodes.
  *
- * Call holding cpuset_mutex, so current's cpuset won't change
- * during this call, as manage_mutex holds off any cpuset_attach()
- * calls. Therefore we don't need to take task_lock around the
- * call to guarantee_online_mems(), as we know no one is changing
- * our task's cpuset.
- *
  * While the mm_struct we are migrating is typically from some
  * other task, the task_struct mems_allowed that we are hacking
  * is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 
     do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
+    rcu_read_lock();
     mems_cs = effective_nodemask_cpuset(task_cs(tsk));
     guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+    rcu_read_unlock();
 }
 
 /*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
     task_lock(current);
     cs = nearest_hardwall_ancestor(task_cs(current));
+    allowed = node_isset(node, cs->mems_allowed);
     task_unlock(current);
 
-    allowed = node_isset(node, cs->mems_allowed);
     mutex_unlock(&callback_mutex);
     return allowed;
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 56003c6edfd3..fa0b2d4ad83c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7856,14 +7856,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 static void __perf_event_exit_context(void *__info)
 {
     struct perf_event_context *ctx = __info;
-    struct perf_event *event, *tmp;
+    struct perf_event *event;
 
     perf_pmu_rotate_stop(ctx->pmu);
 
-    list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-        __perf_remove_from_context(event);
-    list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+    rcu_read_lock();
+    list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
         __perf_remove_from_context(event);
+    rcu_read_unlock();
 }
 
 static void perf_event_exit_cpu_context(int cpu)
@@ -7887,11 +7887,11 @@ static void perf_event_exit_cpu(int cpu)
 {
     struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
+    perf_event_exit_cpu_context(cpu);
+
     mutex_lock(&swhash->hlist_mutex);
     swevent_hlist_release(swhash);
     mutex_unlock(&swhash->hlist_mutex);
-
-    perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261cb9ff..08ec814ad9d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = {
  * waiting on a futex.
  */
 struct futex_hash_bucket {
+    atomic_t waiters;
     spinlock_t lock;
     struct plist_head chain;
 } ____cacheline_aligned_in_smp;
@@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key)
     smp_mb__after_atomic_inc();
 }
 
-static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+/*
+ * Reflects a new waiter being added to the waitqueue.
+ */
+static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 {
 #ifdef CONFIG_SMP
+    atomic_inc(&hb->waiters);
     /*
-     * Tasks trying to enter the critical region are most likely
-     * potential waiters that will be added to the plist. Ensure
-     * that wakers won't miss to-be-slept tasks in the window between
-     * the wait call and the actual plist_add.
+     * Full barrier (A), see the ordering comment above.
      */
-    if (spin_is_locked(&hb->lock))
-        return true;
-    smp_rmb(); /* Make sure we check the lock state first */
+    smp_mb__after_atomic_inc();
+#endif
+}
+
+/*
+ * Reflects a waiter being removed from the waitqueue by wakeup
+ * paths.
+ */
+static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+    atomic_dec(&hb->waiters);
+#endif
+}
 
-    return !plist_head_empty(&hb->chain);
+static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+    return atomic_read(&hb->waiters);
 #else
-    return true;
+    return 1;
 #endif
 }
 
@@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q)
 
     hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
     plist_del(&q->list, &hb->chain);
+    hb_waiters_dec(hb);
 }
 
 /*
@@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
      */
     if (likely(&hb1->chain != &hb2->chain)) {
         plist_del(&q->list, &hb1->chain);
+        hb_waiters_dec(hb1);
         plist_add(&q->list, &hb2->chain);
+        hb_waiters_inc(hb2);
         q->lock_ptr = &hb2->lock;
     }
     get_futex_key_refs(key2);
@@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
     struct futex_hash_bucket *hb;
 
     hb = hash_futex(&q->key);
+
+    /*
+     * Increment the counter before taking the lock so that
+     * a potential waker won't miss a to-be-slept task that is
+     * waiting for the spinlock. This is safe as all queue_lock()
+     * users end up calling queue_me(). Similarly, for housekeeping,
+     * decrement the counter at queue_unlock() when some error has
+     * occurred and we don't end up adding the task to the list.
+     */
+    hb_waiters_inc(hb);
+
     q->lock_ptr = &hb->lock;
 
     spin_lock(&hb->lock); /* implies MB (A) */
@@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb)
     __releases(&hb->lock)
 {
     spin_unlock(&hb->lock);
+    hb_waiters_dec(hb);
 }
 
 /**
@@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
      * Unqueue the futex_q and determine which it was.
      */
     plist_del(&q->list, &hb->chain);
+    hb_waiters_dec(hb);
 
     /* Handle spurious wakeups gracefully */
     ret = -EWOULDBLOCK;
@@ -2875,6 +2907,7 @@ static int __init futex_init(void)
         futex_cmpxchg_enabled = 1;
 
     for (i = 0; i < futex_hashsize; i++) {
+        atomic_set(&futex_queues[i].waiters, 0);
         plist_head_init(&futex_queues[i].chain);
         spin_lock_init(&futex_queues[i].lock);
     }
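The kernel/futex.c changes add a per-bucket atomic waiter count: a waiter bumps it before it even takes the bucket lock, so the wake-up side can tell whether a bucket has anyone to wake without relying on spin_is_locked() or the plist contents. A stripped-down userspace sketch of the same counting idea (C11 atomics plus a pthread condition variable stand in for the kernel primitives; `struct bucket` and the function names are invented, this is not the futex implementation):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a futex hash bucket. */
struct bucket {
    atomic_int waiters;     /* analogue of futex_hash_bucket::waiters */
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool posted;
};

static struct bucket b = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .cond = PTHREAD_COND_INITIALIZER,
};

static void *waiter(void *arg)
{
    (void)arg;

    /* Announce ourselves before taking the lock (like hb_waiters_inc()). */
    atomic_fetch_add(&b.waiters, 1);

    pthread_mutex_lock(&b.lock);
    while (!b.posted)           /* re-check the real condition under the lock */
        pthread_cond_wait(&b.cond, &b.lock);
    pthread_mutex_unlock(&b.lock);

    atomic_fetch_sub(&b.waiters, 1);    /* like hb_waiters_dec() */
    return NULL;
}

static void wake(void)
{
    /* Fast path: nobody has announced themselves, skip the lock entirely. */
    if (atomic_load(&b.waiters) == 0)
        return;

    pthread_mutex_lock(&b.lock);
    b.posted = true;
    pthread_cond_broadcast(&b.cond);
    pthread_mutex_unlock(&b.lock);
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, waiter, NULL);

    /* Wait until the waiter has announced itself, then wake it. */
    while (atomic_load(&b.waiters) == 0)
        sched_yield();
    wake();

    pthread_join(t, NULL);
    printf("woken and gone\n");
    return 0;
}

The ordering mirrors the patch: the waiter publishes its count before it can block, and the sleeper re-checks the condition under the lock, so a waker that observes a zero count can safely skip the lock without losing a wakeup.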
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cf68bb36fe58..f14033700c25 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/topology.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 481a13c43b17..d3bf660cb57f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-    if (atomic_dec_and_test(&desc->threads_active) &&
-        waitqueue_active(&desc->wait_for_threads))
+    if (atomic_dec_and_test(&desc->threads_active))
         wake_up(&desc->wait_for_threads);
 }
 
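The kernel/irq/manage.c hunk drops the waitqueue_active() test before wake_up(): checking "is anyone waiting?" outside the waitqueue's own synchronization can race with a sleeper that has decided to sleep but has not yet queued itself, losing the wakeup, whereas waking unconditionally cannot. A hedged pthreads sketch of the same idea (a condition variable plays the role of the waitqueue; the names and structure are invented, this is not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int threads_active = 1;  /* analogue of desc->threads_active */

static void *irq_thread(void *arg)
{
    (void)arg;
    /* ... the threaded handler's work would run here ... */

    pthread_mutex_lock(&lock);
    threads_active--;
    /*
     * Racy variant (what the removed code resembled):
     *
     *     if (somebody_is_waiting())
     *         pthread_cond_broadcast(&cond);
     *
     * A waiter that has seen threads_active != 0 but has not yet gone
     * to sleep would be missed. Signalling unconditionally while the
     * predicate is updated under the lock cannot lose a wakeup.
     */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, irq_thread, NULL);

    /* synchronize_irq()-like wait: sleep until no handler threads remain. */
    pthread_mutex_lock(&lock);
    while (threads_active)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    printf("all handler threads finished\n");
    return 0;
}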
diff --git a/kernel/profile.c b/kernel/profile.c
index 6631e1ef55ab..ebdd9c1a86b4 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -549,14 +549,14 @@ static int create_hash_tables(void)
         struct page *page;
 
         page = alloc_pages_exact_node(node,
-                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
                 0);
         if (!page)
             goto out_cleanup;
         per_cpu(cpu_profile_hits, cpu)[1]
                 = (struct profile_hit *)page_address(page);
         page = alloc_pages_exact_node(node,
-                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
                 0);
         if (!page)
             goto out_cleanup;
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 43c2bcc35761..b30a2924ef14 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu)
     if (unlikely(!sched_clock_running))
         return 0ull;
 
-    preempt_disable();
+    preempt_disable_notrace();
     scd = cpu_sdc(cpu);
 
     if (cpu != smp_processor_id())
         clock = sched_clock_remote(scd);
     else
         clock = sched_clock_local(scd);
-    preempt_enable();
+    preempt_enable_notrace();
 
     return clock;
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6edbef296ece..f5c6635b806c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3338,6 +3338,15 @@ recheck:
             return -EPERM;
         }
 
+        /*
+         * Can't set/change SCHED_DEADLINE policy at all for now
+         * (safest behavior); in the future we would like to allow
+         * unprivileged DL tasks to increase their relative deadline
+         * or reduce their runtime (both ways reducing utilization)
+         */
+        if (dl_policy(policy))
+            return -EPERM;
+
         /*
          * Treat SCHED_IDLE as nice 20. Only allow a switch to
          * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5b8838b56d1c..5b9bb42b2d47 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-    WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
+    WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
     if (dl_time_before(new_dl, cp->elements[idx].dl)) {
         cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
     }
 
 out:
-    WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
+    WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
     return best_cpu;
 }
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 15cbc17fbf84..6e79b3faa4cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
     struct task_struct *p = dl_task_of(dl_se);
-    dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
     if (p->nr_cpus_allowed > 1)
         dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
     struct task_struct *p = dl_task_of(dl_se);
-    dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
     if (p->nr_cpus_allowed > 1)
         dl_rq->dl_nr_migratory--;
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
     return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
         struct rt_rq *rt_rq = &rq->rt;
 
         raw_spin_lock(&rt_rq->rt_runtime_lock);
-        rt_rq->rt_time += delta_exec;
         /*
          * We'll let actual RT tasks worry about the overflow here, we
-         * have our own CBS to keep us inline -- see above.
+         * have our own CBS to keep us inline; only account when RT
+         * bandwidth is relevant.
          */
+        if (sched_rt_bandwidth_account(rt_rq))
+            rt_rq->rt_time += delta_exec;
         raw_spin_unlock(&rt_rq->rt_runtime_lock);
     }
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 78157099b167..9b4c4f320130 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
     struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
     /*
-     * Ensure the task's vruntime is normalized, so that when its
+     * Ensure the task's vruntime is normalized, so that when it's
      * switched back to the fair class the enqueue_entity(.flags=0) will
      * do the right thing.
      *
-     * If it was on_rq, then the dequeue_entity(.flags=0) will already
-     * have normalized the vruntime, if it was !on_rq, then only when
+     * If it's on_rq, then the dequeue_entity(.flags=0) will already
+     * have normalized the vruntime, if it's !on_rq, then only when
      * the task is sleeping will it still have non-normalized vruntime.
      */
-    if (!se->on_rq && p->state != TASK_RUNNING) {
+    if (!p->on_rq && p->state != TASK_RUNNING) {
         /*
          * Fix up our vruntime so that the current sleep doesn't
          * cause 'unlimited' sleep bonus.
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a2740b775b45..1999021042c7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+    struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+    return (hrtimer_active(&rt_b->rt_period_timer) ||
+        rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 84571e09c907..01fbae5b97b7 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
      */
     smp_call_function_single(min(cpu1, cpu2),
                  &irq_cpu_stop_queue_work,
-                 &call_args, 0);
+                 &call_args, 1);
     lg_local_unlock(&stop_cpus_lock);
     preempt_enable();
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e71ffd4eccb5..7b16d40bd64d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -27,12 +27,6 @@
 
 DEFINE_MUTEX(event_mutex);
 
-DEFINE_MUTEX(event_storage_mutex);
-EXPORT_SYMBOL_GPL(event_storage_mutex);
-
-char event_storage[EVENT_STORAGE_SIZE];
-EXPORT_SYMBOL_GPL(event_storage);
-
 LIST_HEAD(ftrace_events);
 static LIST_HEAD(ftrace_common_fields);
 
@@ -1777,6 +1771,16 @@ static void trace_module_add_events(struct module *mod)
 {
     struct ftrace_event_call **call, **start, **end;
 
+    if (!mod->num_trace_events)
+        return;
+
+    /* Don't add infrastructure for mods without tracepoints */
+    if (trace_module_has_bad_taint(mod)) {
+        pr_err("%s: module has bad taint, not creating trace events\n",
+               mod->name);
+        return;
+    }
+
     start = mod->trace_events;
     end = mod->trace_events + mod->num_trace_events;
 
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7c3e3e72e2b6..ee0a5098ac43 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
 #undef __array
 #define __array(type, item, len) \
     do { \
+        char *type_str = #type"["__stringify(len)"]"; \
         BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
-        mutex_lock(&event_storage_mutex); \
-        snprintf(event_storage, sizeof(event_storage), \
-             "%s[%d]", #type, len); \
-        ret = trace_define_field(event_call, event_storage, #item, \
+        ret = trace_define_field(event_call, type_str, #item, \
                      offsetof(typeof(field), item), \
                      sizeof(field.item), \
                      is_signed_type(type), filter_type); \
-        mutex_unlock(&event_storage_mutex); \
         if (ret) \
             return ret; \
     } while (0);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 29f26540e9c9..031cc5655a51 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
+bool trace_module_has_bad_taint(struct module *mod)
+{
+    return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
+}
+
 static int tracepoint_module_coming(struct module *mod)
 {
     struct tp_module *tp_mod, *iter;
@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
      * module headers (for forced load), to make sure we don't cause a crash.
      * Staging and out-of-tree GPL modules are fine.
      */
-    if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+    if (trace_module_has_bad_taint(mod))
         return 0;
     mutex_lock(&tracepoints_mutex);
     tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);