Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c               | 31
-rw-r--r--  kernel/audit.h               |  2
-rw-r--r--  kernel/auditfilter.c         | 10
-rw-r--r--  kernel/cgroup.c              | 11
-rw-r--r--  kernel/futex.c               | 53
-rw-r--r--  kernel/profile.c             |  4
-rw-r--r--  kernel/sched/clock.c         |  4
-rw-r--r--  kernel/sched/core.c          |  9
-rw-r--r--  kernel/stop_machine.c        |  2
-rw-r--r--  kernel/trace/trace_events.c  |  6
-rw-r--r--  kernel/trace/trace_export.c  |  7
11 files changed, 90 insertions(+), 49 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 34c5a2310fbf..3392d3e0254a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@ struct audit_buffer {
 
 struct audit_reply {
 	__u32 portid;
-	pid_t pid;
+	struct net *net;
 	struct sk_buff *skb;
 };
 
@@ -500,7 +500,7 @@ int audit_send_list(void *_dest)
 {
 	struct audit_netlink_list *dest = _dest;
 	struct sk_buff *skb;
-	struct net *net = get_net_ns_by_pid(dest->pid);
+	struct net *net = dest->net;
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 
 	/* wait for parent to finish and send an ACK */
@@ -510,6 +510,7 @@ int audit_send_list(void *_dest)
 	while ((skb = __skb_dequeue(&dest->q)) != NULL)
 		netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
 
+	put_net(net);
 	kfree(dest);
 
 	return 0;
@@ -543,7 +544,7 @@ out_kfree_skb:
 static int audit_send_reply_thread(void *arg)
 {
 	struct audit_reply *reply = (struct audit_reply *)arg;
-	struct net *net = get_net_ns_by_pid(reply->pid);
+	struct net *net = reply->net;
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 
 	mutex_lock(&audit_cmd_mutex);
@@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg)
 	/* Ignore failure. It'll only happen if the sender goes away,
 	   because our timeout is set to infinite. */
 	netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
+	put_net(net);
 	kfree(reply);
 	return 0;
 }
 /**
  * audit_send_reply - send an audit reply message via netlink
- * @portid: netlink port to which to send reply
+ * @request_skb: skb of request we are replying to (used to target the reply)
  * @seq: sequence number
  * @type: audit message type
  * @done: done (last) flag
@@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg)
  * Allocates an skb, builds the netlink message, and sends it to the port id.
  * No failure notifications.
  */
-static void audit_send_reply(__u32 portid, int seq, int type, int done,
+static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
 			     int multi, const void *payload, int size)
 {
+	u32 portid = NETLINK_CB(request_skb).portid;
+	struct net *net = sock_net(NETLINK_CB(request_skb).sk);
 	struct sk_buff *skb;
 	struct task_struct *tsk;
 	struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
@@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done,
 	if (!skb)
 		goto out;
 
+	reply->net = get_net(net);
 	reply->portid = portid;
-	reply->pid = task_pid_vnr(current);
 	reply->skb = skb;
 
 	tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
@@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb)
 
 	seq = nlmsg_hdr(skb)->nlmsg_seq;
 
-	audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
-			 &af, sizeof(af));
+	audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af));
 
 	return 0;
 }
@@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		s.backlog = skb_queue_len(&audit_skb_queue);
 		s.version = AUDIT_VERSION_LATEST;
 		s.backlog_wait_time = audit_backlog_wait_time;
-		audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
-				 &s, sizeof(s));
+		audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
 		break;
 	}
 	case AUDIT_SET: {
@@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 					   seq, data, nlmsg_len(nlh));
 		break;
 	case AUDIT_LIST_RULES:
-		err = audit_list_rules_send(NETLINK_CB(skb).portid, seq);
+		err = audit_list_rules_send(skb, seq);
 		break;
 	case AUDIT_TRIM:
 		audit_trim_trees();
@@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			memcpy(sig_data->ctx, ctx, len);
 			security_release_secctx(ctx, len);
 		}
-		audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
-				 0, 0, sig_data, sizeof(*sig_data) + len);
+		audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
+				 sig_data, sizeof(*sig_data) + len);
 		kfree(sig_data);
 		break;
 	case AUDIT_TTY_GET: {
@@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		s.log_passwd = tsk->signal->audit_tty_log_passwd;
 		spin_unlock(&tsk->sighand->siglock);
 
-		audit_send_reply(NETLINK_CB(skb).portid, seq,
-				 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
+		audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
 		break;
 	}
 	case AUDIT_TTY_SET: {
diff --git a/kernel/audit.h b/kernel/audit.h
index 57cc64d67718..8df132214606 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -247,7 +247,7 @@ extern void audit_panic(const char *message);
 
 struct audit_netlink_list {
 	__u32 portid;
-	pid_t pid;
+	struct net *net;
 	struct sk_buff_head q;
 };
 
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 14a78cca384e..92062fd6cc8c 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -29,6 +29,8 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/security.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
 #include "audit.h"
 
 /*
@@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data,
 
 /**
  * audit_list_rules_send - list the audit rules
- * @portid: target portid for netlink audit messages
+ * @request_skb: skb of request we are replying to (used to target the reply)
  * @seq: netlink audit message sequence (serial) number
  */
-int audit_list_rules_send(__u32 portid, int seq)
+int audit_list_rules_send(struct sk_buff *request_skb, int seq)
 {
+	u32 portid = NETLINK_CB(request_skb).portid;
+	struct net *net = sock_net(NETLINK_CB(request_skb).sk);
 	struct task_struct *tsk;
 	struct audit_netlink_list *dest;
 	int err = 0;
@@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq)
 	dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
 	if (!dest)
 		return -ENOMEM;
+	dest->net = get_net(net);
 	dest->portid = portid;
-	dest->pid = task_pid_vnr(current);
 	skb_queue_head_init(&dest->q);
 
 	mutex_lock(&audit_filter_mutex);
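The three audit changes above share one fix: a pid recorded at request time and resolved later via get_net_ns_by_pid() from a kernel thread is unreliable (the requester may already have exited, and the pid is resolved in the wrong context), so the request's network namespace is now captured directly from the request skb's socket, pinned with get_net(), and released with put_net() once the reply is delivered. A kernel-style sketch of the idiom, hedged as illustrative: get_net()/put_net()/sock_net()/NETLINK_CB() are the real kernel APIs the patch uses, while struct reply_work, reply_worker() and queue_reply() are made-up names standing in for the audit helpers.

struct reply_work {
	struct net *net;	/* namespace pinned by the requester */
	struct sk_buff *skb;	/* reply to deliver */
};

static int reply_worker(void *arg)
{
	struct reply_work *work = arg;

	/* ... deliver work->skb to a socket inside work->net ... */

	put_net(work->net);	/* balances the get_net() below */
	kfree(work);
	return 0;
}

static void queue_reply(struct sk_buff *request_skb, struct sk_buff *reply_skb)
{
	struct reply_work *work = kmalloc(sizeof(*work), GFP_KERNEL);

	if (!work)
		return;
	/* Resolve and pin the namespace now, in the requester's context,
	 * instead of later in the kthread via the requester's pid. */
	work->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
	work->skb = reply_skb;
	kthread_run(reply_worker, work, "reply_worker");
}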
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 105f273b6f86..0c753ddd223b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4112,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
 	err = percpu_ref_init(&css->refcnt, css_release);
 	if (err)
-		goto err_free;
+		goto err_free_css;
 
 	init_css(css, ss, cgrp);
 
 	err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
 	if (err)
-		goto err_free;
+		goto err_free_percpu_ref;
 
 	err = online_css(css);
 	if (err)
-		goto err_free;
+		goto err_clear_dir;
 
 	dget(cgrp->dentry);
 	css_get(css->parent);
@@ -4138,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
 	return 0;
 
-err_free:
+err_clear_dir:
+	cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+err_free_percpu_ref:
 	percpu_ref_cancel_init(&css->refcnt);
+err_free_css:
 	ss->css_free(css);
 	return err;
 }
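The cgroup fix above replaces a single catch-all err_free label, which ran the full teardown no matter how far create_css() had progressed, with one label per completed step, so each failure path undoes exactly what was already set up. A minimal userspace sketch of the same layered-unwind idiom, using ordinary malloc/fopen stand-ins rather than cgroup objects:

#include <stdio.h>
#include <stdlib.h>

static int setup_object(void)
{
	char *buf;
	FILE *f;

	buf = malloc(64);			/* step 1 */
	if (!buf)
		goto err_out;

	f = fopen("/tmp/example", "w");		/* step 2 */
	if (!f)
		goto err_free_buf;

	if (fputs("ready\n", f) == EOF)		/* step 3 */
		goto err_close_file;

	fclose(f);
	free(buf);
	return 0;

	/* Unwind in reverse order; each label undoes one earlier step. */
err_close_file:
	fclose(f);
err_free_buf:
	free(buf);
err_out:
	return -1;
}

int main(void)
{
	return setup_object();
}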
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261cb9ff..08ec814ad9d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = {
  * waiting on a futex.
  */
 struct futex_hash_bucket {
+	atomic_t waiters;
 	spinlock_t lock;
 	struct plist_head chain;
 } ____cacheline_aligned_in_smp;
@@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key)
 	smp_mb__after_atomic_inc();
 }
 
-static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+/*
+ * Reflects a new waiter being added to the waitqueue.
+ */
+static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 {
 #ifdef CONFIG_SMP
+	atomic_inc(&hb->waiters);
 	/*
-	 * Tasks trying to enter the critical region are most likely
-	 * potential waiters that will be added to the plist. Ensure
-	 * that wakers won't miss to-be-slept tasks in the window between
-	 * the wait call and the actual plist_add.
+	 * Full barrier (A), see the ordering comment above.
 	 */
-	if (spin_is_locked(&hb->lock))
-		return true;
-	smp_rmb(); /* Make sure we check the lock state first */
+	smp_mb__after_atomic_inc();
+#endif
+}
+
+/*
+ * Reflects a waiter being removed from the waitqueue by wakeup
+ * paths.
+ */
+static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+	atomic_dec(&hb->waiters);
+#endif
+}
 
-	return !plist_head_empty(&hb->chain);
+static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+	return atomic_read(&hb->waiters);
 #else
-	return true;
+	return 1;
 #endif
 }
 
@@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q)
 
 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
 	plist_del(&q->list, &hb->chain);
+	hb_waiters_dec(hb);
 }
 
 /*
@@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 	 */
 	if (likely(&hb1->chain != &hb2->chain)) {
 		plist_del(&q->list, &hb1->chain);
+		hb_waiters_dec(hb1);
 		plist_add(&q->list, &hb2->chain);
+		hb_waiters_inc(hb2);
 		q->lock_ptr = &hb2->lock;
 	}
 	get_futex_key_refs(key2);
@@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	struct futex_hash_bucket *hb;
 
 	hb = hash_futex(&q->key);
+
+	/*
+	 * Increment the counter before taking the lock so that
+	 * a potential waker won't miss a to-be-slept task that is
+	 * waiting for the spinlock. This is safe as all queue_lock()
+	 * users end up calling queue_me(). Similarly, for housekeeping,
+	 * decrement the counter at queue_unlock() when some error has
+	 * occurred and we don't end up adding the task to the list.
+	 */
+	hb_waiters_inc(hb);
+
 	q->lock_ptr = &hb->lock;
 
 	spin_lock(&hb->lock); /* implies MB (A) */
@@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb)
 	__releases(&hb->lock)
 {
 	spin_unlock(&hb->lock);
+	hb_waiters_dec(hb);
 }
 
 /**
@@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 	 * Unqueue the futex_q and determine which it was.
 	 */
 	plist_del(&q->list, &hb->chain);
+	hb_waiters_dec(hb);
 
 	/* Handle spurious wakeups gracefully */
 	ret = -EWOULDBLOCK;
@@ -2875,6 +2907,7 @@ static int __init futex_init(void)
 		futex_cmpxchg_enabled = 1;
 
 	for (i = 0; i < futex_hashsize; i++) {
+		atomic_set(&futex_queues[i].waiters, 0);
 		plist_head_init(&futex_queues[i].chain);
 		spin_lock_init(&futex_queues[i].lock);
 	}
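The futex change above replaces a spin_is_locked() heuristic with an explicit per-bucket atomic count of waiters: a waiter increments the counter before it can possibly be observed sleeping, so a waker that reads zero may safely skip the bucket lock entirely, and one that reads non-zero takes the slow path. A minimal userspace analogue with C11 atomics and pthreads (build with cc -pthread); bucket, bucket_wait() and bucket_wake() are illustrative names, and the default seq_cst fetch_add plays the role of the patch's full barrier (A):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct bucket {
	atomic_int waiters;		/* analogue of hb->waiters */
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int ready;
};

static void bucket_wait(struct bucket *b)
{
	/* Announce ourselves *before* we can be observed sleeping;
	 * the seq_cst RMW doubles as the full memory barrier. */
	atomic_fetch_add(&b->waiters, 1);
	pthread_mutex_lock(&b->lock);
	while (!b->ready)
		pthread_cond_wait(&b->cond, &b->lock);
	pthread_mutex_unlock(&b->lock);
	atomic_fetch_sub(&b->waiters, 1);
}

static void bucket_wake(struct bucket *b)
{
	pthread_mutex_lock(&b->lock);
	b->ready = 1;
	pthread_mutex_unlock(&b->lock);

	/* Fast path, analogue of hb_waiters_pending(): only pay for the
	 * broadcast when someone has announced themselves. A waiter that
	 * increments after this read will see ready == 1 and not sleep. */
	if (atomic_load(&b->waiters) != 0)
		pthread_cond_broadcast(&b->cond);
}

static void *waiter(void *arg)
{
	bucket_wait(arg);
	puts("woken");
	return NULL;
}

int main(void)
{
	struct bucket b = { .lock = PTHREAD_MUTEX_INITIALIZER,
			    .cond = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, waiter, &b);
	bucket_wake(&b);
	pthread_join(&t, NULL);
	return 0;
}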
diff --git a/kernel/profile.c b/kernel/profile.c
index 6631e1ef55ab..ebdd9c1a86b4 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -549,14 +549,14 @@ static int create_hash_tables(void)
 		struct page *page;
 
 		page = alloc_pages_exact_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1]
 				= (struct profile_hit *)page_address(page);
 		page = alloc_pages_exact_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
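The profile.c fix is subtler than a typo: in kernels of this era GFP_THISNODE (no leading underscores) was not a single bit but a composite. Paraphrased from memory of include/linux/gfp.h, so treat the exact definition as an assumption:

/* Roughly, in include/linux/gfp.h of this period (illustrative):
 *
 *   #ifdef CONFIG_NUMA
 *   #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN)
 *   #else
 *   #define GFP_THISNODE	((__force gfp_t)0)
 *   #endif
 *
 * ORing the composite into GFP_KERNEL silently added __GFP_NORETRY and
 * __GFP_NOWARN, making these allocations give up early and fail silently.
 * The bare __GFP_THISNODE bit only constrains the allocation to the node:
 */
page = alloc_pages_exact_node(node, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 0);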
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 43c2bcc35761..b30a2924ef14 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu)
 	if (unlikely(!sched_clock_running))
 		return 0ull;
 
-	preempt_disable();
+	preempt_disable_notrace();
 	scd = cpu_sdc(cpu);
 
 	if (cpu != smp_processor_id())
 		clock = sched_clock_remote(scd);
 	else
 		clock = sched_clock_local(scd);
-	preempt_enable();
+	preempt_enable_notrace();
 
 	return clock;
 }
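Why the _notrace variants in sched_clock_cpu(): the likely rationale (hedged; it is the standard one for this class of fix) is that ftrace takes its timestamps via sched_clock_cpu(), and a plain preempt_enable() can call into preempt_schedule(), which is itself traced, so taking a timestamp from inside the tracer could recurse. The _notrace pair does the same preempt-count bookkeeping without the traced hook:

/* Illustrative pattern for any code reachable from the function tracer:
 * pair the _notrace variants so neither side re-enters tracing. */
preempt_disable_notrace();
/* ... read per-cpu clock state; nothing traced on this path ... */
preempt_enable_notrace();	/* does not call traced preempt_schedule() */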
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6edbef296ece..f5c6635b806c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3338,6 +3338,15 @@ recheck:
 			return -EPERM;
 		}
 
+		/*
+		 * Can't set/change SCHED_DEADLINE policy at all for now
+		 * (safest behavior); in the future we would like to allow
+		 * unprivileged DL tasks to increase their relative deadline
+		 * or reduce their runtime (both ways reducing utilization)
+		 */
+		if (dl_policy(policy))
+			return -EPERM;
+
 		/*
 		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
 		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
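Seen from userspace, the new check means a task without CAP_SYS_NICE can no longer request SCHED_DEADLINE at all. A hedged sketch of how that surfaces, using the raw syscall since glibc had no wrapper at the time; the struct layout mirrors include/uapi/linux/sched.h of this release, and the availability of the SYS_sched_setattr constant depends on your headers:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE	6

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, in ns */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;	/* 10 ms */
	attr.sched_deadline = 30 * 1000 * 1000;	/* 30 ms */
	attr.sched_period   = 30 * 1000 * 1000;

	/* Without CAP_SYS_NICE this now fails with EPERM. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) == -1)
		perror("sched_setattr");
	return 0;
}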
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 84571e09c907..01fbae5b97b7 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 	 */
 	smp_call_function_single(min(cpu1, cpu2),
 				 &irq_cpu_stop_queue_work,
-				 &call_args, 0);
+				 &call_args, 1);
 	lg_local_unlock(&stop_cpus_lock);
 	preempt_enable();
 
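The stop_machine change flips the final wait argument of smp_call_function_single() from 0 to 1, so the call does not return until the IPI handler has finished on the target CPU. That matters here because the handler receives a pointer to call_args, which lives on the caller's stack; with wait=0 the caller could unlock and return while the remote CPU is still dereferencing it. A kernel-style sketch of the bug class (remote_handler and run_on are illustrative names; the smp_call_function_single() signature is the real one):

static void remote_handler(void *info)
{
	int *arg = info;	/* points into the caller's stack frame */

	/* ... use *arg ... */
}

static void run_on(int cpu)
{
	int call_args = 42;	/* stack storage, gone once we return */

	/* wait=1: do not return (and so do not free the stack frame)
	 * until remote_handler() has completed on @cpu. */
	smp_call_function_single(cpu, remote_handler, &call_args, 1);
}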
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f3989ceb5cd5..7b16d40bd64d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -27,12 +27,6 @@
 
 DEFINE_MUTEX(event_mutex);
 
-DEFINE_MUTEX(event_storage_mutex);
-EXPORT_SYMBOL_GPL(event_storage_mutex);
-
-char event_storage[EVENT_STORAGE_SIZE];
-EXPORT_SYMBOL_GPL(event_storage);
-
 LIST_HEAD(ftrace_events);
 static LIST_HEAD(ftrace_common_fields);
 
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7c3e3e72e2b6..ee0a5098ac43 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
 #undef __array
 #define __array(type, item, len)					\
 	do {								\
+		char *type_str = #type"["__stringify(len)"]";		\
 		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
-		mutex_lock(&event_storage_mutex);			\
-		snprintf(event_storage, sizeof(event_storage),		\
-			 "%s[%d]", #type, len);				\
-		ret = trace_define_field(event_call, event_storage, #item, \
+		ret = trace_define_field(event_call, type_str, #item,	\
 				 offsetof(typeof(field), item),		\
 				 sizeof(field.item),			\
 				 is_signed_type(type), filter_type);	\
-		mutex_unlock(&event_storage_mutex);			\
 		if (ret)						\
 			return ret;					\
 	} while (0);
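The trace_export change, paired with the trace_events.c removal above, drops the shared event_storage buffer and the mutex that serialized it: the "type[len]" string is now assembled at preprocessing time by token stringification and string-literal concatenation, so there is no runtime buffer to protect. A small standalone demonstration of the idiom; the two-level __stringify is the trick from include/linux/stringify.h, and TYPE_STR is an illustrative name:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)	/* expands macros first */

/* Adjacent string literals concatenate at compile time. */
#define TYPE_STR(type, len)	#type "[" __stringify(len) "]"

int main(void)
{
	const char *type_str = TYPE_STR(char, 16);

	puts(type_str);		/* prints: char[16] */
	return 0;
}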
