| field | value | date |
|---|---|---|
| author | Mauro Carvalho Chehab <m.chehab@samsung.com> | 2014-04-14 11:00:36 -0400 |
| committer | Mauro Carvalho Chehab <m.chehab@samsung.com> | 2014-04-14 11:00:36 -0400 |
| commit | 277a163c83d7ba93fba1e8980d29a9f8bfcfba6c (patch) | |
| tree | ccfd357d152292958957b6b8a993892e7a8cc95f /kernel | |
| parent | a83b93a7480441a47856dc9104bea970e84cda87 (diff) | |
| parent | c9eaa447e77efe77b7fa4c953bd62de8297fd6c5 (diff) | |
Merge tag 'v3.15-rc1' into patchwork
Linux 3.15-rc1
* tag 'v3.15-rc1': (12180 commits)
Linux 3.15-rc1
mm: Initialize error in shmem_file_aio_read()
cifs: Use min_t() when comparing "size_t" and "unsigned long"
sym53c8xx_2: Set DID_REQUEUE return code when aborting squeue
powerpc: Don't try to set LPCR unless we're in hypervisor mode
futex: update documentation for ordering guarantees
ceph: fix pr_fmt() redefinition
vti: don't allow to add the same tunnel twice
gre: don't allow to add the same tunnel twice
drivers: net: xen-netfront: fix array initialization bug
missing bits of "splice: fix racy pipe->buffers uses"
cifs: fix the race in cifs_writev()
ceph_sync_{,direct_}write: fix an oops on ceph_osdc_new_request() failure
pktgen: be friendly to LLTX devices
r8152: check RTL8152_UNPLUG
net: sun4i-emac: add promiscuous support
net/apne: replace IS_ERR and PTR_ERR with PTR_ERR_OR_ZERO
blackfin: cleanup board files
bf609: clock: drop unused clock bit set/clear functions
Blackfin: bf537: rename "CONFIG_ADT75"
...
Diffstat (limited to 'kernel')
137 files changed, 6963 insertions, 5386 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index bc010ee272b6..f2a8b6246ce9 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
| @@ -18,11 +18,13 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg | |||
| 18 | CFLAGS_REMOVE_irq_work.o = -pg | 18 | CFLAGS_REMOVE_irq_work.o = -pg |
| 19 | endif | 19 | endif |
| 20 | 20 | ||
| 21 | # cond_syscall is currently not LTO compatible | ||
| 22 | CFLAGS_sys_ni.o = $(DISABLE_LTO) | ||
| 23 | |||
| 21 | obj-y += sched/ | 24 | obj-y += sched/ |
| 22 | obj-y += locking/ | 25 | obj-y += locking/ |
| 23 | obj-y += power/ | 26 | obj-y += power/ |
| 24 | obj-y += printk/ | 27 | obj-y += printk/ |
| 25 | obj-y += cpu/ | ||
| 26 | obj-y += irq/ | 28 | obj-y += irq/ |
| 27 | obj-y += rcu/ | 29 | obj-y += rcu/ |
| 28 | 30 | ||
| @@ -93,6 +95,7 @@ obj-$(CONFIG_PADATA) += padata.o | |||
| 93 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 95 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
| 94 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o | 96 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o |
| 95 | obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o | 97 | obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o |
| 98 | obj-$(CONFIG_TORTURE_TEST) += torture.o | ||
| 96 | 99 | ||
| 97 | $(obj)/configs.o: $(obj)/config_data.h | 100 | $(obj)/configs.o: $(obj)/config_data.h |
| 98 | 101 | ||
diff --git a/kernel/audit.c b/kernel/audit.c
index 34c5a2310fbf..7c2893602d06 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
| @@ -182,7 +182,7 @@ struct audit_buffer { | |||
| 182 | 182 | ||
| 183 | struct audit_reply { | 183 | struct audit_reply { |
| 184 | __u32 portid; | 184 | __u32 portid; |
| 185 | pid_t pid; | 185 | struct net *net; |
| 186 | struct sk_buff *skb; | 186 | struct sk_buff *skb; |
| 187 | }; | 187 | }; |
| 188 | 188 | ||
| @@ -396,7 +396,7 @@ static void audit_printk_skb(struct sk_buff *skb) | |||
| 396 | if (printk_ratelimit()) | 396 | if (printk_ratelimit()) |
| 397 | pr_notice("type=%d %s\n", nlh->nlmsg_type, data); | 397 | pr_notice("type=%d %s\n", nlh->nlmsg_type, data); |
| 398 | else | 398 | else |
| 399 | audit_log_lost("printk limit exceeded\n"); | 399 | audit_log_lost("printk limit exceeded"); |
| 400 | } | 400 | } |
| 401 | 401 | ||
| 402 | audit_hold_skb(skb); | 402 | audit_hold_skb(skb); |
| @@ -412,7 +412,7 @@ static void kauditd_send_skb(struct sk_buff *skb) | |||
| 412 | BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ | 412 | BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ |
| 413 | if (audit_pid) { | 413 | if (audit_pid) { |
| 414 | pr_err("*NO* daemon at audit_pid=%d\n", audit_pid); | 414 | pr_err("*NO* daemon at audit_pid=%d\n", audit_pid); |
| 415 | audit_log_lost("auditd disappeared\n"); | 415 | audit_log_lost("auditd disappeared"); |
| 416 | audit_pid = 0; | 416 | audit_pid = 0; |
| 417 | audit_sock = NULL; | 417 | audit_sock = NULL; |
| 418 | } | 418 | } |
| @@ -500,7 +500,7 @@ int audit_send_list(void *_dest) | |||
| 500 | { | 500 | { |
| 501 | struct audit_netlink_list *dest = _dest; | 501 | struct audit_netlink_list *dest = _dest; |
| 502 | struct sk_buff *skb; | 502 | struct sk_buff *skb; |
| 503 | struct net *net = get_net_ns_by_pid(dest->pid); | 503 | struct net *net = dest->net; |
| 504 | struct audit_net *aunet = net_generic(net, audit_net_id); | 504 | struct audit_net *aunet = net_generic(net, audit_net_id); |
| 505 | 505 | ||
| 506 | /* wait for parent to finish and send an ACK */ | 506 | /* wait for parent to finish and send an ACK */ |
| @@ -510,6 +510,7 @@ int audit_send_list(void *_dest) | |||
| 510 | while ((skb = __skb_dequeue(&dest->q)) != NULL) | 510 | while ((skb = __skb_dequeue(&dest->q)) != NULL) |
| 511 | netlink_unicast(aunet->nlsk, skb, dest->portid, 0); | 511 | netlink_unicast(aunet->nlsk, skb, dest->portid, 0); |
| 512 | 512 | ||
| 513 | put_net(net); | ||
| 513 | kfree(dest); | 514 | kfree(dest); |
| 514 | 515 | ||
| 515 | return 0; | 516 | return 0; |
| @@ -543,7 +544,7 @@ out_kfree_skb: | |||
| 543 | static int audit_send_reply_thread(void *arg) | 544 | static int audit_send_reply_thread(void *arg) |
| 544 | { | 545 | { |
| 545 | struct audit_reply *reply = (struct audit_reply *)arg; | 546 | struct audit_reply *reply = (struct audit_reply *)arg; |
| 546 | struct net *net = get_net_ns_by_pid(reply->pid); | 547 | struct net *net = reply->net; |
| 547 | struct audit_net *aunet = net_generic(net, audit_net_id); | 548 | struct audit_net *aunet = net_generic(net, audit_net_id); |
| 548 | 549 | ||
| 549 | mutex_lock(&audit_cmd_mutex); | 550 | mutex_lock(&audit_cmd_mutex); |
| @@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg) | |||
| 552 | /* Ignore failure. It'll only happen if the sender goes away, | 553 | /* Ignore failure. It'll only happen if the sender goes away, |
| 553 | because our timeout is set to infinite. */ | 554 | because our timeout is set to infinite. */ |
| 554 | netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); | 555 | netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); |
| 556 | put_net(net); | ||
| 555 | kfree(reply); | 557 | kfree(reply); |
| 556 | return 0; | 558 | return 0; |
| 557 | } | 559 | } |
| 558 | /** | 560 | /** |
| 559 | * audit_send_reply - send an audit reply message via netlink | 561 | * audit_send_reply - send an audit reply message via netlink |
| 560 | * @portid: netlink port to which to send reply | 562 | * @request_skb: skb of request we are replying to (used to target the reply) |
| 561 | * @seq: sequence number | 563 | * @seq: sequence number |
| 562 | * @type: audit message type | 564 | * @type: audit message type |
| 563 | * @done: done (last) flag | 565 | * @done: done (last) flag |
| @@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg) | |||
| 568 | * Allocates an skb, builds the netlink message, and sends it to the port id. | 570 | * Allocates an skb, builds the netlink message, and sends it to the port id. |
| 569 | * No failure notifications. | 571 | * No failure notifications. |
| 570 | */ | 572 | */ |
| 571 | static void audit_send_reply(__u32 portid, int seq, int type, int done, | 573 | static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, |
| 572 | int multi, const void *payload, int size) | 574 | int multi, const void *payload, int size) |
| 573 | { | 575 | { |
| 576 | u32 portid = NETLINK_CB(request_skb).portid; | ||
| 577 | struct net *net = sock_net(NETLINK_CB(request_skb).sk); | ||
| 574 | struct sk_buff *skb; | 578 | struct sk_buff *skb; |
| 575 | struct task_struct *tsk; | 579 | struct task_struct *tsk; |
| 576 | struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), | 580 | struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), |
| @@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done, | |||
| 583 | if (!skb) | 587 | if (!skb) |
| 584 | goto out; | 588 | goto out; |
| 585 | 589 | ||
| 590 | reply->net = get_net(net); | ||
| 586 | reply->portid = portid; | 591 | reply->portid = portid; |
| 587 | reply->pid = task_pid_vnr(current); | ||
| 588 | reply->skb = skb; | 592 | reply->skb = skb; |
| 589 | 593 | ||
| 590 | tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); | 594 | tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); |
| @@ -603,10 +607,19 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) | |||
| 603 | { | 607 | { |
| 604 | int err = 0; | 608 | int err = 0; |
| 605 | 609 | ||
| 606 | /* Only support the initial namespaces for now. */ | 610 | /* Only support initial user namespace for now. */ |
| 607 | if ((current_user_ns() != &init_user_ns) || | 611 | /* |
| 608 | (task_active_pid_ns(current) != &init_pid_ns)) | 612 | * We return ECONNREFUSED because it tricks userspace into thinking |
| 609 | return -EPERM; | 613 | * that audit was not configured into the kernel. Lots of users |
| 614 | * configure their PAM stack (because that's what the distro does) | ||
| 615 | * to reject login if unable to send messages to audit. If we return | ||
| 616 | * ECONNREFUSED the PAM stack thinks the kernel does not have audit | ||
| 617 | * configured in and will let login proceed. If we return EPERM | ||
| 618 | * userspace will reject all logins. This should be removed when we | ||
| 619 | * support non init namespaces!! | ||
| 620 | */ | ||
| 621 | if (current_user_ns() != &init_user_ns) | ||
| 622 | return -ECONNREFUSED; | ||
| 610 | 623 | ||
| 611 | switch (msg_type) { | 624 | switch (msg_type) { |
| 612 | case AUDIT_LIST: | 625 | case AUDIT_LIST: |
| @@ -625,6 +638,11 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) | |||
| 625 | case AUDIT_TTY_SET: | 638 | case AUDIT_TTY_SET: |
| 626 | case AUDIT_TRIM: | 639 | case AUDIT_TRIM: |
| 627 | case AUDIT_MAKE_EQUIV: | 640 | case AUDIT_MAKE_EQUIV: |
| 641 | /* Only support auditd and auditctl in initial pid namespace | ||
| 642 | * for now. */ | ||
| 643 | if ((task_active_pid_ns(current) != &init_pid_ns)) | ||
| 644 | return -EPERM; | ||
| 645 | |||
| 628 | if (!capable(CAP_AUDIT_CONTROL)) | 646 | if (!capable(CAP_AUDIT_CONTROL)) |
| 629 | err = -EPERM; | 647 | err = -EPERM; |
| 630 | break; | 648 | break; |
| @@ -645,6 +663,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type) | |||
| 645 | { | 663 | { |
| 646 | int rc = 0; | 664 | int rc = 0; |
| 647 | uid_t uid = from_kuid(&init_user_ns, current_uid()); | 665 | uid_t uid = from_kuid(&init_user_ns, current_uid()); |
| 666 | pid_t pid = task_tgid_nr(current); | ||
| 648 | 667 | ||
| 649 | if (!audit_enabled && msg_type != AUDIT_USER_AVC) { | 668 | if (!audit_enabled && msg_type != AUDIT_USER_AVC) { |
| 650 | *ab = NULL; | 669 | *ab = NULL; |
| @@ -654,7 +673,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type) | |||
| 654 | *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); | 673 | *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); |
| 655 | if (unlikely(!*ab)) | 674 | if (unlikely(!*ab)) |
| 656 | return rc; | 675 | return rc; |
| 657 | audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid); | 676 | audit_log_format(*ab, "pid=%d uid=%u", pid, uid); |
| 658 | audit_log_session_info(*ab); | 677 | audit_log_session_info(*ab); |
| 659 | audit_log_task_context(*ab); | 678 | audit_log_task_context(*ab); |
| 660 | 679 | ||
| @@ -673,8 +692,7 @@ static int audit_get_feature(struct sk_buff *skb) | |||
| 673 | 692 | ||
| 674 | seq = nlmsg_hdr(skb)->nlmsg_seq; | 693 | seq = nlmsg_hdr(skb)->nlmsg_seq; |
| 675 | 694 | ||
| 676 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, | 695 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af)); |
| 677 | &af, sizeof(af)); | ||
| 678 | 696 | ||
| 679 | return 0; | 697 | return 0; |
| 680 | } | 698 | } |
| @@ -794,8 +812,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 794 | s.backlog = skb_queue_len(&audit_skb_queue); | 812 | s.backlog = skb_queue_len(&audit_skb_queue); |
| 795 | s.version = AUDIT_VERSION_LATEST; | 813 | s.version = AUDIT_VERSION_LATEST; |
| 796 | s.backlog_wait_time = audit_backlog_wait_time; | 814 | s.backlog_wait_time = audit_backlog_wait_time; |
| 797 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, | 815 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); |
| 798 | &s, sizeof(s)); | ||
| 799 | break; | 816 | break; |
| 800 | } | 817 | } |
| 801 | case AUDIT_SET: { | 818 | case AUDIT_SET: { |
| @@ -905,7 +922,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 905 | seq, data, nlmsg_len(nlh)); | 922 | seq, data, nlmsg_len(nlh)); |
| 906 | break; | 923 | break; |
| 907 | case AUDIT_LIST_RULES: | 924 | case AUDIT_LIST_RULES: |
| 908 | err = audit_list_rules_send(NETLINK_CB(skb).portid, seq); | 925 | err = audit_list_rules_send(skb, seq); |
| 909 | break; | 926 | break; |
| 910 | case AUDIT_TRIM: | 927 | case AUDIT_TRIM: |
| 911 | audit_trim_trees(); | 928 | audit_trim_trees(); |
| @@ -970,8 +987,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 970 | memcpy(sig_data->ctx, ctx, len); | 987 | memcpy(sig_data->ctx, ctx, len); |
| 971 | security_release_secctx(ctx, len); | 988 | security_release_secctx(ctx, len); |
| 972 | } | 989 | } |
| 973 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO, | 990 | audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0, |
| 974 | 0, 0, sig_data, sizeof(*sig_data) + len); | 991 | sig_data, sizeof(*sig_data) + len); |
| 975 | kfree(sig_data); | 992 | kfree(sig_data); |
| 976 | break; | 993 | break; |
| 977 | case AUDIT_TTY_GET: { | 994 | case AUDIT_TTY_GET: { |
| @@ -983,8 +1000,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 983 | s.log_passwd = tsk->signal->audit_tty_log_passwd; | 1000 | s.log_passwd = tsk->signal->audit_tty_log_passwd; |
| 984 | spin_unlock(&tsk->sighand->siglock); | 1001 | spin_unlock(&tsk->sighand->siglock); |
| 985 | 1002 | ||
| 986 | audit_send_reply(NETLINK_CB(skb).portid, seq, | 1003 | audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); |
| 987 | AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); | ||
| 988 | break; | 1004 | break; |
| 989 | } | 1005 | } |
| 990 | case AUDIT_TTY_SET: { | 1006 | case AUDIT_TTY_SET: { |
| @@ -1086,7 +1102,7 @@ static void __net_exit audit_net_exit(struct net *net) | |||
| 1086 | audit_sock = NULL; | 1102 | audit_sock = NULL; |
| 1087 | } | 1103 | } |
| 1088 | 1104 | ||
| 1089 | rcu_assign_pointer(aunet->nlsk, NULL); | 1105 | RCU_INIT_POINTER(aunet->nlsk, NULL); |
| 1090 | synchronize_net(); | 1106 | synchronize_net(); |
| 1091 | netlink_kernel_release(sock); | 1107 | netlink_kernel_release(sock); |
| 1092 | } | 1108 | } |
| @@ -1818,11 +1834,11 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) | |||
| 1818 | spin_unlock_irq(&tsk->sighand->siglock); | 1834 | spin_unlock_irq(&tsk->sighand->siglock); |
| 1819 | 1835 | ||
| 1820 | audit_log_format(ab, | 1836 | audit_log_format(ab, |
| 1821 | " ppid=%ld pid=%d auid=%u uid=%u gid=%u" | 1837 | " ppid=%d pid=%d auid=%u uid=%u gid=%u" |
| 1822 | " euid=%u suid=%u fsuid=%u" | 1838 | " euid=%u suid=%u fsuid=%u" |
| 1823 | " egid=%u sgid=%u fsgid=%u tty=%s ses=%u", | 1839 | " egid=%u sgid=%u fsgid=%u tty=%s ses=%u", |
| 1824 | sys_getppid(), | 1840 | task_ppid_nr(tsk), |
| 1825 | tsk->pid, | 1841 | task_pid_nr(tsk), |
| 1826 | from_kuid(&init_user_ns, audit_get_loginuid(tsk)), | 1842 | from_kuid(&init_user_ns, audit_get_loginuid(tsk)), |
| 1827 | from_kuid(&init_user_ns, cred->uid), | 1843 | from_kuid(&init_user_ns, cred->uid), |
| 1828 | from_kgid(&init_user_ns, cred->gid), | 1844 | from_kgid(&init_user_ns, cred->gid), |
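The audit.c hunks above stop resolving the requester's network namespace from a pid (get_net_ns_by_pid()) and instead take both the netlink port id and the namespace straight from the request skb, pinning the namespace with get_net() until the asynchronous reply thread has sent its message. A minimal sketch of that pattern; the example_* names are illustrative, not the kernel's own helpers:

```c
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>

struct example_reply {
	__u32 portid;		/* who asked */
	struct net *net;	/* their netns, pinned with get_net() */
	struct sk_buff *skb;	/* the reply to deliver */
};

/* Capture routing information while the request skb is still in hand. */
static void example_prepare_reply(struct example_reply *reply,
				  struct sk_buff *request_skb,
				  struct sk_buff *reply_skb)
{
	reply->portid = NETLINK_CB(request_skb).portid;
	reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
	reply->skb = reply_skb;
}

/* Later, possibly from another thread: send, then drop the netns reference. */
static void example_deliver_reply(struct sock *nlsk, struct example_reply *reply)
{
	netlink_unicast(nlsk, reply->skb, reply->portid, 0);
	put_net(reply->net);
}
```

Holding the reference is what allows audit_send_reply_thread() and audit_send_list() to run safely after the request that queued them has been processed, which is why the hunks also add put_net() in both of those paths.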
diff --git a/kernel/audit.h b/kernel/audit.h
index 57cc64d67718..7bb65730c890 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
| @@ -106,6 +106,11 @@ struct audit_names { | |||
| 106 | bool should_free; | 106 | bool should_free; |
| 107 | }; | 107 | }; |
| 108 | 108 | ||
| 109 | struct audit_proctitle { | ||
| 110 | int len; /* length of the cmdline field. */ | ||
| 111 | char *value; /* the cmdline field */ | ||
| 112 | }; | ||
| 113 | |||
| 109 | /* The per-task audit context. */ | 114 | /* The per-task audit context. */ |
| 110 | struct audit_context { | 115 | struct audit_context { |
| 111 | int dummy; /* must be the first element */ | 116 | int dummy; /* must be the first element */ |
| @@ -202,6 +207,7 @@ struct audit_context { | |||
| 202 | } execve; | 207 | } execve; |
| 203 | }; | 208 | }; |
| 204 | int fds[2]; | 209 | int fds[2]; |
| 210 | struct audit_proctitle proctitle; | ||
| 205 | 211 | ||
| 206 | #if AUDIT_DEBUG | 212 | #if AUDIT_DEBUG |
| 207 | int put_count; | 213 | int put_count; |
| @@ -247,7 +253,7 @@ extern void audit_panic(const char *message); | |||
| 247 | 253 | ||
| 248 | struct audit_netlink_list { | 254 | struct audit_netlink_list { |
| 249 | __u32 portid; | 255 | __u32 portid; |
| 250 | pid_t pid; | 256 | struct net *net; |
| 251 | struct sk_buff_head q; | 257 | struct sk_buff_head q; |
| 252 | }; | 258 | }; |
| 253 | 259 | ||
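audit.h now carries a small per-context cache for the process title so the exit path can log it without re-reading the command line on every event; the value is allocated on first use and released with the context. A brief sketch of that ownership rule, with hypothetical example_* names:

```c
#include <linux/slab.h>

struct example_proctitle {
	int len;	/* length of the cached cmdline */
	char *value;	/* kmalloc()ed on first use, NULL until then */
};

/* Called when the owning audit context is torn down. */
static void example_proctitle_free(struct example_proctitle *pt)
{
	kfree(pt->value);
	pt->value = NULL;
	pt->len = 0;
}
```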
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 14a78cca384e..8e9bc9c3dbb7 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
| @@ -19,6 +19,8 @@ | |||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 20 | */ | 20 | */ |
| 21 | 21 | ||
| 22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 23 | |||
| 22 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
| 23 | #include <linux/audit.h> | 25 | #include <linux/audit.h> |
| 24 | #include <linux/kthread.h> | 26 | #include <linux/kthread.h> |
| @@ -29,6 +31,8 @@ | |||
| 29 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
| 30 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
| 31 | #include <linux/security.h> | 33 | #include <linux/security.h> |
| 34 | #include <net/net_namespace.h> | ||
| 35 | #include <net/sock.h> | ||
| 32 | #include "audit.h" | 36 | #include "audit.h" |
| 33 | 37 | ||
| 34 | /* | 38 | /* |
| @@ -224,7 +228,7 @@ static int audit_match_signal(struct audit_entry *entry) | |||
| 224 | #endif | 228 | #endif |
| 225 | 229 | ||
| 226 | /* Common user-space to kernel rule translation. */ | 230 | /* Common user-space to kernel rule translation. */ |
| 227 | static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) | 231 | static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *rule) |
| 228 | { | 232 | { |
| 229 | unsigned listnr; | 233 | unsigned listnr; |
| 230 | struct audit_entry *entry; | 234 | struct audit_entry *entry; |
| @@ -247,7 +251,7 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) | |||
| 247 | ; | 251 | ; |
| 248 | } | 252 | } |
| 249 | if (unlikely(rule->action == AUDIT_POSSIBLE)) { | 253 | if (unlikely(rule->action == AUDIT_POSSIBLE)) { |
| 250 | printk(KERN_ERR "AUDIT_POSSIBLE is deprecated\n"); | 254 | pr_err("AUDIT_POSSIBLE is deprecated\n"); |
| 251 | goto exit_err; | 255 | goto exit_err; |
| 252 | } | 256 | } |
| 253 | if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS) | 257 | if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS) |
| @@ -401,7 +405,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, | |||
| 401 | int i; | 405 | int i; |
| 402 | char *str; | 406 | char *str; |
| 403 | 407 | ||
| 404 | entry = audit_to_entry_common((struct audit_rule *)data); | 408 | entry = audit_to_entry_common(data); |
| 405 | if (IS_ERR(entry)) | 409 | if (IS_ERR(entry)) |
| 406 | goto exit_nofree; | 410 | goto exit_nofree; |
| 407 | 411 | ||
| @@ -429,6 +433,19 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, | |||
| 429 | f->val = 0; | 433 | f->val = 0; |
| 430 | } | 434 | } |
| 431 | 435 | ||
| 436 | if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) { | ||
| 437 | struct pid *pid; | ||
| 438 | rcu_read_lock(); | ||
| 439 | pid = find_vpid(f->val); | ||
| 440 | if (!pid) { | ||
| 441 | rcu_read_unlock(); | ||
| 442 | err = -ESRCH; | ||
| 443 | goto exit_free; | ||
| 444 | } | ||
| 445 | f->val = pid_nr(pid); | ||
| 446 | rcu_read_unlock(); | ||
| 447 | } | ||
| 448 | |||
| 432 | err = audit_field_valid(entry, f); | 449 | err = audit_field_valid(entry, f); |
| 433 | if (err) | 450 | if (err) |
| 434 | goto exit_free; | 451 | goto exit_free; |
| @@ -477,8 +494,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, | |||
| 477 | /* Keep currently invalid fields around in case they | 494 | /* Keep currently invalid fields around in case they |
| 478 | * become valid after a policy reload. */ | 495 | * become valid after a policy reload. */ |
| 479 | if (err == -EINVAL) { | 496 | if (err == -EINVAL) { |
| 480 | printk(KERN_WARNING "audit rule for LSM " | 497 | pr_warn("audit rule for LSM \'%s\' is invalid\n", |
| 481 | "\'%s\' is invalid\n", str); | 498 | str); |
| 482 | err = 0; | 499 | err = 0; |
| 483 | } | 500 | } |
| 484 | if (err) { | 501 | if (err) { |
| @@ -707,8 +724,8 @@ static inline int audit_dupe_lsm_field(struct audit_field *df, | |||
| 707 | /* Keep currently invalid fields around in case they | 724 | /* Keep currently invalid fields around in case they |
| 708 | * become valid after a policy reload. */ | 725 | * become valid after a policy reload. */ |
| 709 | if (ret == -EINVAL) { | 726 | if (ret == -EINVAL) { |
| 710 | printk(KERN_WARNING "audit rule for LSM \'%s\' is " | 727 | pr_warn("audit rule for LSM \'%s\' is invalid\n", |
| 711 | "invalid\n", df->lsm_str); | 728 | df->lsm_str); |
| 712 | ret = 0; | 729 | ret = 0; |
| 713 | } | 730 | } |
| 714 | 731 | ||
| @@ -1065,11 +1082,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data, | |||
| 1065 | 1082 | ||
| 1066 | /** | 1083 | /** |
| 1067 | * audit_list_rules_send - list the audit rules | 1084 | * audit_list_rules_send - list the audit rules |
| 1068 | * @portid: target portid for netlink audit messages | 1085 | * @request_skb: skb of request we are replying to (used to target the reply) |
| 1069 | * @seq: netlink audit message sequence (serial) number | 1086 | * @seq: netlink audit message sequence (serial) number |
| 1070 | */ | 1087 | */ |
| 1071 | int audit_list_rules_send(__u32 portid, int seq) | 1088 | int audit_list_rules_send(struct sk_buff *request_skb, int seq) |
| 1072 | { | 1089 | { |
| 1090 | u32 portid = NETLINK_CB(request_skb).portid; | ||
| 1091 | struct net *net = sock_net(NETLINK_CB(request_skb).sk); | ||
| 1073 | struct task_struct *tsk; | 1092 | struct task_struct *tsk; |
| 1074 | struct audit_netlink_list *dest; | 1093 | struct audit_netlink_list *dest; |
| 1075 | int err = 0; | 1094 | int err = 0; |
| @@ -1083,8 +1102,8 @@ int audit_list_rules_send(__u32 portid, int seq) | |||
| 1083 | dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); | 1102 | dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); |
| 1084 | if (!dest) | 1103 | if (!dest) |
| 1085 | return -ENOMEM; | 1104 | return -ENOMEM; |
| 1105 | dest->net = get_net(net); | ||
| 1086 | dest->portid = portid; | 1106 | dest->portid = portid; |
| 1087 | dest->pid = task_pid_vnr(current); | ||
| 1088 | skb_queue_head_init(&dest->q); | 1107 | skb_queue_head_init(&dest->q); |
| 1089 | 1108 | ||
| 1090 | mutex_lock(&audit_filter_mutex); | 1109 | mutex_lock(&audit_filter_mutex); |
| @@ -1236,12 +1255,14 @@ static int audit_filter_user_rules(struct audit_krule *rule, int type, | |||
| 1236 | 1255 | ||
| 1237 | for (i = 0; i < rule->field_count; i++) { | 1256 | for (i = 0; i < rule->field_count; i++) { |
| 1238 | struct audit_field *f = &rule->fields[i]; | 1257 | struct audit_field *f = &rule->fields[i]; |
| 1258 | pid_t pid; | ||
| 1239 | int result = 0; | 1259 | int result = 0; |
| 1240 | u32 sid; | 1260 | u32 sid; |
| 1241 | 1261 | ||
| 1242 | switch (f->type) { | 1262 | switch (f->type) { |
| 1243 | case AUDIT_PID: | 1263 | case AUDIT_PID: |
| 1244 | result = audit_comparator(task_pid_vnr(current), f->op, f->val); | 1264 | pid = task_pid_nr(current); |
| 1265 | result = audit_comparator(pid, f->op, f->val); | ||
| 1245 | break; | 1266 | break; |
| 1246 | case AUDIT_UID: | 1267 | case AUDIT_UID: |
| 1247 | result = audit_uid_comparator(current_uid(), f->op, f->uid); | 1268 | result = audit_uid_comparator(current_uid(), f->op, f->uid); |
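The auditfilter.c hunks pin AUDIT_PID/AUDIT_PPID rule values to the initial pid namespace: the value supplied with the rule is looked up as a vpid in the sender's namespace and rewritten to the global pid number before the rule is stored. A sketch of just that translation step, using a made-up example_vpid_to_global() helper:

```c
#include <linux/errno.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

static int example_vpid_to_global(u32 *val)
{
	struct pid *pid;

	rcu_read_lock();
	pid = find_vpid(*val);		/* look up in the caller's pid namespace */
	if (!pid) {
		rcu_read_unlock();
		return -ESRCH;		/* no such process: reject the rule */
	}
	*val = pid_nr(pid);		/* global (init-namespace) pid number */
	rcu_read_unlock();
	return 0;
}
```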
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7aef2f4b6c64..f251a5e8d17a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
| @@ -42,6 +42,8 @@ | |||
| 42 | * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance. | 42 | * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance. |
| 43 | */ | 43 | */ |
| 44 | 44 | ||
| 45 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 46 | |||
| 45 | #include <linux/init.h> | 47 | #include <linux/init.h> |
| 46 | #include <asm/types.h> | 48 | #include <asm/types.h> |
| 47 | #include <linux/atomic.h> | 49 | #include <linux/atomic.h> |
| @@ -68,6 +70,7 @@ | |||
| 68 | #include <linux/capability.h> | 70 | #include <linux/capability.h> |
| 69 | #include <linux/fs_struct.h> | 71 | #include <linux/fs_struct.h> |
| 70 | #include <linux/compat.h> | 72 | #include <linux/compat.h> |
| 73 | #include <linux/ctype.h> | ||
| 71 | 74 | ||
| 72 | #include "audit.h" | 75 | #include "audit.h" |
| 73 | 76 | ||
| @@ -79,6 +82,9 @@ | |||
| 79 | /* no execve audit message should be longer than this (userspace limits) */ | 82 | /* no execve audit message should be longer than this (userspace limits) */ |
| 80 | #define MAX_EXECVE_AUDIT_LEN 7500 | 83 | #define MAX_EXECVE_AUDIT_LEN 7500 |
| 81 | 84 | ||
| 85 | /* max length to print of cmdline/proctitle value during audit */ | ||
| 86 | #define MAX_PROCTITLE_AUDIT_LEN 128 | ||
| 87 | |||
| 82 | /* number of audit rules */ | 88 | /* number of audit rules */ |
| 83 | int audit_n_rules; | 89 | int audit_n_rules; |
| 84 | 90 | ||
| @@ -451,15 +457,17 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
| 451 | struct audit_field *f = &rule->fields[i]; | 457 | struct audit_field *f = &rule->fields[i]; |
| 452 | struct audit_names *n; | 458 | struct audit_names *n; |
| 453 | int result = 0; | 459 | int result = 0; |
| 460 | pid_t pid; | ||
| 454 | 461 | ||
| 455 | switch (f->type) { | 462 | switch (f->type) { |
| 456 | case AUDIT_PID: | 463 | case AUDIT_PID: |
| 457 | result = audit_comparator(tsk->pid, f->op, f->val); | 464 | pid = task_pid_nr(tsk); |
| 465 | result = audit_comparator(pid, f->op, f->val); | ||
| 458 | break; | 466 | break; |
| 459 | case AUDIT_PPID: | 467 | case AUDIT_PPID: |
| 460 | if (ctx) { | 468 | if (ctx) { |
| 461 | if (!ctx->ppid) | 469 | if (!ctx->ppid) |
| 462 | ctx->ppid = sys_getppid(); | 470 | ctx->ppid = task_ppid_nr(tsk); |
| 463 | result = audit_comparator(ctx->ppid, f->op, f->val); | 471 | result = audit_comparator(ctx->ppid, f->op, f->val); |
| 464 | } | 472 | } |
| 465 | break; | 473 | break; |
| @@ -805,7 +813,8 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx) | |||
| 805 | rcu_read_unlock(); | 813 | rcu_read_unlock(); |
| 806 | } | 814 | } |
| 807 | 815 | ||
| 808 | static inline struct audit_context *audit_get_context(struct task_struct *tsk, | 816 | /* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */ |
| 817 | static inline struct audit_context *audit_take_context(struct task_struct *tsk, | ||
| 809 | int return_valid, | 818 | int return_valid, |
| 810 | long return_code) | 819 | long return_code) |
| 811 | { | 820 | { |
| @@ -842,6 +851,13 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk, | |||
| 842 | return context; | 851 | return context; |
| 843 | } | 852 | } |
| 844 | 853 | ||
| 854 | static inline void audit_proctitle_free(struct audit_context *context) | ||
| 855 | { | ||
| 856 | kfree(context->proctitle.value); | ||
| 857 | context->proctitle.value = NULL; | ||
| 858 | context->proctitle.len = 0; | ||
| 859 | } | ||
| 860 | |||
| 845 | static inline void audit_free_names(struct audit_context *context) | 861 | static inline void audit_free_names(struct audit_context *context) |
| 846 | { | 862 | { |
| 847 | struct audit_names *n, *next; | 863 | struct audit_names *n, *next; |
| @@ -850,16 +866,15 @@ static inline void audit_free_names(struct audit_context *context) | |||
| 850 | if (context->put_count + context->ino_count != context->name_count) { | 866 | if (context->put_count + context->ino_count != context->name_count) { |
| 851 | int i = 0; | 867 | int i = 0; |
| 852 | 868 | ||
| 853 | printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d" | 869 | pr_err("%s:%d(:%d): major=%d in_syscall=%d" |
| 854 | " name_count=%d put_count=%d" | 870 | " name_count=%d put_count=%d ino_count=%d" |
| 855 | " ino_count=%d [NOT freeing]\n", | 871 | " [NOT freeing]\n", __FILE__, __LINE__, |
| 856 | __FILE__, __LINE__, | ||
| 857 | context->serial, context->major, context->in_syscall, | 872 | context->serial, context->major, context->in_syscall, |
| 858 | context->name_count, context->put_count, | 873 | context->name_count, context->put_count, |
| 859 | context->ino_count); | 874 | context->ino_count); |
| 860 | list_for_each_entry(n, &context->names_list, list) { | 875 | list_for_each_entry(n, &context->names_list, list) { |
| 861 | printk(KERN_ERR "names[%d] = %p = %s\n", i++, | 876 | pr_err("names[%d] = %p = %s\n", i++, n->name, |
| 862 | n->name, n->name->name ?: "(null)"); | 877 | n->name->name ?: "(null)"); |
| 863 | } | 878 | } |
| 864 | dump_stack(); | 879 | dump_stack(); |
| 865 | return; | 880 | return; |
| @@ -955,6 +970,7 @@ static inline void audit_free_context(struct audit_context *context) | |||
| 955 | audit_free_aux(context); | 970 | audit_free_aux(context); |
| 956 | kfree(context->filterkey); | 971 | kfree(context->filterkey); |
| 957 | kfree(context->sockaddr); | 972 | kfree(context->sockaddr); |
| 973 | audit_proctitle_free(context); | ||
| 958 | kfree(context); | 974 | kfree(context); |
| 959 | } | 975 | } |
| 960 | 976 | ||
| @@ -1157,7 +1173,7 @@ static void audit_log_execve_info(struct audit_context *context, | |||
| 1157 | */ | 1173 | */ |
| 1158 | buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); | 1174 | buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); |
| 1159 | if (!buf) { | 1175 | if (!buf) { |
| 1160 | audit_panic("out of memory for argv string\n"); | 1176 | audit_panic("out of memory for argv string"); |
| 1161 | return; | 1177 | return; |
| 1162 | } | 1178 | } |
| 1163 | 1179 | ||
| @@ -1271,6 +1287,59 @@ static void show_special(struct audit_context *context, int *call_panic) | |||
| 1271 | audit_log_end(ab); | 1287 | audit_log_end(ab); |
| 1272 | } | 1288 | } |
| 1273 | 1289 | ||
| 1290 | static inline int audit_proctitle_rtrim(char *proctitle, int len) | ||
| 1291 | { | ||
| 1292 | char *end = proctitle + len - 1; | ||
| 1293 | while (end > proctitle && !isprint(*end)) | ||
| 1294 | end--; | ||
| 1295 | |||
| 1296 | /* catch the case where proctitle is only 1 non-print character */ | ||
| 1297 | len = end - proctitle + 1; | ||
| 1298 | len -= isprint(proctitle[len-1]) == 0; | ||
| 1299 | return len; | ||
| 1300 | } | ||
| 1301 | |||
| 1302 | static void audit_log_proctitle(struct task_struct *tsk, | ||
| 1303 | struct audit_context *context) | ||
| 1304 | { | ||
| 1305 | int res; | ||
| 1306 | char *buf; | ||
| 1307 | char *msg = "(null)"; | ||
| 1308 | int len = strlen(msg); | ||
| 1309 | struct audit_buffer *ab; | ||
| 1310 | |||
| 1311 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); | ||
| 1312 | if (!ab) | ||
| 1313 | return; /* audit_panic or being filtered */ | ||
| 1314 | |||
| 1315 | audit_log_format(ab, "proctitle="); | ||
| 1316 | |||
| 1317 | /* Not cached */ | ||
| 1318 | if (!context->proctitle.value) { | ||
| 1319 | buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL); | ||
| 1320 | if (!buf) | ||
| 1321 | goto out; | ||
| 1322 | /* Historically called this from procfs naming */ | ||
| 1323 | res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN); | ||
| 1324 | if (res == 0) { | ||
| 1325 | kfree(buf); | ||
| 1326 | goto out; | ||
| 1327 | } | ||
| 1328 | res = audit_proctitle_rtrim(buf, res); | ||
| 1329 | if (res == 0) { | ||
| 1330 | kfree(buf); | ||
| 1331 | goto out; | ||
| 1332 | } | ||
| 1333 | context->proctitle.value = buf; | ||
| 1334 | context->proctitle.len = res; | ||
| 1335 | } | ||
| 1336 | msg = context->proctitle.value; | ||
| 1337 | len = context->proctitle.len; | ||
| 1338 | out: | ||
| 1339 | audit_log_n_untrustedstring(ab, msg, len); | ||
| 1340 | audit_log_end(ab); | ||
| 1341 | } | ||
| 1342 | |||
| 1274 | static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) | 1343 | static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) |
| 1275 | { | 1344 | { |
| 1276 | int i, call_panic = 0; | 1345 | int i, call_panic = 0; |
| @@ -1388,6 +1457,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
| 1388 | audit_log_name(context, n, NULL, i++, &call_panic); | 1457 | audit_log_name(context, n, NULL, i++, &call_panic); |
| 1389 | } | 1458 | } |
| 1390 | 1459 | ||
| 1460 | audit_log_proctitle(tsk, context); | ||
| 1461 | |||
| 1391 | /* Send end of event record to help user space know we are finished */ | 1462 | /* Send end of event record to help user space know we are finished */ |
| 1392 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); | 1463 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); |
| 1393 | if (ab) | 1464 | if (ab) |
| @@ -1406,7 +1477,7 @@ void __audit_free(struct task_struct *tsk) | |||
| 1406 | { | 1477 | { |
| 1407 | struct audit_context *context; | 1478 | struct audit_context *context; |
| 1408 | 1479 | ||
| 1409 | context = audit_get_context(tsk, 0, 0); | 1480 | context = audit_take_context(tsk, 0, 0); |
| 1410 | if (!context) | 1481 | if (!context) |
| 1411 | return; | 1482 | return; |
| 1412 | 1483 | ||
| @@ -1500,7 +1571,7 @@ void __audit_syscall_exit(int success, long return_code) | |||
| 1500 | else | 1571 | else |
| 1501 | success = AUDITSC_FAILURE; | 1572 | success = AUDITSC_FAILURE; |
| 1502 | 1573 | ||
| 1503 | context = audit_get_context(tsk, success, return_code); | 1574 | context = audit_take_context(tsk, success, return_code); |
| 1504 | if (!context) | 1575 | if (!context) |
| 1505 | return; | 1576 | return; |
| 1506 | 1577 | ||
| @@ -1550,7 +1621,7 @@ static inline void handle_one(const struct inode *inode) | |||
| 1550 | if (likely(put_tree_ref(context, chunk))) | 1621 | if (likely(put_tree_ref(context, chunk))) |
| 1551 | return; | 1622 | return; |
| 1552 | if (unlikely(!grow_tree_refs(context))) { | 1623 | if (unlikely(!grow_tree_refs(context))) { |
| 1553 | printk(KERN_WARNING "out of memory, audit has lost a tree reference\n"); | 1624 | pr_warn("out of memory, audit has lost a tree reference\n"); |
| 1554 | audit_set_auditable(context); | 1625 | audit_set_auditable(context); |
| 1555 | audit_put_chunk(chunk); | 1626 | audit_put_chunk(chunk); |
| 1556 | unroll_tree_refs(context, p, count); | 1627 | unroll_tree_refs(context, p, count); |
| @@ -1609,8 +1680,7 @@ retry: | |||
| 1609 | goto retry; | 1680 | goto retry; |
| 1610 | } | 1681 | } |
| 1611 | /* too bad */ | 1682 | /* too bad */ |
| 1612 | printk(KERN_WARNING | 1683 | pr_warn("out of memory, audit has lost a tree reference\n"); |
| 1613 | "out of memory, audit has lost a tree reference\n"); | ||
| 1614 | unroll_tree_refs(context, p, count); | 1684 | unroll_tree_refs(context, p, count); |
| 1615 | audit_set_auditable(context); | 1685 | audit_set_auditable(context); |
| 1616 | return; | 1686 | return; |
| @@ -1682,7 +1752,7 @@ void __audit_getname(struct filename *name) | |||
| 1682 | 1752 | ||
| 1683 | if (!context->in_syscall) { | 1753 | if (!context->in_syscall) { |
| 1684 | #if AUDIT_DEBUG == 2 | 1754 | #if AUDIT_DEBUG == 2 |
| 1685 | printk(KERN_ERR "%s:%d(:%d): ignoring getname(%p)\n", | 1755 | pr_err("%s:%d(:%d): ignoring getname(%p)\n", |
| 1686 | __FILE__, __LINE__, context->serial, name); | 1756 | __FILE__, __LINE__, context->serial, name); |
| 1687 | dump_stack(); | 1757 | dump_stack(); |
| 1688 | #endif | 1758 | #endif |
| @@ -1721,15 +1791,15 @@ void audit_putname(struct filename *name) | |||
| 1721 | BUG_ON(!context); | 1791 | BUG_ON(!context); |
| 1722 | if (!name->aname || !context->in_syscall) { | 1792 | if (!name->aname || !context->in_syscall) { |
| 1723 | #if AUDIT_DEBUG == 2 | 1793 | #if AUDIT_DEBUG == 2 |
| 1724 | printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n", | 1794 | pr_err("%s:%d(:%d): final_putname(%p)\n", |
| 1725 | __FILE__, __LINE__, context->serial, name); | 1795 | __FILE__, __LINE__, context->serial, name); |
| 1726 | if (context->name_count) { | 1796 | if (context->name_count) { |
| 1727 | struct audit_names *n; | 1797 | struct audit_names *n; |
| 1728 | int i = 0; | 1798 | int i = 0; |
| 1729 | 1799 | ||
| 1730 | list_for_each_entry(n, &context->names_list, list) | 1800 | list_for_each_entry(n, &context->names_list, list) |
| 1731 | printk(KERN_ERR "name[%d] = %p = %s\n", i++, | 1801 | pr_err("name[%d] = %p = %s\n", i++, n->name, |
| 1732 | n->name, n->name->name ?: "(null)"); | 1802 | n->name->name ?: "(null)"); |
| 1733 | } | 1803 | } |
| 1734 | #endif | 1804 | #endif |
| 1735 | final_putname(name); | 1805 | final_putname(name); |
| @@ -1738,9 +1808,8 @@ void audit_putname(struct filename *name) | |||
| 1738 | else { | 1808 | else { |
| 1739 | ++context->put_count; | 1809 | ++context->put_count; |
| 1740 | if (context->put_count > context->name_count) { | 1810 | if (context->put_count > context->name_count) { |
| 1741 | printk(KERN_ERR "%s:%d(:%d): major=%d" | 1811 | pr_err("%s:%d(:%d): major=%d in_syscall=%d putname(%p)" |
| 1742 | " in_syscall=%d putname(%p) name_count=%d" | 1812 | " name_count=%d put_count=%d\n", |
| 1743 | " put_count=%d\n", | ||
| 1744 | __FILE__, __LINE__, | 1813 | __FILE__, __LINE__, |
| 1745 | context->serial, context->major, | 1814 | context->serial, context->major, |
| 1746 | context->in_syscall, name->name, | 1815 | context->in_syscall, name->name, |
| @@ -1981,12 +2050,10 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, | |||
| 1981 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); | 2050 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); |
| 1982 | if (!ab) | 2051 | if (!ab) |
| 1983 | return; | 2052 | return; |
| 1984 | audit_log_format(ab, "pid=%d uid=%u" | 2053 | audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); |
| 1985 | " old-auid=%u new-auid=%u old-ses=%u new-ses=%u" | 2054 | audit_log_task_context(ab); |
| 1986 | " res=%d", | 2055 | audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", |
| 1987 | current->pid, uid, | 2056 | oldloginuid, loginuid, oldsessionid, sessionid, !rc); |
| 1988 | oldloginuid, loginuid, oldsessionid, sessionid, | ||
| 1989 | !rc); | ||
| 1990 | audit_log_end(ab); | 2057 | audit_log_end(ab); |
| 1991 | } | 2058 | } |
| 1992 | 2059 | ||
| @@ -2208,7 +2275,7 @@ void __audit_ptrace(struct task_struct *t) | |||
| 2208 | { | 2275 | { |
| 2209 | struct audit_context *context = current->audit_context; | 2276 | struct audit_context *context = current->audit_context; |
| 2210 | 2277 | ||
| 2211 | context->target_pid = t->pid; | 2278 | context->target_pid = task_pid_nr(t); |
| 2212 | context->target_auid = audit_get_loginuid(t); | 2279 | context->target_auid = audit_get_loginuid(t); |
| 2213 | context->target_uid = task_uid(t); | 2280 | context->target_uid = task_uid(t); |
| 2214 | context->target_sessionid = audit_get_sessionid(t); | 2281 | context->target_sessionid = audit_get_sessionid(t); |
| @@ -2233,7 +2300,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
| 2233 | 2300 | ||
| 2234 | if (audit_pid && t->tgid == audit_pid) { | 2301 | if (audit_pid && t->tgid == audit_pid) { |
| 2235 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { | 2302 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { |
| 2236 | audit_sig_pid = tsk->pid; | 2303 | audit_sig_pid = task_pid_nr(tsk); |
| 2237 | if (uid_valid(tsk->loginuid)) | 2304 | if (uid_valid(tsk->loginuid)) |
| 2238 | audit_sig_uid = tsk->loginuid; | 2305 | audit_sig_uid = tsk->loginuid; |
| 2239 | else | 2306 | else |
| @@ -2247,7 +2314,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
| 2247 | /* optimize the common case by putting first signal recipient directly | 2314 | /* optimize the common case by putting first signal recipient directly |
| 2248 | * in audit_context */ | 2315 | * in audit_context */ |
| 2249 | if (!ctx->target_pid) { | 2316 | if (!ctx->target_pid) { |
| 2250 | ctx->target_pid = t->tgid; | 2317 | ctx->target_pid = task_tgid_nr(t); |
| 2251 | ctx->target_auid = audit_get_loginuid(t); | 2318 | ctx->target_auid = audit_get_loginuid(t); |
| 2252 | ctx->target_uid = t_uid; | 2319 | ctx->target_uid = t_uid; |
| 2253 | ctx->target_sessionid = audit_get_sessionid(t); | 2320 | ctx->target_sessionid = audit_get_sessionid(t); |
| @@ -2268,7 +2335,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
| 2268 | } | 2335 | } |
| 2269 | BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS); | 2336 | BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS); |
| 2270 | 2337 | ||
| 2271 | axp->target_pid[axp->pid_count] = t->tgid; | 2338 | axp->target_pid[axp->pid_count] = task_tgid_nr(t); |
| 2272 | axp->target_auid[axp->pid_count] = audit_get_loginuid(t); | 2339 | axp->target_auid[axp->pid_count] = audit_get_loginuid(t); |
| 2273 | axp->target_uid[axp->pid_count] = t_uid; | 2340 | axp->target_uid[axp->pid_count] = t_uid; |
| 2274 | axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); | 2341 | axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); |
| @@ -2368,7 +2435,7 @@ static void audit_log_task(struct audit_buffer *ab) | |||
| 2368 | from_kgid(&init_user_ns, gid), | 2435 | from_kgid(&init_user_ns, gid), |
| 2369 | sessionid); | 2436 | sessionid); |
| 2370 | audit_log_task_context(ab); | 2437 | audit_log_task_context(ab); |
| 2371 | audit_log_format(ab, " pid=%d comm=", current->pid); | 2438 | audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); |
| 2372 | audit_log_untrustedstring(ab, current->comm); | 2439 | audit_log_untrustedstring(ab, current->comm); |
| 2373 | if (mm) { | 2440 | if (mm) { |
| 2374 | down_read(&mm->mmap_sem); | 2441 | down_read(&mm->mmap_sem); |
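Throughout auditsc.c, raw task_struct fields (tsk->pid, t->tgid) and sys_getppid() are replaced with the task_*_nr() helpers, which report identifiers as seen from the initial pid namespace, matching the rest of the audit record. A compact illustration; example_log_ids() is a made-up function, the helpers themselves appear in the hunks above:

```c
#include <linux/printk.h>
#include <linux/sched.h>

static void example_log_ids(struct task_struct *tsk)
{
	pid_t pid  = task_pid_nr(tsk);	/* global pid, replaces tsk->pid       */
	pid_t tgid = task_tgid_nr(tsk);	/* global tgid, replaces tsk->tgid     */
	pid_t ppid = task_ppid_nr(tsk);	/* global ppid, replaces sys_getppid() */

	pr_info("pid=%d tgid=%d ppid=%d\n", pid, tgid, ppid);
}
```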
diff --git a/kernel/capability.c b/kernel/capability.c
index 34019c57888d..a8d63df0c322 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
| @@ -7,6 +7,8 @@ | |||
| 7 | * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net> | 7 | * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net> |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 11 | |||
| 10 | #include <linux/audit.h> | 12 | #include <linux/audit.h> |
| 11 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
| 12 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
| @@ -42,15 +44,10 @@ __setup("no_file_caps", file_caps_disable); | |||
| 42 | 44 | ||
| 43 | static void warn_legacy_capability_use(void) | 45 | static void warn_legacy_capability_use(void) |
| 44 | { | 46 | { |
| 45 | static int warned; | 47 | char name[sizeof(current->comm)]; |
| 46 | if (!warned) { | 48 | |
| 47 | char name[sizeof(current->comm)]; | 49 | pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n", |
| 48 | 50 | get_task_comm(name, current)); | |
| 49 | printk(KERN_INFO "warning: `%s' uses 32-bit capabilities" | ||
| 50 | " (legacy support in use)\n", | ||
| 51 | get_task_comm(name, current)); | ||
| 52 | warned = 1; | ||
| 53 | } | ||
| 54 | } | 51 | } |
| 55 | 52 | ||
| 56 | /* | 53 | /* |
| @@ -71,16 +68,10 @@ static void warn_legacy_capability_use(void) | |||
| 71 | 68 | ||
| 72 | static void warn_deprecated_v2(void) | 69 | static void warn_deprecated_v2(void) |
| 73 | { | 70 | { |
| 74 | static int warned; | 71 | char name[sizeof(current->comm)]; |
| 75 | 72 | ||
| 76 | if (!warned) { | 73 | pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n", |
| 77 | char name[sizeof(current->comm)]; | 74 | get_task_comm(name, current)); |
| 78 | |||
| 79 | printk(KERN_INFO "warning: `%s' uses deprecated v2" | ||
| 80 | " capabilities in a way that may be insecure.\n", | ||
| 81 | get_task_comm(name, current)); | ||
| 82 | warned = 1; | ||
| 83 | } | ||
| 84 | } | 75 | } |
| 85 | 76 | ||
| 86 | /* | 77 | /* |
| @@ -380,7 +371,7 @@ bool has_capability_noaudit(struct task_struct *t, int cap) | |||
| 380 | bool ns_capable(struct user_namespace *ns, int cap) | 371 | bool ns_capable(struct user_namespace *ns, int cap) |
| 381 | { | 372 | { |
| 382 | if (unlikely(!cap_valid(cap))) { | 373 | if (unlikely(!cap_valid(cap))) { |
| 383 | printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap); | 374 | pr_crit("capable() called with invalid cap=%u\n", cap); |
| 384 | BUG(); | 375 | BUG(); |
| 385 | } | 376 | } |
| 386 | 377 | ||
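capability.c drops its hand-rolled "static int warned" guards in favour of a pr_fmt() prefix and the pr_*_once() helpers, which emit the message at most once per boot. Roughly, as a stand-alone sketch of the pattern rather than the file's exact code:

```c
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>
#include <linux/sched.h>

/* Illustrative stand-in for warn_legacy_capability_use(). */
static void example_warn_legacy_use(void)
{
	char name[sizeof(current->comm)];

	/* printed at most once, with the module name prefixed via pr_fmt() */
	pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n",
		     get_task_comm(name, current));
}
```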
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 105f273b6f86..9fcdaa705b6c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
| @@ -40,23 +40,20 @@ | |||
| 40 | #include <linux/proc_fs.h> | 40 | #include <linux/proc_fs.h> |
| 41 | #include <linux/rcupdate.h> | 41 | #include <linux/rcupdate.h> |
| 42 | #include <linux/sched.h> | 42 | #include <linux/sched.h> |
| 43 | #include <linux/backing-dev.h> | ||
| 44 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
| 45 | #include <linux/magic.h> | ||
| 46 | #include <linux/spinlock.h> | 44 | #include <linux/spinlock.h> |
| 45 | #include <linux/rwsem.h> | ||
| 47 | #include <linux/string.h> | 46 | #include <linux/string.h> |
| 48 | #include <linux/sort.h> | 47 | #include <linux/sort.h> |
| 49 | #include <linux/kmod.h> | 48 | #include <linux/kmod.h> |
| 50 | #include <linux/module.h> | ||
| 51 | #include <linux/delayacct.h> | 49 | #include <linux/delayacct.h> |
| 52 | #include <linux/cgroupstats.h> | 50 | #include <linux/cgroupstats.h> |
| 53 | #include <linux/hashtable.h> | 51 | #include <linux/hashtable.h> |
| 54 | #include <linux/namei.h> | ||
| 55 | #include <linux/pid_namespace.h> | 52 | #include <linux/pid_namespace.h> |
| 56 | #include <linux/idr.h> | 53 | #include <linux/idr.h> |
| 57 | #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ | 54 | #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ |
| 58 | #include <linux/flex_array.h> /* used in cgroup_attach_task */ | ||
| 59 | #include <linux/kthread.h> | 55 | #include <linux/kthread.h> |
| 56 | #include <linux/delay.h> | ||
| 60 | 57 | ||
| 61 | #include <linux/atomic.h> | 58 | #include <linux/atomic.h> |
| 62 | 59 | ||
| @@ -68,43 +65,49 @@ | |||
| 68 | */ | 65 | */ |
| 69 | #define CGROUP_PIDLIST_DESTROY_DELAY HZ | 66 | #define CGROUP_PIDLIST_DESTROY_DELAY HZ |
| 70 | 67 | ||
| 68 | #define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \ | ||
| 69 | MAX_CFTYPE_NAME + 2) | ||
| 70 | |||
| 71 | /* | ||
| 72 | * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file | ||
| 73 | * creation/removal and hierarchy changing operations including cgroup | ||
| 74 | * creation, removal, css association and controller rebinding. This outer | ||
| 75 | * lock is needed mainly to resolve the circular dependency between kernfs | ||
| 76 | * active ref and cgroup_mutex. cgroup_tree_mutex nests above both. | ||
| 77 | */ | ||
| 78 | static DEFINE_MUTEX(cgroup_tree_mutex); | ||
| 79 | |||
| 71 | /* | 80 | /* |
| 72 | * cgroup_mutex is the master lock. Any modification to cgroup or its | 81 | * cgroup_mutex is the master lock. Any modification to cgroup or its |
| 73 | * hierarchy must be performed while holding it. | 82 | * hierarchy must be performed while holding it. |
| 74 | * | 83 | * |
| 75 | * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify | 84 | * css_set_rwsem protects task->cgroups pointer, the list of css_set |
| 76 | * cgroupfs_root of any cgroup hierarchy - subsys list, flags, | 85 | * objects, and the chain of tasks off each css_set. |
| 77 | * release_agent_path and so on. Modifying requires both cgroup_mutex and | ||
| 78 | * cgroup_root_mutex. Readers can acquire either of the two. This is to | ||
| 79 | * break the following locking order cycle. | ||
| 80 | * | ||
| 81 | * A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem | ||
| 82 | * B. namespace_sem -> cgroup_mutex | ||
| 83 | * | 86 | * |
| 84 | * B happens only through cgroup_show_options() and using cgroup_root_mutex | 87 | * These locks are exported if CONFIG_PROVE_RCU so that accessors in |
| 85 | * breaks it. | 88 | * cgroup.h can use them for lockdep annotations. |
| 86 | */ | 89 | */ |
| 87 | #ifdef CONFIG_PROVE_RCU | 90 | #ifdef CONFIG_PROVE_RCU |
| 88 | DEFINE_MUTEX(cgroup_mutex); | 91 | DEFINE_MUTEX(cgroup_mutex); |
| 89 | EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for lockdep */ | 92 | DECLARE_RWSEM(css_set_rwsem); |
| 93 | EXPORT_SYMBOL_GPL(cgroup_mutex); | ||
| 94 | EXPORT_SYMBOL_GPL(css_set_rwsem); | ||
| 90 | #else | 95 | #else |
| 91 | static DEFINE_MUTEX(cgroup_mutex); | 96 | static DEFINE_MUTEX(cgroup_mutex); |
| 97 | static DECLARE_RWSEM(css_set_rwsem); | ||
| 92 | #endif | 98 | #endif |
| 93 | 99 | ||
| 94 | static DEFINE_MUTEX(cgroup_root_mutex); | 100 | /* |
| 101 | * Protects cgroup_subsys->release_agent_path. Modifying it also requires | ||
| 102 | * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. | ||
| 103 | */ | ||
| 104 | static DEFINE_SPINLOCK(release_agent_path_lock); | ||
| 95 | 105 | ||
| 96 | #define cgroup_assert_mutex_or_rcu_locked() \ | 106 | #define cgroup_assert_mutexes_or_rcu_locked() \ |
| 97 | rcu_lockdep_assert(rcu_read_lock_held() || \ | 107 | rcu_lockdep_assert(rcu_read_lock_held() || \ |
| 108 | lockdep_is_held(&cgroup_tree_mutex) || \ | ||
| 98 | lockdep_is_held(&cgroup_mutex), \ | 109 | lockdep_is_held(&cgroup_mutex), \ |
| 99 | "cgroup_mutex or RCU read lock required"); | 110 | "cgroup_[tree_]mutex or RCU read lock required"); |
| 100 | |||
| 101 | #ifdef CONFIG_LOCKDEP | ||
| 102 | #define cgroup_assert_mutex_or_root_locked() \ | ||
| 103 | WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&cgroup_mutex) && \ | ||
| 104 | !lockdep_is_held(&cgroup_root_mutex))) | ||
| 105 | #else | ||
| 106 | #define cgroup_assert_mutex_or_root_locked() do { } while (0) | ||
| 107 | #endif | ||
| 108 | 111 | ||
| 109 | /* | 112 | /* |
| 110 | * cgroup destruction makes heavy use of work items and there can be a lot | 113 | * cgroup destruction makes heavy use of work items and there can be a lot |
| @@ -120,42 +123,41 @@ static struct workqueue_struct *cgroup_destroy_wq; | |||
| 120 | */ | 123 | */ |
| 121 | static struct workqueue_struct *cgroup_pidlist_destroy_wq; | 124 | static struct workqueue_struct *cgroup_pidlist_destroy_wq; |
| 122 | 125 | ||
| 123 | /* | 126 | /* generate an array of cgroup subsystem pointers */ |
| 124 | * Generate an array of cgroup subsystem pointers. At boot time, this is | 127 | #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, |
| 125 | * populated with the built in subsystems, and modular subsystems are | 128 | static struct cgroup_subsys *cgroup_subsys[] = { |
| 126 | * registered after that. The mutable section of this array is protected by | 129 | #include <linux/cgroup_subsys.h> |
| 127 | * cgroup_mutex. | 130 | }; |
| 128 | */ | 131 | #undef SUBSYS |
| 129 | #define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys, | 132 | |
| 130 | #define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) | 133 | /* array of cgroup subsystem names */ |
| 131 | static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = { | 134 | #define SUBSYS(_x) [_x ## _cgrp_id] = #_x, |
| 135 | static const char *cgroup_subsys_name[] = { | ||
| 132 | #include <linux/cgroup_subsys.h> | 136 | #include <linux/cgroup_subsys.h> |
| 133 | }; | 137 | }; |
| 138 | #undef SUBSYS | ||
| 134 | 139 | ||
| 135 | /* | 140 | /* |
| 136 | * The dummy hierarchy, reserved for the subsystems that are otherwise | 141 | * The default hierarchy, reserved for the subsystems that are otherwise |
| 137 | * unattached - it never has more than a single cgroup, and all tasks are | 142 | * unattached - it never has more than a single cgroup, and all tasks are |
| 138 | * part of that cgroup. | 143 | * part of that cgroup. |
| 139 | */ | 144 | */ |
| 140 | static struct cgroupfs_root cgroup_dummy_root; | 145 | struct cgroup_root cgrp_dfl_root; |
| 141 | 146 | ||
| 142 | /* dummy_top is a shorthand for the dummy hierarchy's top cgroup */ | 147 | /* |
| 143 | static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup; | 148 | * The default hierarchy always exists but is hidden until mounted for the |
| 149 | * first time. This is for backward compatibility. | ||
| 150 | */ | ||
| 151 | static bool cgrp_dfl_root_visible; | ||
| 144 | 152 | ||
| 145 | /* The list of hierarchy roots */ | 153 | /* The list of hierarchy roots */ |
| 146 | 154 | ||
| 147 | static LIST_HEAD(cgroup_roots); | 155 | static LIST_HEAD(cgroup_roots); |
| 148 | static int cgroup_root_count; | 156 | static int cgroup_root_count; |
| 149 | 157 | ||
| 150 | /* | 158 | /* hierarchy ID allocation and mapping, protected by cgroup_mutex */ |
| 151 | * Hierarchy ID allocation and mapping. It follows the same exclusion | ||
| 152 | * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for | ||
| 153 | * writes, either for reads. | ||
| 154 | */ | ||
| 155 | static DEFINE_IDR(cgroup_hierarchy_idr); | 159 | static DEFINE_IDR(cgroup_hierarchy_idr); |
| 156 | 160 | ||
| 157 | static struct cgroup_name root_cgroup_name = { .name = "/" }; | ||
| 158 | |||
| 159 | /* | 161 | /* |
| 160 | * Assign a monotonically increasing serial number to cgroups. It | 162 | * Assign a monotonically increasing serial number to cgroups. It |
| 161 | * guarantees cgroups with bigger numbers are newer than those with smaller | 163 | * guarantees cgroups with bigger numbers are newer than those with smaller |
| @@ -175,11 +177,13 @@ static int need_forkexit_callback __read_mostly; | |||
| 175 | 177 | ||
| 176 | static struct cftype cgroup_base_files[]; | 178 | static struct cftype cgroup_base_files[]; |
| 177 | 179 | ||
| 180 | static void cgroup_put(struct cgroup *cgrp); | ||
| 181 | static int rebind_subsystems(struct cgroup_root *dst_root, | ||
| 182 | unsigned long ss_mask); | ||
| 178 | static void cgroup_destroy_css_killed(struct cgroup *cgrp); | 183 | static void cgroup_destroy_css_killed(struct cgroup *cgrp); |
| 179 | static int cgroup_destroy_locked(struct cgroup *cgrp); | 184 | static int cgroup_destroy_locked(struct cgroup *cgrp); |
| 180 | static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], | 185 | static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], |
| 181 | bool is_add); | 186 | bool is_add); |
| 182 | static int cgroup_file_release(struct inode *inode, struct file *file); | ||
| 183 | static void cgroup_pidlist_destroy_all(struct cgroup *cgrp); | 187 | static void cgroup_pidlist_destroy_all(struct cgroup *cgrp); |
| 184 | 188 | ||
| 185 | /** | 189 | /** |
| @@ -197,8 +201,9 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, | |||
| 197 | struct cgroup_subsys *ss) | 201 | struct cgroup_subsys *ss) |
| 198 | { | 202 | { |
| 199 | if (ss) | 203 | if (ss) |
| 200 | return rcu_dereference_check(cgrp->subsys[ss->subsys_id], | 204 | return rcu_dereference_check(cgrp->subsys[ss->id], |
| 201 | lockdep_is_held(&cgroup_mutex)); | 205 | lockdep_is_held(&cgroup_tree_mutex) || |
| 206 | lockdep_is_held(&cgroup_mutex)); | ||
| 202 | else | 207 | else |
| 203 | return &cgrp->dummy_css; | 208 | return &cgrp->dummy_css; |
| 204 | } | 209 | } |
| @@ -209,6 +214,27 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp) | |||
| 209 | return test_bit(CGRP_DEAD, &cgrp->flags); | 214 | return test_bit(CGRP_DEAD, &cgrp->flags); |
| 210 | } | 215 | } |
| 211 | 216 | ||
| 217 | struct cgroup_subsys_state *seq_css(struct seq_file *seq) | ||
| 218 | { | ||
| 219 | struct kernfs_open_file *of = seq->private; | ||
| 220 | struct cgroup *cgrp = of->kn->parent->priv; | ||
| 221 | struct cftype *cft = seq_cft(seq); | ||
| 222 | |||
| 223 | /* | ||
| 224 | * This is an open and unprotected implementation of cgroup_css(). | ||
| 225 | * seq_css() is only called from a kernfs file operation which has | ||
| 226 | * an active reference on the file. Because all the subsystem | ||
| 227 | * files are drained before a css is disassociated from a cgroup, | ||
| 228 | * the matching css from the cgroup's subsys table is guaranteed to | ||
| 229 | * be and stay valid until the enclosing operation is complete. | ||
| 230 | */ | ||
| 231 | if (cft->ss) | ||
| 232 | return rcu_dereference_raw(cgrp->subsys[cft->ss->id]); | ||
| 233 | else | ||
| 234 | return &cgrp->dummy_css; | ||
| 235 | } | ||
| 236 | EXPORT_SYMBOL_GPL(seq_css); | ||
| 237 | |||
| 212 | /** | 238 | /** |
| 213 | * cgroup_is_descendant - test ancestry | 239 | * cgroup_is_descendant - test ancestry |
| 214 | * @cgrp: the cgroup to be tested | 240 | * @cgrp: the cgroup to be tested |
| @@ -227,7 +253,6 @@ bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor) | |||
| 227 | } | 253 | } |
| 228 | return false; | 254 | return false; |
| 229 | } | 255 | } |
| 230 | EXPORT_SYMBOL_GPL(cgroup_is_descendant); | ||
| 231 | 256 | ||
| 232 | static int cgroup_is_releasable(const struct cgroup *cgrp) | 257 | static int cgroup_is_releasable(const struct cgroup *cgrp) |
| 233 | { | 258 | { |
| @@ -254,54 +279,23 @@ static int notify_on_release(const struct cgroup *cgrp) | |||
| 254 | for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ | 279 | for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ |
| 255 | if (!((css) = rcu_dereference_check( \ | 280 | if (!((css) = rcu_dereference_check( \ |
| 256 | (cgrp)->subsys[(ssid)], \ | 281 | (cgrp)->subsys[(ssid)], \ |
| 282 | lockdep_is_held(&cgroup_tree_mutex) || \ | ||
| 257 | lockdep_is_held(&cgroup_mutex)))) { } \ | 283 | lockdep_is_held(&cgroup_mutex)))) { } \ |
| 258 | else | 284 | else |
| 259 | 285 | ||
| 260 | /** | 286 | /** |
| 261 | * for_each_subsys - iterate all loaded cgroup subsystems | 287 | * for_each_subsys - iterate all enabled cgroup subsystems |
| 262 | * @ss: the iteration cursor | 288 | * @ss: the iteration cursor |
| 263 | * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end | 289 | * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end |
| 264 | * | ||
| 265 | * Iterates through all loaded subsystems. Should be called under | ||
| 266 | * cgroup_mutex or cgroup_root_mutex. | ||
| 267 | */ | 290 | */ |
| 268 | #define for_each_subsys(ss, ssid) \ | 291 | #define for_each_subsys(ss, ssid) \ |
| 269 | for (({ cgroup_assert_mutex_or_root_locked(); (ssid) = 0; }); \ | 292 | for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ |
| 270 | (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ | 293 | (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) |
| 271 | if (!((ss) = cgroup_subsys[(ssid)])) { } \ | ||
| 272 | else | ||
| 273 | 294 | ||
| 274 | /** | 295 | /* iterate across the hierarchies */ |
| 275 | * for_each_builtin_subsys - iterate all built-in cgroup subsystems | 296 | #define for_each_root(root) \ |
| 276 | * @ss: the iteration cursor | ||
| 277 | * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end | ||
| 278 | * | ||
| 279 | * Built-in subsystems are always present and iteration itself doesn't | ||
| 280 | * require any synchronization. | ||
| 281 | */ | ||
| 282 | #define for_each_builtin_subsys(ss, i) \ | ||
| 283 | for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT && \ | ||
| 284 | (((ss) = cgroup_subsys[i]) || true); (i)++) | ||
| 285 | |||
| 286 | /* iterate across the active hierarchies */ | ||
| 287 | #define for_each_active_root(root) \ | ||
| 288 | list_for_each_entry((root), &cgroup_roots, root_list) | 297 | list_for_each_entry((root), &cgroup_roots, root_list) |
| 289 | 298 | ||
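For orientation, the rewritten for_each_subsys() no longer asserts any lock or skips NULL slots: every entry of cgroup_subsys[] is now built in and always populated, so the condition merely assigns the cursor and the "|| true" keeps the loop running. Below is a minimal, self-contained C model of that idiom; the array, element type, and names are illustrative stand-ins, not the kernel's definitions.

    #include <stdio.h>

    struct subsys {
            const char *name;
    };

    static struct subsys cpu_ss = { "cpu" };
    static struct subsys mem_ss = { "memory" };

    static struct subsys *subsys_tab[] = { &cpu_ss, &mem_ss };
    #define SUBSYS_COUNT (sizeof(subsys_tab) / sizeof(subsys_tab[0]))

    /*
     * Same shape as the new for_each_subsys(): the cursor is assigned inside
     * the loop condition, and "|| true" keeps iterating regardless of the
     * assigned value, since every slot is known to be populated.
     */
    #define for_each_subsys(ss, i) \
            for ((i) = 0; (i) < SUBSYS_COUNT && (((ss) = subsys_tab[i]) || true); (i)++)

    int main(void)
    {
            struct subsys *ss;
            size_t i;

            for_each_subsys(ss, i)
                    printf("%zu: %s\n", i, ss->name);
            return 0;
    }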
| 290 | static inline struct cgroup *__d_cgrp(struct dentry *dentry) | ||
| 291 | { | ||
| 292 | return dentry->d_fsdata; | ||
| 293 | } | ||
| 294 | |||
| 295 | static inline struct cfent *__d_cfe(struct dentry *dentry) | ||
| 296 | { | ||
| 297 | return dentry->d_fsdata; | ||
| 298 | } | ||
| 299 | |||
| 300 | static inline struct cftype *__d_cft(struct dentry *dentry) | ||
| 301 | { | ||
| 302 | return __d_cfe(dentry)->type; | ||
| 303 | } | ||
| 304 | |||
| 305 | /** | 299 | /** |
| 306 | * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive. | 300 | * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive. |
| 307 | * @cgrp: the cgroup to be checked for liveness | 301 | * @cgrp: the cgroup to be checked for liveness |
| @@ -347,23 +341,23 @@ struct cgrp_cset_link { | |||
| 347 | struct list_head cgrp_link; | 341 | struct list_head cgrp_link; |
| 348 | }; | 342 | }; |
| 349 | 343 | ||
| 350 | /* The default css_set - used by init and its children prior to any | 344 | /* |
| 345 | * The default css_set - used by init and its children prior to any | ||
| 351 | * hierarchies being mounted. It contains a pointer to the root state | 346 | * hierarchies being mounted. It contains a pointer to the root state |
| 352 | * for each subsystem. Also used to anchor the list of css_sets. Not | 347 | * for each subsystem. Also used to anchor the list of css_sets. Not |
| 353 | * reference-counted, to improve performance when child cgroups | 348 | * reference-counted, to improve performance when child cgroups |
| 354 | * haven't been created. | 349 | * haven't been created. |
| 355 | */ | 350 | */ |
| 351 | static struct css_set init_css_set = { | ||
| 352 | .refcount = ATOMIC_INIT(1), | ||
| 353 | .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), | ||
| 354 | .tasks = LIST_HEAD_INIT(init_css_set.tasks), | ||
| 355 | .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks), | ||
| 356 | .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node), | ||
| 357 | .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), | ||
| 358 | }; | ||
| 356 | 359 | ||
| 357 | static struct css_set init_css_set; | 360 | static int css_set_count = 1; /* 1 for init_css_set */ |
| 358 | static struct cgrp_cset_link init_cgrp_cset_link; | ||
| 359 | |||
| 360 | /* | ||
| 361 | * css_set_lock protects the list of css_set objects, and the chain of | ||
| 362 | * tasks off each css_set. Nests outside task->alloc_lock due to | ||
| 363 | * css_task_iter_start(). | ||
| 364 | */ | ||
| 365 | static DEFINE_RWLOCK(css_set_lock); | ||
| 366 | static int css_set_count; | ||
| 367 | 361 | ||
| 368 | /* | 362 | /* |
| 369 | * hash table for cgroup groups. This improves the performance to find | 363 | * hash table for cgroup groups. This improves the performance to find |
| @@ -386,30 +380,14 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) | |||
| 386 | return key; | 380 | return key; |
| 387 | } | 381 | } |
| 388 | 382 | ||
| 389 | /* | 383 | static void put_css_set_locked(struct css_set *cset, bool taskexit) |
| 390 | * We don't maintain the lists running through each css_set to its task | ||
| 391 | * until after the first call to css_task_iter_start(). This reduces the | ||
| 392 | * fork()/exit() overhead for people who have cgroups compiled into their | ||
| 393 | * kernel but not actually in use. | ||
| 394 | */ | ||
| 395 | static int use_task_css_set_links __read_mostly; | ||
| 396 | |||
| 397 | static void __put_css_set(struct css_set *cset, int taskexit) | ||
| 398 | { | 384 | { |
| 399 | struct cgrp_cset_link *link, *tmp_link; | 385 | struct cgrp_cset_link *link, *tmp_link; |
| 400 | 386 | ||
| 401 | /* | 387 | lockdep_assert_held(&css_set_rwsem); |
| 402 | * Ensure that the refcount doesn't hit zero while any readers | 388 | |
| 403 | * can see it. Similar to atomic_dec_and_lock(), but for an | 389 | if (!atomic_dec_and_test(&cset->refcount)) |
| 404 | * rwlock | ||
| 405 | */ | ||
| 406 | if (atomic_add_unless(&cset->refcount, -1, 1)) | ||
| 407 | return; | ||
| 408 | write_lock(&css_set_lock); | ||
| 409 | if (!atomic_dec_and_test(&cset->refcount)) { | ||
| 410 | write_unlock(&css_set_lock); | ||
| 411 | return; | 390 | return; |
| 412 | } | ||
| 413 | 391 | ||
| 414 | /* This css_set is dead. unlink it and release cgroup refcounts */ | 392 | /* This css_set is dead. unlink it and release cgroup refcounts */ |
| 415 | hash_del(&cset->hlist); | 393 | hash_del(&cset->hlist); |
| @@ -421,7 +399,7 @@ static void __put_css_set(struct css_set *cset, int taskexit) | |||
| 421 | list_del(&link->cset_link); | 399 | list_del(&link->cset_link); |
| 422 | list_del(&link->cgrp_link); | 400 | list_del(&link->cgrp_link); |
| 423 | 401 | ||
| 424 | /* @cgrp can't go away while we're holding css_set_lock */ | 402 | /* @cgrp can't go away while we're holding css_set_rwsem */ |
| 425 | if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) { | 403 | if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) { |
| 426 | if (taskexit) | 404 | if (taskexit) |
| 427 | set_bit(CGRP_RELEASABLE, &cgrp->flags); | 405 | set_bit(CGRP_RELEASABLE, &cgrp->flags); |
| @@ -431,10 +409,24 @@ static void __put_css_set(struct css_set *cset, int taskexit) | |||
| 431 | kfree(link); | 409 | kfree(link); |
| 432 | } | 410 | } |
| 433 | 411 | ||
| 434 | write_unlock(&css_set_lock); | ||
| 435 | kfree_rcu(cset, rcu_head); | 412 | kfree_rcu(cset, rcu_head); |
| 436 | } | 413 | } |
| 437 | 414 | ||
| 415 | static void put_css_set(struct css_set *cset, bool taskexit) | ||
| 416 | { | ||
| 417 | /* | ||
| 418 | * Ensure that the refcount doesn't hit zero while any readers | ||
| 419 | * can see it. Similar to atomic_dec_and_lock(), but for an | ||
| 420 | * rwlock | ||
| 421 | */ | ||
| 422 | if (atomic_add_unless(&cset->refcount, -1, 1)) | ||
| 423 | return; | ||
| 424 | |||
| 425 | down_write(&css_set_rwsem); | ||
| 426 | put_css_set_locked(cset, taskexit); | ||
| 427 | up_write(&css_set_rwsem); | ||
| 428 | } | ||
| 429 | |||
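The split into put_css_set_locked() and put_css_set() keeps the common case lock-free: the atomic_add_unless(&refcount, -1, 1) fast path decrements only while at least one other reference remains, so css_set_rwsem is taken solely for a possible final drop. A small self-contained C11 model of that pattern follows; the object type, dec_unless_last() helper, and the plain mutex standing in for css_set_rwsem are all illustrative, not kernel APIs.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
            atomic_int refcount;
    };

    static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

    /* decrement only if the current value is greater than 1 (fast path) */
    static bool dec_unless_last(atomic_int *v)
    {
            int old = atomic_load(v);

            while (old > 1) {
                    if (atomic_compare_exchange_weak(v, &old, old - 1))
                            return true;
            }
            return false;
    }

    static void obj_put(struct obj *o)
    {
            /* fast path: another reference remains, no lock needed */
            if (dec_unless_last(&o->refcount))
                    return;

            /* slow path: we may be the last holder, drop under the lock */
            pthread_mutex_lock(&obj_lock);
            if (atomic_fetch_sub(&o->refcount, 1) == 1)
                    printf("last reference dropped, unlink and free here\n");
            pthread_mutex_unlock(&obj_lock);
    }

    int main(void)
    {
            struct obj o = { .refcount = 2 };

            obj_put(&o);    /* fast path */
            obj_put(&o);    /* slow path */
            return 0;
    }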
| 438 | /* | 430 | /* |
| 439 | * refcounted get/put for css_set objects | 431 | * refcounted get/put for css_set objects |
| 440 | */ | 432 | */ |
| @@ -443,16 +435,6 @@ static inline void get_css_set(struct css_set *cset) | |||
| 443 | atomic_inc(&cset->refcount); | 435 | atomic_inc(&cset->refcount); |
| 444 | } | 436 | } |
| 445 | 437 | ||
| 446 | static inline void put_css_set(struct css_set *cset) | ||
| 447 | { | ||
| 448 | __put_css_set(cset, 0); | ||
| 449 | } | ||
| 450 | |||
| 451 | static inline void put_css_set_taskexit(struct css_set *cset) | ||
| 452 | { | ||
| 453 | __put_css_set(cset, 1); | ||
| 454 | } | ||
| 455 | |||
| 456 | /** | 438 | /** |
| 457 | * compare_css_sets - helper function for find_existing_css_set(). | 439 | * compare_css_sets - helper function for find_existing_css_set(). |
| 458 | * @cset: candidate css_set being tested | 440 | * @cset: candidate css_set being tested |
| @@ -535,7 +517,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, | |||
| 535 | struct cgroup *cgrp, | 517 | struct cgroup *cgrp, |
| 536 | struct cgroup_subsys_state *template[]) | 518 | struct cgroup_subsys_state *template[]) |
| 537 | { | 519 | { |
| 538 | struct cgroupfs_root *root = cgrp->root; | 520 | struct cgroup_root *root = cgrp->root; |
| 539 | struct cgroup_subsys *ss; | 521 | struct cgroup_subsys *ss; |
| 540 | struct css_set *cset; | 522 | struct css_set *cset; |
| 541 | unsigned long key; | 523 | unsigned long key; |
| @@ -547,7 +529,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, | |||
| 547 | * won't change, so no need for locking. | 529 | * won't change, so no need for locking. |
| 548 | */ | 530 | */ |
| 549 | for_each_subsys(ss, i) { | 531 | for_each_subsys(ss, i) { |
| 550 | if (root->subsys_mask & (1UL << i)) { | 532 | if (root->cgrp.subsys_mask & (1UL << i)) { |
| 551 | /* Subsystem is in this hierarchy. So we want | 533 | /* Subsystem is in this hierarchy. So we want |
| 552 | * the subsystem state from the new | 534 | * the subsystem state from the new |
| 553 | * cgroup */ | 535 | * cgroup */ |
| @@ -652,11 +634,11 @@ static struct css_set *find_css_set(struct css_set *old_cset, | |||
| 652 | 634 | ||
| 653 | /* First see if we already have a cgroup group that matches | 635 | /* First see if we already have a cgroup group that matches |
| 654 | * the desired set */ | 636 | * the desired set */ |
| 655 | read_lock(&css_set_lock); | 637 | down_read(&css_set_rwsem); |
| 656 | cset = find_existing_css_set(old_cset, cgrp, template); | 638 | cset = find_existing_css_set(old_cset, cgrp, template); |
| 657 | if (cset) | 639 | if (cset) |
| 658 | get_css_set(cset); | 640 | get_css_set(cset); |
| 659 | read_unlock(&css_set_lock); | 641 | up_read(&css_set_rwsem); |
| 660 | 642 | ||
| 661 | if (cset) | 643 | if (cset) |
| 662 | return cset; | 644 | return cset; |
| @@ -674,13 +656,16 @@ static struct css_set *find_css_set(struct css_set *old_cset, | |||
| 674 | atomic_set(&cset->refcount, 1); | 656 | atomic_set(&cset->refcount, 1); |
| 675 | INIT_LIST_HEAD(&cset->cgrp_links); | 657 | INIT_LIST_HEAD(&cset->cgrp_links); |
| 676 | INIT_LIST_HEAD(&cset->tasks); | 658 | INIT_LIST_HEAD(&cset->tasks); |
| 659 | INIT_LIST_HEAD(&cset->mg_tasks); | ||
| 660 | INIT_LIST_HEAD(&cset->mg_preload_node); | ||
| 661 | INIT_LIST_HEAD(&cset->mg_node); | ||
| 677 | INIT_HLIST_NODE(&cset->hlist); | 662 | INIT_HLIST_NODE(&cset->hlist); |
| 678 | 663 | ||
| 679 | /* Copy the set of subsystem state objects generated in | 664 | /* Copy the set of subsystem state objects generated in |
| 680 | * find_existing_css_set() */ | 665 | * find_existing_css_set() */ |
| 681 | memcpy(cset->subsys, template, sizeof(cset->subsys)); | 666 | memcpy(cset->subsys, template, sizeof(cset->subsys)); |
| 682 | 667 | ||
| 683 | write_lock(&css_set_lock); | 668 | down_write(&css_set_rwsem); |
| 684 | /* Add reference counts and links from the new css_set. */ | 669 | /* Add reference counts and links from the new css_set. */ |
| 685 | list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { | 670 | list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { |
| 686 | struct cgroup *c = link->cgrp; | 671 | struct cgroup *c = link->cgrp; |
| @@ -698,31 +683,105 @@ static struct css_set *find_css_set(struct css_set *old_cset, | |||
| 698 | key = css_set_hash(cset->subsys); | 683 | key = css_set_hash(cset->subsys); |
| 699 | hash_add(css_set_table, &cset->hlist, key); | 684 | hash_add(css_set_table, &cset->hlist, key); |
| 700 | 685 | ||
| 701 | write_unlock(&css_set_lock); | 686 | up_write(&css_set_rwsem); |
| 702 | 687 | ||
| 703 | return cset; | 688 | return cset; |
| 704 | } | 689 | } |
| 705 | 690 | ||
| 706 | /* | 691 | static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) |
| 707 | * Return the cgroup for "task" from the given hierarchy. Must be | ||
| 708 | * called with cgroup_mutex held. | ||
| 709 | */ | ||
| 710 | static struct cgroup *task_cgroup_from_root(struct task_struct *task, | ||
| 711 | struct cgroupfs_root *root) | ||
| 712 | { | 692 | { |
| 713 | struct css_set *cset; | 693 | struct cgroup *root_cgrp = kf_root->kn->priv; |
| 714 | struct cgroup *res = NULL; | 694 | |
| 695 | return root_cgrp->root; | ||
| 696 | } | ||
| 697 | |||
| 698 | static int cgroup_init_root_id(struct cgroup_root *root) | ||
| 699 | { | ||
| 700 | int id; | ||
| 701 | |||
| 702 | lockdep_assert_held(&cgroup_mutex); | ||
| 703 | |||
| 704 | id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL); | ||
| 705 | if (id < 0) | ||
| 706 | return id; | ||
| 707 | |||
| 708 | root->hierarchy_id = id; | ||
| 709 | return 0; | ||
| 710 | } | ||
| 711 | |||
| 712 | static void cgroup_exit_root_id(struct cgroup_root *root) | ||
| 713 | { | ||
| 714 | lockdep_assert_held(&cgroup_mutex); | ||
| 715 | |||
| 716 | if (root->hierarchy_id) { | ||
| 717 | idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); | ||
| 718 | root->hierarchy_id = 0; | ||
| 719 | } | ||
| 720 | } | ||
| 721 | |||
| 722 | static void cgroup_free_root(struct cgroup_root *root) | ||
| 723 | { | ||
| 724 | if (root) { | ||
| 725 | /* hierarchy ID should already have been released */ | ||
| 726 | WARN_ON_ONCE(root->hierarchy_id); | ||
| 727 | |||
| 728 | idr_destroy(&root->cgroup_idr); | ||
| 729 | kfree(root); | ||
| 730 | } | ||
| 731 | } | ||
| 732 | |||
| 733 | static void cgroup_destroy_root(struct cgroup_root *root) | ||
| 734 | { | ||
| 735 | struct cgroup *cgrp = &root->cgrp; | ||
| 736 | struct cgrp_cset_link *link, *tmp_link; | ||
| 737 | |||
| 738 | mutex_lock(&cgroup_tree_mutex); | ||
| 739 | mutex_lock(&cgroup_mutex); | ||
| 740 | |||
| 741 | BUG_ON(atomic_read(&root->nr_cgrps)); | ||
| 742 | BUG_ON(!list_empty(&cgrp->children)); | ||
| 743 | |||
| 744 | /* Rebind all subsystems back to the default hierarchy */ | ||
| 745 | rebind_subsystems(&cgrp_dfl_root, cgrp->subsys_mask); | ||
| 715 | 746 | ||
| 716 | BUG_ON(!mutex_is_locked(&cgroup_mutex)); | ||
| 717 | read_lock(&css_set_lock); | ||
| 718 | /* | 747 | /* |
| 719 | * No need to lock the task - since we hold cgroup_mutex the | 748 | * Release all the links from cset_links to this hierarchy's |
| 720 | * task can't change groups, so the only thing that can happen | 749 | * root cgroup |
| 721 | * is that it exits and its css is set back to init_css_set. | ||
| 722 | */ | 750 | */ |
| 723 | cset = task_css_set(task); | 751 | down_write(&css_set_rwsem); |
| 752 | |||
| 753 | list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { | ||
| 754 | list_del(&link->cset_link); | ||
| 755 | list_del(&link->cgrp_link); | ||
| 756 | kfree(link); | ||
| 757 | } | ||
| 758 | up_write(&css_set_rwsem); | ||
| 759 | |||
| 760 | if (!list_empty(&root->root_list)) { | ||
| 761 | list_del(&root->root_list); | ||
| 762 | cgroup_root_count--; | ||
| 763 | } | ||
| 764 | |||
| 765 | cgroup_exit_root_id(root); | ||
| 766 | |||
| 767 | mutex_unlock(&cgroup_mutex); | ||
| 768 | mutex_unlock(&cgroup_tree_mutex); | ||
| 769 | |||
| 770 | kernfs_destroy_root(root->kf_root); | ||
| 771 | cgroup_free_root(root); | ||
| 772 | } | ||
| 773 | |||
| 774 | /* look up cgroup associated with given css_set on the specified hierarchy */ | ||
| 775 | static struct cgroup *cset_cgroup_from_root(struct css_set *cset, | ||
| 776 | struct cgroup_root *root) | ||
| 777 | { | ||
| 778 | struct cgroup *res = NULL; | ||
| 779 | |||
| 780 | lockdep_assert_held(&cgroup_mutex); | ||
| 781 | lockdep_assert_held(&css_set_rwsem); | ||
| 782 | |||
| 724 | if (cset == &init_css_set) { | 783 | if (cset == &init_css_set) { |
| 725 | res = &root->top_cgroup; | 784 | res = &root->cgrp; |
| 726 | } else { | 785 | } else { |
| 727 | struct cgrp_cset_link *link; | 786 | struct cgrp_cset_link *link; |
| 728 | 787 | ||
| @@ -735,16 +794,27 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, | |||
| 735 | } | 794 | } |
| 736 | } | 795 | } |
| 737 | } | 796 | } |
| 738 | read_unlock(&css_set_lock); | 797 | |
| 739 | BUG_ON(!res); | 798 | BUG_ON(!res); |
| 740 | return res; | 799 | return res; |
| 741 | } | 800 | } |
| 742 | 801 | ||
| 743 | /* | 802 | /* |
| 744 | * There is one global cgroup mutex. We also require taking | 803 | * Return the cgroup for "task" from the given hierarchy. Must be |
| 745 | * task_lock() when dereferencing a task's cgroup subsys pointers. | 804 | * called with cgroup_mutex and css_set_rwsem held. |
| 746 | * See "The task_lock() exception", at the end of this comment. | 805 | */ |
| 747 | * | 806 | static struct cgroup *task_cgroup_from_root(struct task_struct *task, |
| 807 | struct cgroup_root *root) | ||
| 808 | { | ||
| 809 | /* | ||
| 810 | * No need to lock the task - since we hold cgroup_mutex the | ||
| 811 | * task can't change groups, so the only thing that can happen | ||
| 812 | * is that it exits and its css is set back to init_css_set. | ||
| 813 | */ | ||
| 814 | return cset_cgroup_from_root(task_css_set(task), root); | ||
| 815 | } | ||
| 816 | |||
| 817 | /* | ||
| 748 | * A task must hold cgroup_mutex to modify cgroups. | 818 | * A task must hold cgroup_mutex to modify cgroups. |
| 749 | * | 819 | * |
| 750 | * Any task can increment and decrement the count field without lock. | 820 | * Any task can increment and decrement the count field without lock. |
| @@ -770,98 +840,79 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, | |||
| 770 | * A cgroup can only be deleted if both its 'count' of using tasks | 840 | * A cgroup can only be deleted if both its 'count' of using tasks |
| 771 | * is zero, and its list of 'children' cgroups is empty. Since all | 841 | * is zero, and its list of 'children' cgroups is empty. Since all |
| 772 | * tasks in the system use _some_ cgroup, and since there is always at | 842 | * tasks in the system use _some_ cgroup, and since there is always at |
| 773 | * least one task in the system (init, pid == 1), therefore, top_cgroup | 843 | * least one task in the system (init, pid == 1), therefore, root cgroup |
| 774 | * always has either children cgroups and/or using tasks. So we don't | 844 | * always has either children cgroups and/or using tasks. So we don't |
| 775 | * need a special hack to ensure that top_cgroup cannot be deleted. | 845 | * need a special hack to ensure that root cgroup cannot be deleted. |
| 776 | * | ||
| 777 | * The task_lock() exception | ||
| 778 | * | ||
| 779 | * The need for this exception arises from the action of | ||
| 780 | * cgroup_attach_task(), which overwrites one task's cgroup pointer with | ||
| 781 | * another. It does so using cgroup_mutex, however there are | ||
| 782 | * several performance critical places that need to reference | ||
| 783 | * task->cgroup without the expense of grabbing a system global | ||
| 784 | * mutex. Therefore except as noted below, when dereferencing or, as | ||
| 785 | * in cgroup_attach_task(), modifying a task's cgroup pointer we use | ||
| 786 | * task_lock(), which acts on a spinlock (task->alloc_lock) already in | ||
| 787 | * the task_struct routinely used for such matters. | ||
| 788 | * | 846 | * |
| 789 | * P.S. One more locking exception. RCU is used to guard the | 847 | * P.S. One more locking exception. RCU is used to guard the |
| 790 | * update of a tasks cgroup pointer by cgroup_attach_task() | 848 | * update of a tasks cgroup pointer by cgroup_attach_task() |
| 791 | */ | 849 | */ |
| 792 | 850 | ||
| 793 | /* | ||
| 794 | * A couple of forward declarations required, due to cyclic reference loop: | ||
| 795 | * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir -> | ||
| 796 | * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations | ||
| 797 | * -> cgroup_mkdir. | ||
| 798 | */ | ||
| 799 | |||
| 800 | static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); | ||
| 801 | static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); | ||
| 802 | static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask); | 851 | static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask); |
| 803 | static const struct inode_operations cgroup_dir_inode_operations; | 852 | static struct kernfs_syscall_ops cgroup_kf_syscall_ops; |
| 804 | static const struct file_operations proc_cgroupstats_operations; | 853 | static const struct file_operations proc_cgroupstats_operations; |
| 805 | 854 | ||
| 806 | static struct backing_dev_info cgroup_backing_dev_info = { | 855 | static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, |
| 807 | .name = "cgroup", | 856 | char *buf) |
| 808 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, | ||
| 809 | }; | ||
| 810 | |||
| 811 | static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb) | ||
| 812 | { | 857 | { |
| 813 | struct inode *inode = new_inode(sb); | 858 | if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && |
| 814 | 859 | !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) | |
| 815 | if (inode) { | 860 | snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s", |
| 816 | inode->i_ino = get_next_ino(); | 861 | cft->ss->name, cft->name); |
| 817 | inode->i_mode = mode; | 862 | else |
| 818 | inode->i_uid = current_fsuid(); | 863 | strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX); |
| 819 | inode->i_gid = current_fsgid(); | 864 | return buf; |
| 820 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
| 821 | inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; | ||
| 822 | } | ||
| 823 | return inode; | ||
| 824 | } | 865 | } |
| 825 | 866 | ||
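cgroup_file_name() is where kernfs file names acquire their subsystem prefix: unless the cftype is flagged CFTYPE_NO_PREFIX or the hierarchy was mounted noprefix, the name becomes "<subsys>.<file>" (e.g. "memory.limit_in_bytes"). A standalone sketch of the same formatting decision, with simplified stand-in struct fields and flags rather than the kernel's definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define FILE_NAME_MAX 192       /* stand-in for CGROUP_FILE_NAME_MAX */

    struct cft {
            const char *ss_name;    /* NULL for cgroup core files */
            const char *name;
            bool no_prefix;         /* stand-in for CFTYPE_NO_PREFIX */
    };

    static char *cft_file_name(const struct cft *cft, bool root_noprefix,
                               char *buf)
    {
            if (cft->ss_name && !cft->no_prefix && !root_noprefix)
                    snprintf(buf, FILE_NAME_MAX, "%s.%s",
                             cft->ss_name, cft->name);
            else
                    snprintf(buf, FILE_NAME_MAX, "%s", cft->name);
            return buf;
    }

    int main(void)
    {
            char buf[FILE_NAME_MAX];
            struct cft limit = { "memory", "limit_in_bytes", false };
            struct cft procs = { NULL, "cgroup.procs", false };

            printf("%s\n", cft_file_name(&limit, false, buf)); /* memory.limit_in_bytes */
            printf("%s\n", cft_file_name(&procs, false, buf)); /* cgroup.procs */
            return 0;
    }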
| 826 | static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry) | 867 | /** |
| 868 | * cgroup_file_mode - deduce file mode of a control file | ||
| 869 | * @cft: the control file in question | ||
| 870 | * | ||
| 871 | * returns cft->mode if ->mode is not 0 | ||
| 872 | * returns S_IRUGO|S_IWUSR if it has both a read and a write handler | ||
| 873 | * returns S_IRUGO if it has only a read handler | ||
| 874 | * returns S_IWUSR if it has only a write handler | ||
| 875 | */ | ||
| 876 | static umode_t cgroup_file_mode(const struct cftype *cft) | ||
| 827 | { | 877 | { |
| 828 | struct cgroup_name *name; | 878 | umode_t mode = 0; |
| 829 | 879 | ||
| 830 | name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL); | 880 | if (cft->mode) |
| 831 | if (!name) | 881 | return cft->mode; |
| 832 | return NULL; | 882 | |
| 833 | strcpy(name->name, dentry->d_name.name); | 883 | if (cft->read_u64 || cft->read_s64 || cft->seq_show) |
| 834 | return name; | 884 | mode |= S_IRUGO; |
| 885 | |||
| 886 | if (cft->write_u64 || cft->write_s64 || cft->write_string || | ||
| 887 | cft->trigger) | ||
| 888 | mode |= S_IWUSR; | ||
| 889 | |||
| 890 | return mode; | ||
| 835 | } | 891 | } |
| 836 | 892 | ||
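cgroup_file_mode() derives the kernfs permissions from which handlers the cftype provides: an explicit cft->mode wins, otherwise read handlers contribute 0444 and write handlers 0200. A compact userspace model of that deduction, with the handler fields reduced to booleans purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>

    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)   /* kernel macro, defined here */

    struct cft_ops {
            mode_t mode;            /* explicit override, 0 if unset */
            bool has_read;          /* any of read_u64/read_s64/seq_show */
            bool has_write;         /* any of write_u64/write_s64/write_string/trigger */
    };

    static mode_t cft_mode(const struct cft_ops *cft)
    {
            mode_t mode = 0;

            if (cft->mode)
                    return cft->mode;
            if (cft->has_read)
                    mode |= S_IRUGO;
            if (cft->has_write)
                    mode |= S_IWUSR;
            return mode;
    }

    int main(void)
    {
            struct cft_ops rw = { 0, true, true };

            printf("%o\n", (unsigned)cft_mode(&rw));        /* prints 644 */
            return 0;
    }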
| 837 | static void cgroup_free_fn(struct work_struct *work) | 893 | static void cgroup_free_fn(struct work_struct *work) |
| 838 | { | 894 | { |
| 839 | struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); | 895 | struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); |
| 840 | 896 | ||
| 841 | mutex_lock(&cgroup_mutex); | 897 | atomic_dec(&cgrp->root->nr_cgrps); |
| 842 | cgrp->root->number_of_cgroups--; | ||
| 843 | mutex_unlock(&cgroup_mutex); | ||
| 844 | |||
| 845 | /* | ||
| 846 | * We get a ref to the parent's dentry, and put the ref when | ||
| 847 | * this cgroup is being freed, so it's guaranteed that the | ||
| 848 | * parent won't be destroyed before its children. | ||
| 849 | */ | ||
| 850 | dput(cgrp->parent->dentry); | ||
| 851 | |||
| 852 | /* | ||
| 853 | * Drop the active superblock reference that we took when we | ||
| 854 | * created the cgroup. This will free cgrp->root, if we are | ||
| 855 | * holding the last reference to @sb. | ||
| 856 | */ | ||
| 857 | deactivate_super(cgrp->root->sb); | ||
| 858 | |||
| 859 | cgroup_pidlist_destroy_all(cgrp); | 898 | cgroup_pidlist_destroy_all(cgrp); |
| 860 | 899 | ||
| 861 | simple_xattrs_free(&cgrp->xattrs); | 900 | if (cgrp->parent) { |
| 862 | 901 | /* | |
| 863 | kfree(rcu_dereference_raw(cgrp->name)); | 902 | * We get a ref to the parent, and put the ref when this |
| 864 | kfree(cgrp); | 903 | * cgroup is being freed, so it's guaranteed that the |
| 904 | * parent won't be destroyed before its children. | ||
| 905 | */ | ||
| 906 | cgroup_put(cgrp->parent); | ||
| 907 | kernfs_put(cgrp->kn); | ||
| 908 | kfree(cgrp); | ||
| 909 | } else { | ||
| 910 | /* | ||
| 911 | * This is root cgroup's refcnt reaching zero, which | ||
| 912 | * indicates that the root should be released. | ||
| 913 | */ | ||
| 914 | cgroup_destroy_root(cgrp->root); | ||
| 915 | } | ||
| 865 | } | 916 | } |
| 866 | 917 | ||
| 867 | static void cgroup_free_rcu(struct rcu_head *head) | 918 | static void cgroup_free_rcu(struct rcu_head *head) |
| @@ -872,73 +923,40 @@ static void cgroup_free_rcu(struct rcu_head *head) | |||
| 872 | queue_work(cgroup_destroy_wq, &cgrp->destroy_work); | 923 | queue_work(cgroup_destroy_wq, &cgrp->destroy_work); |
| 873 | } | 924 | } |
| 874 | 925 | ||
| 875 | static void cgroup_diput(struct dentry *dentry, struct inode *inode) | 926 | static void cgroup_get(struct cgroup *cgrp) |
| 876 | { | ||
| 877 | /* is dentry a directory ? if so, kfree() associated cgroup */ | ||
| 878 | if (S_ISDIR(inode->i_mode)) { | ||
| 879 | struct cgroup *cgrp = dentry->d_fsdata; | ||
| 880 | |||
| 881 | BUG_ON(!(cgroup_is_dead(cgrp))); | ||
| 882 | |||
| 883 | /* | ||
| 884 | * XXX: cgrp->id is only used to look up css's. As cgroup | ||
| 885 | * and css's lifetimes will be decoupled, it should be made | ||
| 886 | * per-subsystem and moved to css->id so that lookups are | ||
| 887 | * successful until the target css is released. | ||
| 888 | */ | ||
| 889 | mutex_lock(&cgroup_mutex); | ||
| 890 | idr_remove(&cgrp->root->cgroup_idr, cgrp->id); | ||
| 891 | mutex_unlock(&cgroup_mutex); | ||
| 892 | cgrp->id = -1; | ||
| 893 | |||
| 894 | call_rcu(&cgrp->rcu_head, cgroup_free_rcu); | ||
| 895 | } else { | ||
| 896 | struct cfent *cfe = __d_cfe(dentry); | ||
| 897 | struct cgroup *cgrp = dentry->d_parent->d_fsdata; | ||
| 898 | |||
| 899 | WARN_ONCE(!list_empty(&cfe->node) && | ||
| 900 | cgrp != &cgrp->root->top_cgroup, | ||
| 901 | "cfe still linked for %s\n", cfe->type->name); | ||
| 902 | simple_xattrs_free(&cfe->xattrs); | ||
| 903 | kfree(cfe); | ||
| 904 | } | ||
| 905 | iput(inode); | ||
| 906 | } | ||
| 907 | |||
| 908 | static void remove_dir(struct dentry *d) | ||
| 909 | { | 927 | { |
| 910 | struct dentry *parent = dget(d->d_parent); | 928 | WARN_ON_ONCE(cgroup_is_dead(cgrp)); |
| 911 | 929 | WARN_ON_ONCE(atomic_read(&cgrp->refcnt) <= 0); | |
| 912 | d_delete(d); | 930 | atomic_inc(&cgrp->refcnt); |
| 913 | simple_rmdir(parent->d_inode, d); | ||
| 914 | dput(parent); | ||
| 915 | } | 931 | } |
| 916 | 932 | ||
| 917 | static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) | 933 | static void cgroup_put(struct cgroup *cgrp) |
| 918 | { | 934 | { |
| 919 | struct cfent *cfe; | 935 | if (!atomic_dec_and_test(&cgrp->refcnt)) |
| 920 | 936 | return; | |
| 921 | lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); | 937 | if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp))) |
| 922 | lockdep_assert_held(&cgroup_mutex); | 938 | return; |
| 923 | 939 | ||
| 924 | /* | 940 | /* |
| 925 | * If we're doing cleanup due to failure of cgroup_create(), | 941 | * XXX: cgrp->id is only used to look up css's. As cgroup and |
| 926 | * the corresponding @cfe may not exist. | 942 | * css's lifetimes will be decoupled, it should be made |
| 943 | * per-subsystem and moved to css->id so that lookups are | ||
| 944 | * successful until the target css is released. | ||
| 927 | */ | 945 | */ |
| 928 | list_for_each_entry(cfe, &cgrp->files, node) { | 946 | mutex_lock(&cgroup_mutex); |
| 929 | struct dentry *d = cfe->dentry; | 947 | idr_remove(&cgrp->root->cgroup_idr, cgrp->id); |
| 948 | mutex_unlock(&cgroup_mutex); | ||
| 949 | cgrp->id = -1; | ||
| 930 | 950 | ||
| 931 | if (cft && cfe->type != cft) | 951 | call_rcu(&cgrp->rcu_head, cgroup_free_rcu); |
| 932 | continue; | 952 | } |
| 933 | 953 | ||
| 934 | dget(d); | 954 | static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) |
| 935 | d_delete(d); | 955 | { |
| 936 | simple_unlink(cgrp->dentry->d_inode, d); | 956 | char name[CGROUP_FILE_NAME_MAX]; |
| 937 | list_del_init(&cfe->node); | ||
| 938 | dput(d); | ||
| 939 | 957 | ||
| 940 | break; | 958 | lockdep_assert_held(&cgroup_tree_mutex); |
| 941 | } | 959 | kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name)); |
| 942 | } | 960 | } |
| 943 | 961 | ||
| 944 | /** | 962 | /** |
| @@ -952,144 +970,106 @@ static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask) | |||
| 952 | int i; | 970 | int i; |
| 953 | 971 | ||
| 954 | for_each_subsys(ss, i) { | 972 | for_each_subsys(ss, i) { |
| 955 | struct cftype_set *set; | 973 | struct cftype *cfts; |
| 956 | 974 | ||
| 957 | if (!test_bit(i, &subsys_mask)) | 975 | if (!test_bit(i, &subsys_mask)) |
| 958 | continue; | 976 | continue; |
| 959 | list_for_each_entry(set, &ss->cftsets, node) | 977 | list_for_each_entry(cfts, &ss->cfts, node) |
| 960 | cgroup_addrm_files(cgrp, set->cfts, false); | 978 | cgroup_addrm_files(cgrp, cfts, false); |
| 961 | } | 979 | } |
| 962 | } | 980 | } |
| 963 | 981 | ||
| 964 | /* | 982 | static int rebind_subsystems(struct cgroup_root *dst_root, |
| 965 | * NOTE : the dentry must have been dget()'ed | 983 | unsigned long ss_mask) |
| 966 | */ | ||
| 967 | static void cgroup_d_remove_dir(struct dentry *dentry) | ||
| 968 | { | ||
| 969 | struct dentry *parent; | ||
| 970 | |||
| 971 | parent = dentry->d_parent; | ||
| 972 | spin_lock(&parent->d_lock); | ||
| 973 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); | ||
| 974 | list_del_init(&dentry->d_u.d_child); | ||
| 975 | spin_unlock(&dentry->d_lock); | ||
| 976 | spin_unlock(&parent->d_lock); | ||
| 977 | remove_dir(dentry); | ||
| 978 | } | ||
| 979 | |||
| 980 | /* | ||
| 981 | * Call with cgroup_mutex held. Drops reference counts on modules, including | ||
| 982 | * any duplicate ones that parse_cgroupfs_options took. If this function | ||
| 983 | * returns an error, no reference counts are touched. | ||
| 984 | */ | ||
| 985 | static int rebind_subsystems(struct cgroupfs_root *root, | ||
| 986 | unsigned long added_mask, unsigned removed_mask) | ||
| 987 | { | 984 | { |
| 988 | struct cgroup *cgrp = &root->top_cgroup; | ||
| 989 | struct cgroup_subsys *ss; | 985 | struct cgroup_subsys *ss; |
| 990 | unsigned long pinned = 0; | 986 | int ssid, ret; |
| 991 | int i, ret; | ||
| 992 | 987 | ||
| 993 | BUG_ON(!mutex_is_locked(&cgroup_mutex)); | 988 | lockdep_assert_held(&cgroup_tree_mutex); |
| 994 | BUG_ON(!mutex_is_locked(&cgroup_root_mutex)); | 989 | lockdep_assert_held(&cgroup_mutex); |
| 995 | 990 | ||
| 996 | /* Check that any added subsystems are currently free */ | 991 | for_each_subsys(ss, ssid) { |
| 997 | for_each_subsys(ss, i) { | 992 | if (!(ss_mask & (1 << ssid))) |
| 998 | if (!(added_mask & (1 << i))) | ||
| 999 | continue; | 993 | continue; |
| 1000 | 994 | ||
| 1001 | /* is the subsystem mounted elsewhere? */ | 995 | /* if @ss is on the dummy_root, we can always move it */ |
| 1002 | if (ss->root != &cgroup_dummy_root) { | 996 | if (ss->root == &cgrp_dfl_root) |
| 1003 | ret = -EBUSY; | 997 | continue; |
| 1004 | goto out_put; | ||
| 1005 | } | ||
| 1006 | 998 | ||
| 1007 | /* pin the module */ | 999 | /* if @ss has non-root cgroups attached to it, can't move */ |
| 1008 | if (!try_module_get(ss->module)) { | 1000 | if (!list_empty(&ss->root->cgrp.children)) |
| 1009 | ret = -ENOENT; | 1001 | return -EBUSY; |
| 1010 | goto out_put; | ||
| 1011 | } | ||
| 1012 | pinned |= 1 << i; | ||
| 1013 | } | ||
| 1014 | 1002 | ||
| 1015 | /* subsys could be missing if unloaded between parsing and here */ | 1003 | /* can't move between two non-dummy roots either */ |
| 1016 | if (added_mask != pinned) { | 1004 | if (dst_root != &cgrp_dfl_root) |
| 1017 | ret = -ENOENT; | 1005 | return -EBUSY; |
| 1018 | goto out_put; | ||
| 1019 | } | 1006 | } |
| 1020 | 1007 | ||
| 1021 | ret = cgroup_populate_dir(cgrp, added_mask); | 1008 | ret = cgroup_populate_dir(&dst_root->cgrp, ss_mask); |
| 1022 | if (ret) | 1009 | if (ret) { |
| 1023 | goto out_put; | 1010 | if (dst_root != &cgrp_dfl_root) |
| 1011 | return ret; | ||
| 1012 | |||
| 1013 | /* | ||
| 1014 | * Rebinding back to the default root is not allowed to | ||
| 1015 | * fail. Using both default and non-default roots should | ||
| 1016 | * be rare. Moving subsystems back and forth even more so. | ||
| 1017 | * Just warn about it and continue. | ||
| 1018 | */ | ||
| 1019 | if (cgrp_dfl_root_visible) { | ||
| 1020 | pr_warning("cgroup: failed to create files (%d) while rebinding 0x%lx to default root\n", | ||
| 1021 | ret, ss_mask); | ||
| 1022 | pr_warning("cgroup: you may retry by moving them to a different hierarchy and unbinding\n"); | ||
| 1023 | } | ||
| 1024 | } | ||
| 1024 | 1025 | ||
| 1025 | /* | 1026 | /* |
| 1026 | * Nothing can fail from this point on. Remove files for the | 1027 | * Nothing can fail from this point on. Remove files for the |
| 1027 | * removed subsystems and rebind each subsystem. | 1028 | * removed subsystems and rebind each subsystem. |
| 1028 | */ | 1029 | */ |
| 1029 | cgroup_clear_dir(cgrp, removed_mask); | 1030 | mutex_unlock(&cgroup_mutex); |
| 1030 | 1031 | for_each_subsys(ss, ssid) | |
| 1031 | for_each_subsys(ss, i) { | 1032 | if (ss_mask & (1 << ssid)) |
| 1032 | unsigned long bit = 1UL << i; | 1033 | cgroup_clear_dir(&ss->root->cgrp, 1 << ssid); |
| 1033 | 1034 | mutex_lock(&cgroup_mutex); | |
| 1034 | if (bit & added_mask) { | ||
| 1035 | /* We're binding this subsystem to this hierarchy */ | ||
| 1036 | BUG_ON(cgroup_css(cgrp, ss)); | ||
| 1037 | BUG_ON(!cgroup_css(cgroup_dummy_top, ss)); | ||
| 1038 | BUG_ON(cgroup_css(cgroup_dummy_top, ss)->cgroup != cgroup_dummy_top); | ||
| 1039 | 1035 | ||
| 1040 | rcu_assign_pointer(cgrp->subsys[i], | 1036 | for_each_subsys(ss, ssid) { |
| 1041 | cgroup_css(cgroup_dummy_top, ss)); | 1037 | struct cgroup_root *src_root; |
| 1042 | cgroup_css(cgrp, ss)->cgroup = cgrp; | 1038 | struct cgroup_subsys_state *css; |
| 1043 | 1039 | ||
| 1044 | ss->root = root; | 1040 | if (!(ss_mask & (1 << ssid))) |
| 1045 | if (ss->bind) | 1041 | continue; |
| 1046 | ss->bind(cgroup_css(cgrp, ss)); | ||
| 1047 | 1042 | ||
| 1048 | /* refcount was already taken, and we're keeping it */ | 1043 | src_root = ss->root; |
| 1049 | root->subsys_mask |= bit; | 1044 | css = cgroup_css(&src_root->cgrp, ss); |
| 1050 | } else if (bit & removed_mask) { | ||
| 1051 | /* We're removing this subsystem */ | ||
| 1052 | BUG_ON(cgroup_css(cgrp, ss) != cgroup_css(cgroup_dummy_top, ss)); | ||
| 1053 | BUG_ON(cgroup_css(cgrp, ss)->cgroup != cgrp); | ||
| 1054 | 1045 | ||
| 1055 | if (ss->bind) | 1046 | WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss)); |
| 1056 | ss->bind(cgroup_css(cgroup_dummy_top, ss)); | ||
| 1057 | 1047 | ||
| 1058 | cgroup_css(cgroup_dummy_top, ss)->cgroup = cgroup_dummy_top; | 1048 | RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL); |
| 1059 | RCU_INIT_POINTER(cgrp->subsys[i], NULL); | 1049 | rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css); |
| 1050 | ss->root = dst_root; | ||
| 1051 | css->cgroup = &dst_root->cgrp; | ||
| 1060 | 1052 | ||
| 1061 | cgroup_subsys[i]->root = &cgroup_dummy_root; | 1053 | src_root->cgrp.subsys_mask &= ~(1 << ssid); |
| 1054 | dst_root->cgrp.subsys_mask |= 1 << ssid; | ||
| 1062 | 1055 | ||
| 1063 | /* subsystem is now free - drop reference on module */ | 1056 | if (ss->bind) |
| 1064 | module_put(ss->module); | 1057 | ss->bind(css); |
| 1065 | root->subsys_mask &= ~bit; | ||
| 1066 | } | ||
| 1067 | } | 1058 | } |
| 1068 | 1059 | ||
| 1069 | /* | 1060 | kernfs_activate(dst_root->cgrp.kn); |
| 1070 | * Mark @root has finished binding subsystems. @root->subsys_mask | ||
| 1071 | * now matches the bound subsystems. | ||
| 1072 | */ | ||
| 1073 | root->flags |= CGRP_ROOT_SUBSYS_BOUND; | ||
| 1074 | |||
| 1075 | return 0; | 1061 | return 0; |
| 1076 | |||
| 1077 | out_put: | ||
| 1078 | for_each_subsys(ss, i) | ||
| 1079 | if (pinned & (1 << i)) | ||
| 1080 | module_put(ss->module); | ||
| 1081 | return ret; | ||
| 1082 | } | 1062 | } |
| 1083 | 1063 | ||
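The rewritten rebind_subsystems() is mask driven: for each bit set in ss_mask it clears the css pointer and mask bit on the source root, installs them on the destination, and then calls ->bind(). The following plain C model shows just the pointer/mask handover, with no RCU, locking, or ->bind() step; the struct layout and names are illustrative only.

    #include <stdio.h>

    #define NSUBSYS 3

    struct css { int dummy; };

    struct root {
            unsigned long subsys_mask;
            struct css *subsys[NSUBSYS];
    };

    static void rebind(struct root *src, struct root *dst, unsigned long ss_mask)
    {
            for (int id = 0; id < NSUBSYS; id++) {
                    if (!(ss_mask & (1UL << id)))
                            continue;

                    /* hand the per-subsystem state over to the new hierarchy */
                    dst->subsys[id] = src->subsys[id];
                    src->subsys[id] = NULL;

                    src->subsys_mask &= ~(1UL << id);
                    dst->subsys_mask |= 1UL << id;
            }
    }

    int main(void)
    {
            static struct css a, b;
            struct root src = { .subsys_mask = 0x3, .subsys = { &a, &b, NULL } };
            struct root dst = { 0 };

            rebind(&src, &dst, 1UL << 1);   /* move subsystem 1 only */
            printf("src=%#lx dst=%#lx\n", src.subsys_mask, dst.subsys_mask);
            return 0;
    }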
| 1084 | static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) | 1064 | static int cgroup_show_options(struct seq_file *seq, |
| 1065 | struct kernfs_root *kf_root) | ||
| 1085 | { | 1066 | { |
| 1086 | struct cgroupfs_root *root = dentry->d_sb->s_fs_info; | 1067 | struct cgroup_root *root = cgroup_root_from_kf(kf_root); |
| 1087 | struct cgroup_subsys *ss; | 1068 | struct cgroup_subsys *ss; |
| 1088 | int ssid; | 1069 | int ssid; |
| 1089 | 1070 | ||
| 1090 | mutex_lock(&cgroup_root_mutex); | ||
| 1091 | for_each_subsys(ss, ssid) | 1071 | for_each_subsys(ss, ssid) |
| 1092 | if (root->subsys_mask & (1 << ssid)) | 1072 | if (root->cgrp.subsys_mask & (1 << ssid)) |
| 1093 | seq_printf(seq, ",%s", ss->name); | 1073 | seq_printf(seq, ",%s", ss->name); |
| 1094 | if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) | 1074 | if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) |
| 1095 | seq_puts(seq, ",sane_behavior"); | 1075 | seq_puts(seq, ",sane_behavior"); |
| @@ -1097,13 +1077,16 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) | |||
| 1097 | seq_puts(seq, ",noprefix"); | 1077 | seq_puts(seq, ",noprefix"); |
| 1098 | if (root->flags & CGRP_ROOT_XATTR) | 1078 | if (root->flags & CGRP_ROOT_XATTR) |
| 1099 | seq_puts(seq, ",xattr"); | 1079 | seq_puts(seq, ",xattr"); |
| 1080 | |||
| 1081 | spin_lock(&release_agent_path_lock); | ||
| 1100 | if (strlen(root->release_agent_path)) | 1082 | if (strlen(root->release_agent_path)) |
| 1101 | seq_printf(seq, ",release_agent=%s", root->release_agent_path); | 1083 | seq_printf(seq, ",release_agent=%s", root->release_agent_path); |
| 1102 | if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags)) | 1084 | spin_unlock(&release_agent_path_lock); |
| 1085 | |||
| 1086 | if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags)) | ||
| 1103 | seq_puts(seq, ",clone_children"); | 1087 | seq_puts(seq, ",clone_children"); |
| 1104 | if (strlen(root->name)) | 1088 | if (strlen(root->name)) |
| 1105 | seq_printf(seq, ",name=%s", root->name); | 1089 | seq_printf(seq, ",name=%s", root->name); |
| 1106 | mutex_unlock(&cgroup_root_mutex); | ||
| 1107 | return 0; | 1090 | return 0; |
| 1108 | } | 1091 | } |
| 1109 | 1092 | ||
| @@ -1115,9 +1098,6 @@ struct cgroup_sb_opts { | |||
| 1115 | char *name; | 1098 | char *name; |
| 1116 | /* User explicitly requested empty subsystem */ | 1099 | /* User explicitly requested empty subsystem */ |
| 1117 | bool none; | 1100 | bool none; |
| 1118 | |||
| 1119 | struct cgroupfs_root *new_root; | ||
| 1120 | |||
| 1121 | }; | 1101 | }; |
| 1122 | 1102 | ||
| 1123 | /* | 1103 | /* |
| @@ -1137,7 +1117,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) | |||
| 1137 | BUG_ON(!mutex_is_locked(&cgroup_mutex)); | 1117 | BUG_ON(!mutex_is_locked(&cgroup_mutex)); |
| 1138 | 1118 | ||
| 1139 | #ifdef CONFIG_CPUSETS | 1119 | #ifdef CONFIG_CPUSETS |
| 1140 | mask = ~(1UL << cpuset_subsys_id); | 1120 | mask = ~(1UL << cpuset_cgrp_id); |
| 1141 | #endif | 1121 | #endif |
| 1142 | 1122 | ||
| 1143 | memset(opts, 0, sizeof(*opts)); | 1123 | memset(opts, 0, sizeof(*opts)); |
| @@ -1227,30 +1207,34 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) | |||
| 1227 | return -ENOENT; | 1207 | return -ENOENT; |
| 1228 | } | 1208 | } |
| 1229 | 1209 | ||
| 1230 | /* | ||
| 1231 | * If the 'all' option was specified select all the subsystems, | ||
| 1232 | * otherwise if 'none', 'name=' and a subsystem name options | ||
| 1233 | * were not specified, let's default to 'all' | ||
| 1234 | */ | ||
| 1235 | if (all_ss || (!one_ss && !opts->none && !opts->name)) | ||
| 1236 | for_each_subsys(ss, i) | ||
| 1237 | if (!ss->disabled) | ||
| 1238 | set_bit(i, &opts->subsys_mask); | ||
| 1239 | |||
| 1240 | /* Consistency checks */ | 1210 | /* Consistency checks */ |
| 1241 | 1211 | ||
| 1242 | if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { | 1212 | if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { |
| 1243 | pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n"); | 1213 | pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n"); |
| 1244 | 1214 | ||
| 1245 | if (opts->flags & CGRP_ROOT_NOPREFIX) { | 1215 | if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) || |
| 1246 | pr_err("cgroup: sane_behavior: noprefix is not allowed\n"); | 1216 | opts->cpuset_clone_children || opts->release_agent || |
| 1217 | opts->name) { | ||
| 1218 | pr_err("cgroup: sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n"); | ||
| 1247 | return -EINVAL; | 1219 | return -EINVAL; |
| 1248 | } | 1220 | } |
| 1221 | } else { | ||
| 1222 | /* | ||
| 1223 | * If the 'all' option was specified select all the | ||
| 1224 | * subsystems, otherwise if 'none', 'name=' and a subsystem | ||
| 1225 | * name options were not specified, let's default to 'all' | ||
| 1226 | */ | ||
| 1227 | if (all_ss || (!one_ss && !opts->none && !opts->name)) | ||
| 1228 | for_each_subsys(ss, i) | ||
| 1229 | if (!ss->disabled) | ||
| 1230 | set_bit(i, &opts->subsys_mask); | ||
| 1249 | 1231 | ||
| 1250 | if (opts->cpuset_clone_children) { | 1232 | /* |
| 1251 | pr_err("cgroup: sane_behavior: clone_children is not allowed\n"); | 1233 | * We either have to specify by name or by subsystems. (So |
| 1234 | * all empty hierarchies must have a name). | ||
| 1235 | */ | ||
| 1236 | if (!opts->subsys_mask && !opts->name) | ||
| 1252 | return -EINVAL; | 1237 | return -EINVAL; |
| 1253 | } | ||
| 1254 | } | 1238 | } |
| 1255 | 1239 | ||
| 1256 | /* | 1240 | /* |
| @@ -1266,21 +1250,13 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) | |||
| 1266 | if (opts->subsys_mask && opts->none) | 1250 | if (opts->subsys_mask && opts->none) |
| 1267 | return -EINVAL; | 1251 | return -EINVAL; |
| 1268 | 1252 | ||
| 1269 | /* | ||
| 1270 | * We either have to specify by name or by subsystems. (So all | ||
| 1271 | * empty hierarchies must have a name). | ||
| 1272 | */ | ||
| 1273 | if (!opts->subsys_mask && !opts->name) | ||
| 1274 | return -EINVAL; | ||
| 1275 | |||
| 1276 | return 0; | 1253 | return 0; |
| 1277 | } | 1254 | } |
| 1278 | 1255 | ||
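parse_cgroupfs_options() now branches on sane_behavior first: that mode rejects noprefix, xattr, clone_children, release_agent, and name outright, while legacy mode keeps the old "default to all subsystems" fallback and the "empty hierarchies need a name" rule, and the none/subsys contradiction is rejected in both modes. A condensed, self-contained model of that decision tree; the option struct, check_opts() helper, and the use of subsys_mask as the "no subsystem named" test are simplified stand-ins, not the kernel's logic verbatim.

    #include <stdbool.h>
    #include <stdio.h>

    struct opts {
            bool sane_behavior;
            bool noprefix, xattr, clone_children;
            const char *release_agent, *name;
            unsigned long subsys_mask;
            bool none;
    };

    /* returns 0 on success, -1 for the consistency failures the code rejects */
    static int check_opts(struct opts *o, unsigned long all_subsys_mask)
    {
            if (o->sane_behavior) {
                    if (o->noprefix || o->xattr || o->clone_children ||
                        o->release_agent || o->name)
                            return -1;
            } else {
                    /* legacy: no explicit selection means "all" */
                    if (!o->subsys_mask && !o->none && !o->name)
                            o->subsys_mask = all_subsys_mask;
                    /* empty legacy hierarchies must at least carry a name */
                    if (!o->subsys_mask && !o->name)
                            return -1;
            }
            /* "none" and an explicit subsystem list contradict each other */
            if (o->subsys_mask && o->none)
                    return -1;
            return 0;
    }

    int main(void)
    {
            struct opts o = { .sane_behavior = true, .name = "mine" };

            printf("%d\n", check_opts(&o, 0x7));    /* -1: name not allowed */
            return 0;
    }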
| 1279 | static int cgroup_remount(struct super_block *sb, int *flags, char *data) | 1256 | static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) |
| 1280 | { | 1257 | { |
| 1281 | int ret = 0; | 1258 | int ret = 0; |
| 1282 | struct cgroupfs_root *root = sb->s_fs_info; | 1259 | struct cgroup_root *root = cgroup_root_from_kf(kf_root); |
| 1283 | struct cgroup *cgrp = &root->top_cgroup; | ||
| 1284 | struct cgroup_sb_opts opts; | 1260 | struct cgroup_sb_opts opts; |
| 1285 | unsigned long added_mask, removed_mask; | 1261 | unsigned long added_mask, removed_mask; |
| 1286 | 1262 | ||
| @@ -1289,21 +1265,20 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) | |||
| 1289 | return -EINVAL; | 1265 | return -EINVAL; |
| 1290 | } | 1266 | } |
| 1291 | 1267 | ||
| 1292 | mutex_lock(&cgrp->dentry->d_inode->i_mutex); | 1268 | mutex_lock(&cgroup_tree_mutex); |
| 1293 | mutex_lock(&cgroup_mutex); | 1269 | mutex_lock(&cgroup_mutex); |
| 1294 | mutex_lock(&cgroup_root_mutex); | ||
| 1295 | 1270 | ||
| 1296 | /* See what subsystems are wanted */ | 1271 | /* See what subsystems are wanted */ |
| 1297 | ret = parse_cgroupfs_options(data, &opts); | 1272 | ret = parse_cgroupfs_options(data, &opts); |
| 1298 | if (ret) | 1273 | if (ret) |
| 1299 | goto out_unlock; | 1274 | goto out_unlock; |
| 1300 | 1275 | ||
| 1301 | if (opts.subsys_mask != root->subsys_mask || opts.release_agent) | 1276 | if (opts.subsys_mask != root->cgrp.subsys_mask || opts.release_agent) |
| 1302 | pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", | 1277 | pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", |
| 1303 | task_tgid_nr(current), current->comm); | 1278 | task_tgid_nr(current), current->comm); |
| 1304 | 1279 | ||
| 1305 | added_mask = opts.subsys_mask & ~root->subsys_mask; | 1280 | added_mask = opts.subsys_mask & ~root->cgrp.subsys_mask; |
| 1306 | removed_mask = root->subsys_mask & ~opts.subsys_mask; | 1281 | removed_mask = root->cgrp.subsys_mask & ~opts.subsys_mask; |
| 1307 | 1282 | ||
| 1308 | /* Don't allow flags or name to change at remount */ | 1283 | /* Don't allow flags or name to change at remount */ |
| 1309 | if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) || | 1284 | if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) || |
| @@ -1316,422 +1291,332 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) | |||
| 1316 | } | 1291 | } |
| 1317 | 1292 | ||
| 1318 | /* remounting is not allowed for populated hierarchies */ | 1293 | /* remounting is not allowed for populated hierarchies */ |
| 1319 | if (root->number_of_cgroups > 1) { | 1294 | if (!list_empty(&root->cgrp.children)) { |
| 1320 | ret = -EBUSY; | 1295 | ret = -EBUSY; |
| 1321 | goto out_unlock; | 1296 | goto out_unlock; |
| 1322 | } | 1297 | } |
| 1323 | 1298 | ||
| 1324 | ret = rebind_subsystems(root, added_mask, removed_mask); | 1299 | ret = rebind_subsystems(root, added_mask); |
| 1325 | if (ret) | 1300 | if (ret) |
| 1326 | goto out_unlock; | 1301 | goto out_unlock; |
| 1327 | 1302 | ||
| 1328 | if (opts.release_agent) | 1303 | rebind_subsystems(&cgrp_dfl_root, removed_mask); |
| 1304 | |||
| 1305 | if (opts.release_agent) { | ||
| 1306 | spin_lock(&release_agent_path_lock); | ||
| 1329 | strcpy(root->release_agent_path, opts.release_agent); | 1307 | strcpy(root->release_agent_path, opts.release_agent); |
| 1308 | spin_unlock(&release_agent_path_lock); | ||
| 1309 | } | ||
| 1330 | out_unlock: | 1310 | out_unlock: |
| 1331 | kfree(opts.release_agent); | 1311 | kfree(opts.release_agent); |
| 1332 | kfree(opts.name); | 1312 | kfree(opts.name); |
| 1333 | mutex_unlock(&cgroup_root_mutex); | ||
| 1334 | mutex_unlock(&cgroup_mutex); | 1313 | mutex_unlock(&cgroup_mutex); |
| 1335 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 1314 | mutex_unlock(&cgroup_tree_mutex); |
| 1336 | return ret; | 1315 | return ret; |
| 1337 | } | 1316 | } |
| 1338 | 1317 | ||
| 1339 | static const struct super_operations cgroup_ops = { | 1318 | /* |
| 1340 | .statfs = simple_statfs, | 1319 | * To reduce the fork() overhead for systems that are not actually using |
| 1341 | .drop_inode = generic_delete_inode, | 1320 | * their cgroups capability, we don't maintain the lists running through |
| 1342 | .show_options = cgroup_show_options, | 1321 | * each css_set to its tasks until we see the list actually used - in other |
| 1343 | .remount_fs = cgroup_remount, | 1322 | * words after the first mount. |
| 1344 | }; | 1323 | */ |
| 1324 | static bool use_task_css_set_links __read_mostly; | ||
| 1325 | |||
| 1326 | static void cgroup_enable_task_cg_lists(void) | ||
| 1327 | { | ||
| 1328 | struct task_struct *p, *g; | ||
| 1329 | |||
| 1330 | down_write(&css_set_rwsem); | ||
| 1331 | |||
| 1332 | if (use_task_css_set_links) | ||
| 1333 | goto out_unlock; | ||
| 1334 | |||
| 1335 | use_task_css_set_links = true; | ||
| 1336 | |||
| 1337 | /* | ||
| 1338 | * We need tasklist_lock because RCU is not safe against | ||
| 1339 | * while_each_thread(). Besides, a forking task that has passed | ||
| 1340 | * cgroup_post_fork() without seeing use_task_css_set_links = 1 | ||
| 1341 | * is not guaranteed to have its child immediately visible in the | ||
| 1342 | * tasklist if we walk through it with RCU. | ||
| 1343 | */ | ||
| 1344 | read_lock(&tasklist_lock); | ||
| 1345 | do_each_thread(g, p) { | ||
| 1346 | WARN_ON_ONCE(!list_empty(&p->cg_list) || | ||
| 1347 | task_css_set(p) != &init_css_set); | ||
| 1348 | |||
| 1349 | /* | ||
| 1350 | * We should check if the process is exiting, otherwise | ||
| 1351 | * it will race with cgroup_exit() in that the list | ||
| 1352 | * entry won't be deleted though the process has exited. | ||
| 1353 | * Do it while holding siglock so that we don't end up | ||
| 1354 | * racing against cgroup_exit(). | ||
| 1355 | */ | ||
| 1356 | spin_lock_irq(&p->sighand->siglock); | ||
| 1357 | if (!(p->flags & PF_EXITING)) { | ||
| 1358 | struct css_set *cset = task_css_set(p); | ||
| 1359 | |||
| 1360 | list_add(&p->cg_list, &cset->tasks); | ||
| 1361 | get_css_set(cset); | ||
| 1362 | } | ||
| 1363 | spin_unlock_irq(&p->sighand->siglock); | ||
| 1364 | } while_each_thread(g, p); | ||
| 1365 | read_unlock(&tasklist_lock); | ||
| 1366 | out_unlock: | ||
| 1367 | up_write(&css_set_rwsem); | ||
| 1368 | } | ||
| 1345 | 1369 | ||
| 1346 | static void init_cgroup_housekeeping(struct cgroup *cgrp) | 1370 | static void init_cgroup_housekeeping(struct cgroup *cgrp) |
| 1347 | { | 1371 | { |
| 1372 | atomic_set(&cgrp->refcnt, 1); | ||
| 1348 | INIT_LIST_HEAD(&cgrp->sibling); | 1373 | INIT_LIST_HEAD(&cgrp->sibling); |
| 1349 | INIT_LIST_HEAD(&cgrp->children); | 1374 | INIT_LIST_HEAD(&cgrp->children); |
| 1350 | INIT_LIST_HEAD(&cgrp->files); | ||
| 1351 | INIT_LIST_HEAD(&cgrp->cset_links); | 1375 | INIT_LIST_HEAD(&cgrp->cset_links); |
| 1352 | INIT_LIST_HEAD(&cgrp->release_list); | 1376 | INIT_LIST_HEAD(&cgrp->release_list); |
| 1353 | INIT_LIST_HEAD(&cgrp->pidlists); | 1377 | INIT_LIST_HEAD(&cgrp->pidlists); |
| 1354 | mutex_init(&cgrp->pidlist_mutex); | 1378 | mutex_init(&cgrp->pidlist_mutex); |
| 1355 | cgrp->dummy_css.cgroup = cgrp; | 1379 | cgrp->dummy_css.cgroup = cgrp; |
| 1356 | simple_xattrs_init(&cgrp->xattrs); | ||
| 1357 | } | 1380 | } |
| 1358 | 1381 | ||
| 1359 | static void init_cgroup_root(struct cgroupfs_root *root) | 1382 | static void init_cgroup_root(struct cgroup_root *root, |
| 1383 | struct cgroup_sb_opts *opts) | ||
| 1360 | { | 1384 | { |
| 1361 | struct cgroup *cgrp = &root->top_cgroup; | 1385 | struct cgroup *cgrp = &root->cgrp; |
| 1362 | 1386 | ||
| 1363 | INIT_LIST_HEAD(&root->root_list); | 1387 | INIT_LIST_HEAD(&root->root_list); |
| 1364 | root->number_of_cgroups = 1; | 1388 | atomic_set(&root->nr_cgrps, 1); |
| 1365 | cgrp->root = root; | 1389 | cgrp->root = root; |
| 1366 | RCU_INIT_POINTER(cgrp->name, &root_cgroup_name); | ||
| 1367 | init_cgroup_housekeeping(cgrp); | 1390 | init_cgroup_housekeeping(cgrp); |
| 1368 | idr_init(&root->cgroup_idr); | 1391 | idr_init(&root->cgroup_idr); |
| 1369 | } | ||
| 1370 | |||
| 1371 | static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) | ||
| 1372 | { | ||
| 1373 | int id; | ||
| 1374 | 1392 | ||
| 1375 | lockdep_assert_held(&cgroup_mutex); | ||
| 1376 | lockdep_assert_held(&cgroup_root_mutex); | ||
| 1377 | |||
| 1378 | id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end, | ||
| 1379 | GFP_KERNEL); | ||
| 1380 | if (id < 0) | ||
| 1381 | return id; | ||
| 1382 | |||
| 1383 | root->hierarchy_id = id; | ||
| 1384 | return 0; | ||
| 1385 | } | ||
| 1386 | |||
| 1387 | static void cgroup_exit_root_id(struct cgroupfs_root *root) | ||
| 1388 | { | ||
| 1389 | lockdep_assert_held(&cgroup_mutex); | ||
| 1390 | lockdep_assert_held(&cgroup_root_mutex); | ||
| 1391 | |||
| 1392 | if (root->hierarchy_id) { | ||
| 1393 | idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); | ||
| 1394 | root->hierarchy_id = 0; | ||
| 1395 | } | ||
| 1396 | } | ||
| 1397 | |||
| 1398 | static int cgroup_test_super(struct super_block *sb, void *data) | ||
| 1399 | { | ||
| 1400 | struct cgroup_sb_opts *opts = data; | ||
| 1401 | struct cgroupfs_root *root = sb->s_fs_info; | ||
| 1402 | |||
| 1403 | /* If we asked for a name then it must match */ | ||
| 1404 | if (opts->name && strcmp(opts->name, root->name)) | ||
| 1405 | return 0; | ||
| 1406 | |||
| 1407 | /* | ||
| 1408 | * If we asked for subsystems (or explicitly for no | ||
| 1409 | * subsystems) then they must match | ||
| 1410 | */ | ||
| 1411 | if ((opts->subsys_mask || opts->none) | ||
| 1412 | && (opts->subsys_mask != root->subsys_mask)) | ||
| 1413 | return 0; | ||
| 1414 | |||
| 1415 | return 1; | ||
| 1416 | } | ||
| 1417 | |||
| 1418 | static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) | ||
| 1419 | { | ||
| 1420 | struct cgroupfs_root *root; | ||
| 1421 | |||
| 1422 | if (!opts->subsys_mask && !opts->none) | ||
| 1423 | return NULL; | ||
| 1424 | |||
| 1425 | root = kzalloc(sizeof(*root), GFP_KERNEL); | ||
| 1426 | if (!root) | ||
| 1427 | return ERR_PTR(-ENOMEM); | ||
| 1428 | |||
| 1429 | init_cgroup_root(root); | ||
| 1430 | |||
| 1431 | /* | ||
| 1432 | * We need to set @root->subsys_mask now so that @root can be | ||
| 1433 | * matched by cgroup_test_super() before it finishes | ||
| 1434 | * initialization; otherwise, competing mounts with the same | ||
| 1435 | * options may try to bind the same subsystems instead of waiting | ||
| 1436 | * for the first one leading to unexpected mount errors. | ||
| 1437 | * SUBSYS_BOUND will be set once actual binding is complete. | ||
| 1438 | */ | ||
| 1439 | root->subsys_mask = opts->subsys_mask; | ||
| 1440 | root->flags = opts->flags; | 1393 | root->flags = opts->flags; |
| 1441 | if (opts->release_agent) | 1394 | if (opts->release_agent) |
| 1442 | strcpy(root->release_agent_path, opts->release_agent); | 1395 | strcpy(root->release_agent_path, opts->release_agent); |
| 1443 | if (opts->name) | 1396 | if (opts->name) |
| 1444 | strcpy(root->name, opts->name); | 1397 | strcpy(root->name, opts->name); |
| 1445 | if (opts->cpuset_clone_children) | 1398 | if (opts->cpuset_clone_children) |
| 1446 | set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags); | 1399 | set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); |
| 1447 | return root; | ||
| 1448 | } | 1400 | } |
| 1449 | 1401 | ||
| 1450 | static void cgroup_free_root(struct cgroupfs_root *root) | 1402 | static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask) |
| 1451 | { | 1403 | { |
| 1452 | if (root) { | 1404 | LIST_HEAD(tmp_links); |
| 1453 | /* hierarchy ID should already have been released */ | 1405 | struct cgroup *root_cgrp = &root->cgrp; |
| 1454 | WARN_ON_ONCE(root->hierarchy_id); | 1406 | struct css_set *cset; |
| 1455 | 1407 | int i, ret; | |
| 1456 | idr_destroy(&root->cgroup_idr); | ||
| 1457 | kfree(root); | ||
| 1458 | } | ||
| 1459 | } | ||
| 1460 | 1408 | ||
| 1461 | static int cgroup_set_super(struct super_block *sb, void *data) | 1409 | lockdep_assert_held(&cgroup_tree_mutex); |
| 1462 | { | 1410 | lockdep_assert_held(&cgroup_mutex); |
| 1463 | int ret; | ||
| 1464 | struct cgroup_sb_opts *opts = data; | ||
| 1465 | 1411 | ||
| 1466 | /* If we don't have a new root, we can't set up a new sb */ | 1412 | ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); |
| 1467 | if (!opts->new_root) | 1413 | if (ret < 0) |
| 1468 | return -EINVAL; | 1414 | goto out; |
| 1415 | root_cgrp->id = ret; | ||
| 1469 | 1416 | ||
| 1470 | BUG_ON(!opts->subsys_mask && !opts->none); | 1417 | /* |
| 1418 | * We're accessing css_set_count without locking css_set_rwsem here, | ||
| 1419 | * but that's OK - it can only be increased by someone holding | ||
| 1420 | * cgroup_lock, and that's us. The worst that can happen is that we | ||
| 1421 | * have some link structures left over | ||
| 1422 | */ | ||
| 1423 | ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); | ||
| 1424 | if (ret) | ||
| 1425 | goto out; | ||
| 1471 | 1426 | ||
| 1472 | ret = set_anon_super(sb, NULL); | 1427 | ret = cgroup_init_root_id(root); |
| 1473 | if (ret) | 1428 | if (ret) |
| 1474 | return ret; | 1429 | goto out; |
| 1475 | 1430 | ||
| 1476 | sb->s_fs_info = opts->new_root; | 1431 | root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops, |
| 1477 | opts->new_root->sb = sb; | 1432 | KERNFS_ROOT_CREATE_DEACTIVATED, |
| 1433 | root_cgrp); | ||
| 1434 | if (IS_ERR(root->kf_root)) { | ||
| 1435 | ret = PTR_ERR(root->kf_root); | ||
| 1436 | goto exit_root_id; | ||
| 1437 | } | ||
| 1438 | root_cgrp->kn = root->kf_root->kn; | ||
| 1478 | 1439 | ||
| 1479 | sb->s_blocksize = PAGE_CACHE_SIZE; | 1440 | ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true); |
| 1480 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | 1441 | if (ret) |
| 1481 | sb->s_magic = CGROUP_SUPER_MAGIC; | 1442 | goto destroy_root; |
| 1482 | sb->s_op = &cgroup_ops; | ||
| 1483 | 1443 | ||
| 1484 | return 0; | 1444 | ret = rebind_subsystems(root, ss_mask); |
| 1485 | } | 1445 | if (ret) |
| 1446 | goto destroy_root; | ||
| 1486 | 1447 | ||
| 1487 | static int cgroup_get_rootdir(struct super_block *sb) | 1448 | /* |
| 1488 | { | 1449 | * There must be no failure case after here, since rebinding takes |
| 1489 | static const struct dentry_operations cgroup_dops = { | 1450 | * care of subsystems' refcounts, which are explicitly dropped in |
| 1490 | .d_iput = cgroup_diput, | 1451 | * the failure exit path. |
| 1491 | .d_delete = always_delete_dentry, | 1452 | */ |
| 1492 | }; | 1453 | list_add(&root->root_list, &cgroup_roots); |
| 1454 | cgroup_root_count++; | ||
| 1493 | 1455 | ||
| 1494 | struct inode *inode = | 1456 | /* |
| 1495 | cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb); | 1457 | * Link the root cgroup in this hierarchy into all the css_set |
| 1458 | * objects. | ||
| 1459 | */ | ||
| 1460 | down_write(&css_set_rwsem); | ||
| 1461 | hash_for_each(css_set_table, i, cset, hlist) | ||
| 1462 | link_css_set(&tmp_links, cset, root_cgrp); | ||
| 1463 | up_write(&css_set_rwsem); | ||
| 1496 | 1464 | ||
| 1497 | if (!inode) | 1465 | BUG_ON(!list_empty(&root_cgrp->children)); |
| 1498 | return -ENOMEM; | 1466 | BUG_ON(atomic_read(&root->nr_cgrps) != 1); |
| 1499 | 1467 | ||
| 1500 | inode->i_fop = &simple_dir_operations; | 1468 | kernfs_activate(root_cgrp->kn); |
| 1501 | inode->i_op = &cgroup_dir_inode_operations; | 1469 | ret = 0; |
| 1502 | /* directories start off with i_nlink == 2 (for "." entry) */ | 1470 | goto out; |
| 1503 | inc_nlink(inode); | 1471 | |
| 1504 | sb->s_root = d_make_root(inode); | 1472 | destroy_root: |
| 1505 | if (!sb->s_root) | 1473 | kernfs_destroy_root(root->kf_root); |
| 1506 | return -ENOMEM; | 1474 | root->kf_root = NULL; |
| 1507 | /* for everything else we want ->d_op set */ | 1475 | exit_root_id: |
| 1508 | sb->s_d_op = &cgroup_dops; | 1476 | cgroup_exit_root_id(root); |
| 1509 | return 0; | 1477 | out: |
| 1478 | free_cgrp_cset_links(&tmp_links); | ||
| 1479 | return ret; | ||
| 1510 | } | 1480 | } |
| 1511 | 1481 | ||
| 1512 | static struct dentry *cgroup_mount(struct file_system_type *fs_type, | 1482 | static struct dentry *cgroup_mount(struct file_system_type *fs_type, |
| 1513 | int flags, const char *unused_dev_name, | 1483 | int flags, const char *unused_dev_name, |
| 1514 | void *data) | 1484 | void *data) |
| 1515 | { | 1485 | { |
| 1486 | struct cgroup_root *root; | ||
| 1516 | struct cgroup_sb_opts opts; | 1487 | struct cgroup_sb_opts opts; |
| 1517 | struct cgroupfs_root *root; | 1488 | struct dentry *dentry; |
| 1518 | int ret = 0; | 1489 | int ret; |
| 1519 | struct super_block *sb; | 1490 | bool new_sb; |
| 1520 | struct cgroupfs_root *new_root; | ||
| 1521 | struct list_head tmp_links; | ||
| 1522 | struct inode *inode; | ||
| 1523 | const struct cred *cred; | ||
| 1524 | 1491 | ||
| 1525 | /* First find the desired set of subsystems */ | 1492 | /* |
| 1493 | * The first time anyone tries to mount a cgroup, enable the list | ||
| 1494 | * linking each css_set to its tasks and fix up all existing tasks. | ||
| 1495 | */ | ||
| 1496 | if (!use_task_css_set_links) | ||
| 1497 | cgroup_enable_task_cg_lists(); | ||
| 1498 | retry: | ||
| 1499 | mutex_lock(&cgroup_tree_mutex); | ||
| 1526 | mutex_lock(&cgroup_mutex); | 1500 | mutex_lock(&cgroup_mutex); |
| 1501 | |||
| 1502 | /* First find the desired set of subsystems */ | ||
| 1527 | ret = parse_cgroupfs_options(data, &opts); | 1503 | ret = parse_cgroupfs_options(data, &opts); |
| 1528 | mutex_unlock(&cgroup_mutex); | ||
| 1529 | if (ret) | 1504 | if (ret) |
| 1530 | goto out_err; | 1505 | goto out_unlock; |
| 1531 | |||
| 1532 | /* | ||
| 1533 | * Allocate a new cgroup root. We may not need it if we're | ||
| 1534 | * reusing an existing hierarchy. | ||
| 1535 | */ | ||
| 1536 | new_root = cgroup_root_from_opts(&opts); | ||
| 1537 | if (IS_ERR(new_root)) { | ||
| 1538 | ret = PTR_ERR(new_root); | ||
| 1539 | goto out_err; | ||
| 1540 | } | ||
| 1541 | opts.new_root = new_root; | ||
| 1542 | 1506 | ||
| 1543 | /* Locate an existing or new sb for this hierarchy */ | 1507 | /* look for a matching existing root */ |
| 1544 | sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts); | 1508 | if (!opts.subsys_mask && !opts.none && !opts.name) { |
| 1545 | if (IS_ERR(sb)) { | 1509 | cgrp_dfl_root_visible = true; |
| 1546 | ret = PTR_ERR(sb); | 1510 | root = &cgrp_dfl_root; |
| 1547 | cgroup_free_root(opts.new_root); | 1511 | cgroup_get(&root->cgrp); |
| 1548 | goto out_err; | 1512 | ret = 0; |
| 1513 | goto out_unlock; | ||
| 1549 | } | 1514 | } |
| 1550 | 1515 | ||
| 1551 | root = sb->s_fs_info; | 1516 | for_each_root(root) { |
| 1552 | BUG_ON(!root); | 1517 | bool name_match = false; |
| 1553 | if (root == opts.new_root) { | ||
| 1554 | /* We used the new root structure, so this is a new hierarchy */ | ||
| 1555 | struct cgroup *root_cgrp = &root->top_cgroup; | ||
| 1556 | struct cgroupfs_root *existing_root; | ||
| 1557 | int i; | ||
| 1558 | struct css_set *cset; | ||
| 1559 | |||
| 1560 | BUG_ON(sb->s_root != NULL); | ||
| 1561 | 1518 | ||
| 1562 | ret = cgroup_get_rootdir(sb); | 1519 | if (root == &cgrp_dfl_root) |
| 1563 | if (ret) | 1520 | continue; |
| 1564 | goto drop_new_super; | ||
| 1565 | inode = sb->s_root->d_inode; | ||
| 1566 | |||
| 1567 | mutex_lock(&inode->i_mutex); | ||
| 1568 | mutex_lock(&cgroup_mutex); | ||
| 1569 | mutex_lock(&cgroup_root_mutex); | ||
| 1570 | |||
| 1571 | ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); | ||
| 1572 | if (ret < 0) | ||
| 1573 | goto unlock_drop; | ||
| 1574 | root_cgrp->id = ret; | ||
| 1575 | |||
| 1576 | /* Check for name clashes with existing mounts */ | ||
| 1577 | ret = -EBUSY; | ||
| 1578 | if (strlen(root->name)) | ||
| 1579 | for_each_active_root(existing_root) | ||
| 1580 | if (!strcmp(existing_root->name, root->name)) | ||
| 1581 | goto unlock_drop; | ||
| 1582 | |||
| 1583 | /* | ||
| 1584 | * We're accessing css_set_count without locking | ||
| 1585 | * css_set_lock here, but that's OK - it can only be | ||
| 1586 | * increased by someone holding cgroup_lock, and | ||
| 1587 | * that's us. The worst that can happen is that we | ||
| 1588 | * have some link structures left over | ||
| 1589 | */ | ||
| 1590 | ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); | ||
| 1591 | if (ret) | ||
| 1592 | goto unlock_drop; | ||
| 1593 | |||
| 1594 | /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ | ||
| 1595 | ret = cgroup_init_root_id(root, 2, 0); | ||
| 1596 | if (ret) | ||
| 1597 | goto unlock_drop; | ||
| 1598 | |||
| 1599 | sb->s_root->d_fsdata = root_cgrp; | ||
| 1600 | root_cgrp->dentry = sb->s_root; | ||
| 1601 | |||
| 1602 | /* | ||
| 1603 | * We're inside get_sb() and will call lookup_one_len() to | ||
| 1604 | * create the root files, which doesn't work if SELinux is | ||
| 1605 | * in use. The following cred dancing somehow works around | ||
| 1606 | * it. See 2ce9738ba ("cgroupfs: use init_cred when | ||
| 1607 | * populating new cgroupfs mount") for more details. | ||
| 1608 | */ | ||
| 1609 | cred = override_creds(&init_cred); | ||
| 1610 | |||
| 1611 | ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true); | ||
| 1612 | if (ret) | ||
| 1613 | goto rm_base_files; | ||
| 1614 | |||
| 1615 | ret = rebind_subsystems(root, root->subsys_mask, 0); | ||
| 1616 | if (ret) | ||
| 1617 | goto rm_base_files; | ||
| 1618 | |||
| 1619 | revert_creds(cred); | ||
| 1620 | 1521 | ||
| 1621 | /* | 1522 | /* |
| 1622 | * There must be no failure case after here, since rebinding | 1523 | * If we asked for a name then it must match. Also, if |
| 1623 | * takes care of subsystems' refcounts, which are explicitly | 1524 | * name matches but subsys_mask doesn't, we should fail. |
| 1624 | * dropped in the failure exit path. | 1525 | * Remember whether name matched. |
| 1625 | */ | 1526 | */ |
| 1527 | if (opts.name) { | ||
| 1528 | if (strcmp(opts.name, root->name)) | ||
| 1529 | continue; | ||
| 1530 | name_match = true; | ||
| 1531 | } | ||
| 1626 | 1532 | ||
| 1627 | list_add(&root->root_list, &cgroup_roots); | ||
| 1628 | cgroup_root_count++; | ||
| 1629 | |||
| 1630 | /* Link the top cgroup in this hierarchy into all | ||
| 1631 | * the css_set objects */ | ||
| 1632 | write_lock(&css_set_lock); | ||
| 1633 | hash_for_each(css_set_table, i, cset, hlist) | ||
| 1634 | link_css_set(&tmp_links, cset, root_cgrp); | ||
| 1635 | write_unlock(&css_set_lock); | ||
| 1636 | |||
| 1637 | free_cgrp_cset_links(&tmp_links); | ||
| 1638 | |||
| 1639 | BUG_ON(!list_empty(&root_cgrp->children)); | ||
| 1640 | BUG_ON(root->number_of_cgroups != 1); | ||
| 1641 | |||
| 1642 | mutex_unlock(&cgroup_root_mutex); | ||
| 1643 | mutex_unlock(&cgroup_mutex); | ||
| 1644 | mutex_unlock(&inode->i_mutex); | ||
| 1645 | } else { | ||
| 1646 | /* | 1533 | /* |
| 1647 | * We re-used an existing hierarchy - the new root (if | 1534 | * If we asked for subsystems (or explicitly for no |
| 1648 | * any) is not needed | 1535 | * subsystems) then they must match. |
| 1649 | */ | 1536 | */ |
| 1650 | cgroup_free_root(opts.new_root); | 1537 | if ((opts.subsys_mask || opts.none) && |
| 1538 | (opts.subsys_mask != root->cgrp.subsys_mask)) { | ||
| 1539 | if (!name_match) | ||
| 1540 | continue; | ||
| 1541 | ret = -EBUSY; | ||
| 1542 | goto out_unlock; | ||
| 1543 | } | ||
| 1651 | 1544 | ||
| 1652 | if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) { | 1545 | if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) { |
| 1653 | if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { | 1546 | if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { |
| 1654 | pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); | 1547 | pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); |
| 1655 | ret = -EINVAL; | 1548 | ret = -EINVAL; |
| 1656 | goto drop_new_super; | 1549 | goto out_unlock; |
| 1657 | } else { | 1550 | } else { |
| 1658 | pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n"); | 1551 | pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n"); |
| 1659 | } | 1552 | } |
| 1660 | } | 1553 | } |
| 1661 | } | ||
| 1662 | |||
| 1663 | kfree(opts.release_agent); | ||
| 1664 | kfree(opts.name); | ||
| 1665 | return dget(sb->s_root); | ||
| 1666 | 1554 | ||
| 1667 | rm_base_files: | 1555 | /* |
| 1668 | free_cgrp_cset_links(&tmp_links); | 1556 | * A root's lifetime is governed by its root cgroup. Zero |
| 1669 | cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false); | 1557 | * ref indicates that the root is being destroyed. Wait for |
| 1670 | revert_creds(cred); | 1558 | * destruction to complete so that the subsystems are free. |
| 1671 | unlock_drop: | 1559 | * We can use wait_queue for the wait but this path is |
| 1672 | cgroup_exit_root_id(root); | 1560 | * super cold. Let's just sleep for a bit and retry. |
| 1673 | mutex_unlock(&cgroup_root_mutex); | 1561 | */ |
| 1674 | mutex_unlock(&cgroup_mutex); | 1562 | if (!atomic_inc_not_zero(&root->cgrp.refcnt)) { |
| 1675 | mutex_unlock(&inode->i_mutex); | 1563 | mutex_unlock(&cgroup_mutex); |
| 1676 | drop_new_super: | 1564 | mutex_unlock(&cgroup_tree_mutex); |
| 1677 | deactivate_locked_super(sb); | 1565 | kfree(opts.release_agent); |
| 1678 | out_err: | 1566 | kfree(opts.name); |
| 1679 | kfree(opts.release_agent); | 1567 | msleep(10); |
| 1680 | kfree(opts.name); | 1568 | goto retry; |
| 1681 | return ERR_PTR(ret); | 1569 | } |
| 1682 | } | ||
| 1683 | |||
| 1684 | static void cgroup_kill_sb(struct super_block *sb) | ||
| 1685 | { | ||
| 1686 | struct cgroupfs_root *root = sb->s_fs_info; | ||
| 1687 | struct cgroup *cgrp = &root->top_cgroup; | ||
| 1688 | struct cgrp_cset_link *link, *tmp_link; | ||
| 1689 | int ret; | ||
| 1690 | |||
| 1691 | BUG_ON(!root); | ||
| 1692 | |||
| 1693 | BUG_ON(root->number_of_cgroups != 1); | ||
| 1694 | BUG_ON(!list_empty(&cgrp->children)); | ||
| 1695 | |||
| 1696 | mutex_lock(&cgrp->dentry->d_inode->i_mutex); | ||
| 1697 | mutex_lock(&cgroup_mutex); | ||
| 1698 | mutex_lock(&cgroup_root_mutex); | ||
| 1699 | 1570 | ||
| 1700 | /* Rebind all subsystems back to the default hierarchy */ | 1571 | ret = 0; |
| 1701 | if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { | 1572 | goto out_unlock; |
| 1702 | ret = rebind_subsystems(root, 0, root->subsys_mask); | ||
| 1703 | /* Shouldn't be able to fail ... */ | ||
| 1704 | BUG_ON(ret); | ||
| 1705 | } | 1573 | } |
| 1706 | 1574 | ||
| 1707 | /* | 1575 | /* |
| 1708 | * Release all the links from cset_links to this hierarchy's | 1576 | * No such thing, create a new one. name= matching without subsys |
| 1709 | * root cgroup | 1577 | * specification is allowed for already existing hierarchies but we |
| 1578 | * can't create a new one without subsys specification. | ||
| 1710 | */ | 1579 | */ |
| 1711 | write_lock(&css_set_lock); | 1580 | if (!opts.subsys_mask && !opts.none) { |
| 1712 | 1581 | ret = -EINVAL; | |
| 1713 | list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { | 1582 | goto out_unlock; |
| 1714 | list_del(&link->cset_link); | ||
| 1715 | list_del(&link->cgrp_link); | ||
| 1716 | kfree(link); | ||
| 1717 | } | 1583 | } |
| 1718 | write_unlock(&css_set_lock); | ||
| 1719 | 1584 | ||
| 1720 | if (!list_empty(&root->root_list)) { | 1585 | root = kzalloc(sizeof(*root), GFP_KERNEL); |
| 1721 | list_del(&root->root_list); | 1586 | if (!root) { |
| 1722 | cgroup_root_count--; | 1587 | ret = -ENOMEM; |
| 1588 | goto out_unlock; | ||
| 1723 | } | 1589 | } |
| 1724 | 1590 | ||
| 1725 | cgroup_exit_root_id(root); | 1591 | init_cgroup_root(root, &opts); |
| 1592 | |||
| 1593 | ret = cgroup_setup_root(root, opts.subsys_mask); | ||
| 1594 | if (ret) | ||
| 1595 | cgroup_free_root(root); | ||
| 1726 | 1596 | ||
| 1727 | mutex_unlock(&cgroup_root_mutex); | 1597 | out_unlock: |
| 1728 | mutex_unlock(&cgroup_mutex); | 1598 | mutex_unlock(&cgroup_mutex); |
| 1729 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 1599 | mutex_unlock(&cgroup_tree_mutex); |
| 1600 | |||
| 1601 | kfree(opts.release_agent); | ||
| 1602 | kfree(opts.name); | ||
| 1730 | 1603 | ||
| 1731 | simple_xattrs_free(&cgrp->xattrs); | 1604 | if (ret) |
| 1605 | return ERR_PTR(ret); | ||
| 1732 | 1606 | ||
| 1733 | kill_litter_super(sb); | 1607 | dentry = kernfs_mount(fs_type, flags, root->kf_root, &new_sb); |
| 1734 | cgroup_free_root(root); | 1608 | if (IS_ERR(dentry) || !new_sb) |
| 1609 | cgroup_put(&root->cgrp); | ||
| 1610 | return dentry; | ||
| 1611 | } | ||
| 1612 | |||
| 1613 | static void cgroup_kill_sb(struct super_block *sb) | ||
| 1614 | { | ||
| 1615 | struct kernfs_root *kf_root = kernfs_root_from_sb(sb); | ||
| 1616 | struct cgroup_root *root = cgroup_root_from_kf(kf_root); | ||
| 1617 | |||
| 1618 | cgroup_put(&root->cgrp); | ||
| 1619 | kernfs_kill_sb(sb); | ||
| 1735 | } | 1620 | } |
| 1736 | 1621 | ||
| 1737 | static struct file_system_type cgroup_fs_type = { | 1622 | static struct file_system_type cgroup_fs_type = { |
| @@ -1743,57 +1628,6 @@ static struct file_system_type cgroup_fs_type = { | |||
| 1743 | static struct kobject *cgroup_kobj; | 1628 | static struct kobject *cgroup_kobj; |
| 1744 | 1629 | ||
| 1745 | /** | 1630 | /** |
| 1746 | * cgroup_path - generate the path of a cgroup | ||
| 1747 | * @cgrp: the cgroup in question | ||
| 1748 | * @buf: the buffer to write the path into | ||
| 1749 | * @buflen: the length of the buffer | ||
| 1750 | * | ||
| 1751 | * Writes path of cgroup into buf. Returns 0 on success, -errno on error. | ||
| 1752 | * | ||
| 1753 | * We can't generate cgroup path using dentry->d_name, as accessing | ||
| 1754 | * dentry->name must be protected by irq-unsafe dentry->d_lock or parent | ||
| 1755 | * inode's i_mutex, while on the other hand cgroup_path() can be called | ||
| 1756 | * with some irq-safe spinlocks held. | ||
| 1757 | */ | ||
| 1758 | int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) | ||
| 1759 | { | ||
| 1760 | int ret = -ENAMETOOLONG; | ||
| 1761 | char *start; | ||
| 1762 | |||
| 1763 | if (!cgrp->parent) { | ||
| 1764 | if (strlcpy(buf, "/", buflen) >= buflen) | ||
| 1765 | return -ENAMETOOLONG; | ||
| 1766 | return 0; | ||
| 1767 | } | ||
| 1768 | |||
| 1769 | start = buf + buflen - 1; | ||
| 1770 | *start = '\0'; | ||
| 1771 | |||
| 1772 | rcu_read_lock(); | ||
| 1773 | do { | ||
| 1774 | const char *name = cgroup_name(cgrp); | ||
| 1775 | int len; | ||
| 1776 | |||
| 1777 | len = strlen(name); | ||
| 1778 | if ((start -= len) < buf) | ||
| 1779 | goto out; | ||
| 1780 | memcpy(start, name, len); | ||
| 1781 | |||
| 1782 | if (--start < buf) | ||
| 1783 | goto out; | ||
| 1784 | *start = '/'; | ||
| 1785 | |||
| 1786 | cgrp = cgrp->parent; | ||
| 1787 | } while (cgrp->parent); | ||
| 1788 | ret = 0; | ||
| 1789 | memmove(buf, start, buf + buflen - start); | ||
| 1790 | out: | ||
| 1791 | rcu_read_unlock(); | ||
| 1792 | return ret; | ||
| 1793 | } | ||
| 1794 | EXPORT_SYMBOL_GPL(cgroup_path); | ||
| 1795 | |||
| 1796 | /** | ||
| 1797 | * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy | 1631 | * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy |
| 1798 | * @task: target task | 1632 | * @task: target task |
| 1799 | * @buf: the buffer to write the path into | 1633 | * @buf: the buffer to write the path into |
| @@ -1804,49 +1638,55 @@ EXPORT_SYMBOL_GPL(cgroup_path); | |||
| 1804 | * function grabs cgroup_mutex and shouldn't be used inside locks used by | 1638 | * function grabs cgroup_mutex and shouldn't be used inside locks used by |
| 1805 | * cgroup controller callbacks. | 1639 | * cgroup controller callbacks. |
| 1806 | * | 1640 | * |
| 1807 | * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short. | 1641 | * Return value is the same as kernfs_path(). |
| 1808 | */ | 1642 | */ |
| 1809 | int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) | 1643 | char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) |
| 1810 | { | 1644 | { |
| 1811 | struct cgroupfs_root *root; | 1645 | struct cgroup_root *root; |
| 1812 | struct cgroup *cgrp; | 1646 | struct cgroup *cgrp; |
| 1813 | int hierarchy_id = 1, ret = 0; | 1647 | int hierarchy_id = 1; |
| 1814 | 1648 | char *path = NULL; | |
| 1815 | if (buflen < 2) | ||
| 1816 | return -ENAMETOOLONG; | ||
| 1817 | 1649 | ||
| 1818 | mutex_lock(&cgroup_mutex); | 1650 | mutex_lock(&cgroup_mutex); |
| 1651 | down_read(&css_set_rwsem); | ||
| 1819 | 1652 | ||
| 1820 | root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); | 1653 | root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); |
| 1821 | 1654 | ||
| 1822 | if (root) { | 1655 | if (root) { |
| 1823 | cgrp = task_cgroup_from_root(task, root); | 1656 | cgrp = task_cgroup_from_root(task, root); |
| 1824 | ret = cgroup_path(cgrp, buf, buflen); | 1657 | path = cgroup_path(cgrp, buf, buflen); |
| 1825 | } else { | 1658 | } else { |
| 1826 | /* if no hierarchy exists, everyone is in "/" */ | 1659 | /* if no hierarchy exists, everyone is in "/" */ |
| 1827 | memcpy(buf, "/", 2); | 1660 | if (strlcpy(buf, "/", buflen) < buflen) |
| 1661 | path = buf; | ||
| 1828 | } | 1662 | } |
| 1829 | 1663 | ||
| 1664 | up_read(&css_set_rwsem); | ||
| 1830 | mutex_unlock(&cgroup_mutex); | 1665 | mutex_unlock(&cgroup_mutex); |
| 1831 | return ret; | 1666 | return path; |
| 1832 | } | 1667 | } |
| 1833 | EXPORT_SYMBOL_GPL(task_cgroup_path); | 1668 | EXPORT_SYMBOL_GPL(task_cgroup_path); |
| 1834 | 1669 | ||
| 1835 | /* | 1670 | /* used to track tasks and other necessary states during migration */ |
| 1836 | * Control Group taskset | ||
| 1837 | */ | ||
| 1838 | struct task_and_cgroup { | ||
| 1839 | struct task_struct *task; | ||
| 1840 | struct cgroup *cgrp; | ||
| 1841 | struct css_set *cset; | ||
| 1842 | }; | ||
| 1843 | |||
| 1844 | struct cgroup_taskset { | 1671 | struct cgroup_taskset { |
| 1845 | struct task_and_cgroup single; | 1672 | /* the src and dst cset list running through cset->mg_node */ |
| 1846 | struct flex_array *tc_array; | 1673 | struct list_head src_csets; |
| 1847 | int tc_array_len; | 1674 | struct list_head dst_csets; |
| 1848 | int idx; | 1675 | |
| 1849 | struct cgroup *cur_cgrp; | 1676 | /* |
| 1677 | * Fields for cgroup_taskset_*() iteration. | ||
| 1678 | * | ||
| 1679 | * Before migration is committed, the target migration tasks are on | ||
| 1680 | * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of | ||
| 1681 | * the csets on ->dst_csets. ->csets point to either ->src_csets | ||
| 1682 | * or ->dst_csets depending on whether migration is committed. | ||
| 1683 | * | ||
| 1684 | * ->cur_cset and ->cur_task point to the current task position | ||
| 1685 | * during iteration. | ||
| 1686 | */ | ||
| 1687 | struct list_head *csets; | ||
| 1688 | struct css_set *cur_cset; | ||
| 1689 | struct task_struct *cur_task; | ||
| 1850 | }; | 1690 | }; |
| 1851 | 1691 | ||
| 1852 | /** | 1692 | /** |
| @@ -1857,15 +1697,11 @@ struct cgroup_taskset { | |||
| 1857 | */ | 1697 | */ |
| 1858 | struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) | 1698 | struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) |
| 1859 | { | 1699 | { |
| 1860 | if (tset->tc_array) { | 1700 | tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); |
| 1861 | tset->idx = 0; | 1701 | tset->cur_task = NULL; |
| 1862 | return cgroup_taskset_next(tset); | 1702 | |
| 1863 | } else { | 1703 | return cgroup_taskset_next(tset); |
| 1864 | tset->cur_cgrp = tset->single.cgrp; | ||
| 1865 | return tset->single.task; | ||
| 1866 | } | ||
| 1867 | } | 1704 | } |
| 1868 | EXPORT_SYMBOL_GPL(cgroup_taskset_first); | ||
| 1869 | 1705 | ||
| 1870 | /** | 1706 | /** |
| 1871 | * cgroup_taskset_next - iterate to the next task in taskset | 1707 | * cgroup_taskset_next - iterate to the next task in taskset |
| @@ -1876,48 +1712,36 @@ EXPORT_SYMBOL_GPL(cgroup_taskset_first); | |||
| 1876 | */ | 1712 | */ |
| 1877 | struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) | 1713 | struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) |
| 1878 | { | 1714 | { |
| 1879 | struct task_and_cgroup *tc; | 1715 | struct css_set *cset = tset->cur_cset; |
| 1716 | struct task_struct *task = tset->cur_task; | ||
| 1880 | 1717 | ||
| 1881 | if (!tset->tc_array || tset->idx >= tset->tc_array_len) | 1718 | while (&cset->mg_node != tset->csets) { |
| 1882 | return NULL; | 1719 | if (!task) |
| 1720 | task = list_first_entry(&cset->mg_tasks, | ||
| 1721 | struct task_struct, cg_list); | ||
| 1722 | else | ||
| 1723 | task = list_next_entry(task, cg_list); | ||
| 1883 | 1724 | ||
| 1884 | tc = flex_array_get(tset->tc_array, tset->idx++); | 1725 | if (&task->cg_list != &cset->mg_tasks) { |
| 1885 | tset->cur_cgrp = tc->cgrp; | 1726 | tset->cur_cset = cset; |
| 1886 | return tc->task; | 1727 | tset->cur_task = task; |
| 1887 | } | 1728 | return task; |
| 1888 | EXPORT_SYMBOL_GPL(cgroup_taskset_next); | 1729 | } |
| 1889 | 1730 | ||
| 1890 | /** | 1731 | cset = list_next_entry(cset, mg_node); |
| 1891 | * cgroup_taskset_cur_css - return the matching css for the current task | 1732 | task = NULL; |
| 1892 | * @tset: taskset of interest | 1733 | } |
| 1893 | * @subsys_id: the ID of the target subsystem | ||
| 1894 | * | ||
| 1895 | * Return the css for the current (last returned) task of @tset for | ||
| 1896 | * subsystem specified by @subsys_id. This function must be preceded by | ||
| 1897 | * either cgroup_taskset_first() or cgroup_taskset_next(). | ||
| 1898 | */ | ||
| 1899 | struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset, | ||
| 1900 | int subsys_id) | ||
| 1901 | { | ||
| 1902 | return cgroup_css(tset->cur_cgrp, cgroup_subsys[subsys_id]); | ||
| 1903 | } | ||
| 1904 | EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css); | ||
| 1905 | 1734 | ||
| 1906 | /** | 1735 | return NULL; |
| 1907 | * cgroup_taskset_size - return the number of tasks in taskset | ||
| 1908 | * @tset: taskset of interest | ||
| 1909 | */ | ||
| 1910 | int cgroup_taskset_size(struct cgroup_taskset *tset) | ||
| 1911 | { | ||
| 1912 | return tset->tc_array ? tset->tc_array_len : 1; | ||
| 1913 | } | 1736 | } |
| 1914 | EXPORT_SYMBOL_GPL(cgroup_taskset_size); | ||
| 1915 | |||
| 1916 | 1737 | ||
| 1917 | /* | 1738 | /** |
| 1918 | * cgroup_task_migrate - move a task from one cgroup to another. | 1739 | * cgroup_task_migrate - move a task from one cgroup to another. |
| 1740 | * @old_cgrp: the cgroup @tsk is being migrated from | ||
| 1741 | * @tsk: the task being migrated | ||
| 1742 | * @new_cset: the new css_set @tsk is being attached to | ||
| 1919 | * | 1743 | * |
| 1920 | * Must be called with cgroup_mutex and threadgroup locked. | 1744 | * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked. |
| 1921 | */ | 1745 | */ |
| 1922 | static void cgroup_task_migrate(struct cgroup *old_cgrp, | 1746 | static void cgroup_task_migrate(struct cgroup *old_cgrp, |
| 1923 | struct task_struct *tsk, | 1747 | struct task_struct *tsk, |
| @@ -1925,6 +1749,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, | |||
| 1925 | { | 1749 | { |
| 1926 | struct css_set *old_cset; | 1750 | struct css_set *old_cset; |
| 1927 | 1751 | ||
| 1752 | lockdep_assert_held(&cgroup_mutex); | ||
| 1753 | lockdep_assert_held(&css_set_rwsem); | ||
| 1754 | |||
| 1928 | /* | 1755 | /* |
| 1929 | * We are synchronized through threadgroup_lock() against PF_EXITING | 1756 | * We are synchronized through threadgroup_lock() against PF_EXITING |
| 1930 | * setting such that we can't race against cgroup_exit() changing the | 1757 | * setting such that we can't race against cgroup_exit() changing the |
| @@ -1933,15 +1760,16 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, | |||
| 1933 | WARN_ON_ONCE(tsk->flags & PF_EXITING); | 1760 | WARN_ON_ONCE(tsk->flags & PF_EXITING); |
| 1934 | old_cset = task_css_set(tsk); | 1761 | old_cset = task_css_set(tsk); |
| 1935 | 1762 | ||
| 1936 | task_lock(tsk); | 1763 | get_css_set(new_cset); |
| 1937 | rcu_assign_pointer(tsk->cgroups, new_cset); | 1764 | rcu_assign_pointer(tsk->cgroups, new_cset); |
| 1938 | task_unlock(tsk); | ||
| 1939 | 1765 | ||
| 1940 | /* Update the css_set linked lists if we're using them */ | 1766 | /* |
| 1941 | write_lock(&css_set_lock); | 1767 | * Use move_tail so that cgroup_taskset_first() still returns the |
| 1942 | if (!list_empty(&tsk->cg_list)) | 1768 | * leader after migration. This works because cgroup_migrate() |
| 1943 | list_move(&tsk->cg_list, &new_cset->tasks); | 1769 | * ensures that the dst_cset of the leader is the first on the |
| 1944 | write_unlock(&css_set_lock); | 1770 | * tset's dst_csets list. |
| 1771 | */ | ||
| 1772 | list_move_tail(&tsk->cg_list, &new_cset->mg_tasks); | ||
| 1945 | 1773 | ||
| 1946 | /* | 1774 | /* |
| 1947 | * We just gained a reference on old_cset by taking it from the | 1775 | * We just gained a reference on old_cset by taking it from the |
| @@ -1949,100 +1777,199 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, | |||
| 1949 | * we're safe to drop it here; it will be freed under RCU. | 1777 | * we're safe to drop it here; it will be freed under RCU. |
| 1950 | */ | 1778 | */ |
| 1951 | set_bit(CGRP_RELEASABLE, &old_cgrp->flags); | 1779 | set_bit(CGRP_RELEASABLE, &old_cgrp->flags); |
| 1952 | put_css_set(old_cset); | 1780 | put_css_set_locked(old_cset, false); |
| 1953 | } | 1781 | } |
| 1954 | 1782 | ||
| 1955 | /** | 1783 | /** |
| 1956 | * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup | 1784 | * cgroup_migrate_finish - cleanup after attach |
| 1957 | * @cgrp: the cgroup to attach to | 1785 | * @preloaded_csets: list of preloaded css_sets |
| 1958 | * @tsk: the task or the leader of the threadgroup to be attached | ||
| 1959 | * @threadgroup: attach the whole threadgroup? | ||
| 1960 | * | 1786 | * |
| 1961 | * Call holding cgroup_mutex and the group_rwsem of the leader. Will take | 1787 | * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See |
| 1962 | * task_lock of @tsk or each thread in the threadgroup individually in turn. | 1788 | * those functions for details. |
| 1963 | */ | 1789 | */ |
| 1964 | static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, | 1790 | static void cgroup_migrate_finish(struct list_head *preloaded_csets) |
| 1965 | bool threadgroup) | ||
| 1966 | { | 1791 | { |
| 1967 | int retval, i, group_size; | 1792 | struct css_set *cset, *tmp_cset; |
| 1968 | struct cgroupfs_root *root = cgrp->root; | ||
| 1969 | struct cgroup_subsys_state *css, *failed_css = NULL; | ||
| 1970 | /* threadgroup list cursor and array */ | ||
| 1971 | struct task_struct *leader = tsk; | ||
| 1972 | struct task_and_cgroup *tc; | ||
| 1973 | struct flex_array *group; | ||
| 1974 | struct cgroup_taskset tset = { }; | ||
| 1975 | 1793 | ||
| 1976 | /* | 1794 | lockdep_assert_held(&cgroup_mutex); |
| 1977 | * step 0: in order to do expensive, possibly blocking operations for | 1795 | |
| 1978 | * every thread, we cannot iterate the thread group list, since it needs | 1796 | down_write(&css_set_rwsem); |
| 1979 | * rcu or tasklist locked. instead, build an array of all threads in the | 1797 | list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) { |
| 1980 | * group - group_rwsem prevents new threads from appearing, and if | 1798 | cset->mg_src_cgrp = NULL; |
| 1981 | * threads exit, this will just be an over-estimate. | 1799 | cset->mg_dst_cset = NULL; |
| 1982 | */ | 1800 | list_del_init(&cset->mg_preload_node); |
| 1983 | if (threadgroup) | 1801 | put_css_set_locked(cset, false); |
| 1984 | group_size = get_nr_threads(tsk); | 1802 | } |
| 1985 | else | 1803 | up_write(&css_set_rwsem); |
| 1986 | group_size = 1; | 1804 | } |
| 1987 | /* flex_array supports very large thread-groups better than kmalloc. */ | 1805 | |
| 1988 | group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL); | 1806 | /** |
| 1989 | if (!group) | 1807 | * cgroup_migrate_add_src - add a migration source css_set |
| 1990 | return -ENOMEM; | 1808 | * @src_cset: the source css_set to add |
| 1991 | /* pre-allocate to guarantee space while iterating in rcu read-side. */ | 1809 | * @dst_cgrp: the destination cgroup |
| 1992 | retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL); | 1810 | * @preloaded_csets: list of preloaded css_sets |
| 1993 | if (retval) | 1811 | * |
| 1994 | goto out_free_group_list; | 1812 | * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin |
| 1813 | * @src_cset and add it to @preloaded_csets, which should later be cleaned | ||
| 1814 | * up by cgroup_migrate_finish(). | ||
| 1815 | * | ||
| 1816 | * This function may be called without holding threadgroup_lock even if the | ||
| 1817 | * target is a process. Threads may be created and destroyed but as long | ||
| 1818 | * as cgroup_mutex is not dropped, no new css_set can be put into play and | ||
| 1819 | * the preloaded css_sets are guaranteed to cover all migrations. | ||
| 1820 | */ | ||
| 1821 | static void cgroup_migrate_add_src(struct css_set *src_cset, | ||
| 1822 | struct cgroup *dst_cgrp, | ||
| 1823 | struct list_head *preloaded_csets) | ||
| 1824 | { | ||
| 1825 | struct cgroup *src_cgrp; | ||
| 1826 | |||
| 1827 | lockdep_assert_held(&cgroup_mutex); | ||
| 1828 | lockdep_assert_held(&css_set_rwsem); | ||
| 1829 | |||
| 1830 | src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root); | ||
| 1831 | |||
| 1832 | /* nothing to do if this cset already belongs to the cgroup */ | ||
| 1833 | if (src_cgrp == dst_cgrp) | ||
| 1834 | return; | ||
| 1835 | |||
| 1836 | if (!list_empty(&src_cset->mg_preload_node)) | ||
| 1837 | return; | ||
| 1838 | |||
| 1839 | WARN_ON(src_cset->mg_src_cgrp); | ||
| 1840 | WARN_ON(!list_empty(&src_cset->mg_tasks)); | ||
| 1841 | WARN_ON(!list_empty(&src_cset->mg_node)); | ||
| 1842 | |||
| 1843 | src_cset->mg_src_cgrp = src_cgrp; | ||
| 1844 | get_css_set(src_cset); | ||
| 1845 | list_add(&src_cset->mg_preload_node, preloaded_csets); | ||
| 1846 | } | ||
| 1847 | |||
| 1848 | /** | ||
| 1849 | * cgroup_migrate_prepare_dst - prepare destination css_sets for migration | ||
| 1850 | * @dst_cgrp: the destination cgroup | ||
| 1851 | * @preloaded_csets: list of preloaded source css_sets | ||
| 1852 | * | ||
| 1853 | * Tasks are about to be moved to @dst_cgrp and all the source css_sets | ||
| 1854 | * have been preloaded to @preloaded_csets. This function looks up and | ||
| 1855 | * pins all destination css_sets, links each to its source, and puts them on | ||
| 1856 | * @preloaded_csets. | ||
| 1857 | * | ||
| 1858 | * This function must be called after cgroup_migrate_add_src() has been | ||
| 1859 | * called on each migration source css_set. After migration is performed | ||
| 1860 | * using cgroup_migrate(), cgroup_migrate_finish() must be called on | ||
| 1861 | * @preloaded_csets. | ||
| 1862 | */ | ||
| 1863 | static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp, | ||
| 1864 | struct list_head *preloaded_csets) | ||
| 1865 | { | ||
| 1866 | LIST_HEAD(csets); | ||
| 1867 | struct css_set *src_cset; | ||
| 1868 | |||
| 1869 | lockdep_assert_held(&cgroup_mutex); | ||
| 1870 | |||
| 1871 | /* look up the dst cset for each src cset and link it to src */ | ||
| 1872 | list_for_each_entry(src_cset, preloaded_csets, mg_preload_node) { | ||
| 1873 | struct css_set *dst_cset; | ||
| 1874 | |||
| 1875 | dst_cset = find_css_set(src_cset, dst_cgrp); | ||
| 1876 | if (!dst_cset) | ||
| 1877 | goto err; | ||
| 1878 | |||
| 1879 | WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); | ||
| 1880 | src_cset->mg_dst_cset = dst_cset; | ||
| 1881 | |||
| 1882 | if (list_empty(&dst_cset->mg_preload_node)) | ||
| 1883 | list_add(&dst_cset->mg_preload_node, &csets); | ||
| 1884 | else | ||
| 1885 | put_css_set(dst_cset, false); | ||
| 1886 | } | ||
| 1887 | |||
| 1888 | list_splice(&csets, preloaded_csets); | ||
| 1889 | return 0; | ||
| 1890 | err: | ||
| 1891 | cgroup_migrate_finish(&csets); | ||
| 1892 | return -ENOMEM; | ||
| 1893 | } | ||
| 1894 | |||
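Together with cgroup_migrate() and cgroup_migrate_finish() above, these helpers form a prepare/commit/cleanup protocol; the rewritten cgroup_attach_task() further down is the canonical caller. In outline (sketch; cgroup_mutex held throughout, thread-group walk and RCU details elided):

	LIST_HEAD(preloaded_csets);
	int ret;

	/* 1. pin every source css_set */
	down_read(&css_set_rwsem);
	cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &preloaded_csets);
	up_read(&css_set_rwsem);

	/* 2. look up and pin matching destination css_sets, then commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(dst_cgrp, task, false);

	/* 3. always drop the preloaded references */
	cgroup_migrate_finish(&preloaded_csets);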
| 1895 | /** | ||
| 1896 | * cgroup_migrate - migrate a process or task to a cgroup | ||
| 1897 | * @cgrp: the destination cgroup | ||
| 1898 | * @leader: the leader of the process or the task to migrate | ||
| 1899 | * @threadgroup: whether @leader points to the whole process or a single task | ||
| 1900 | * | ||
| 1901 | * Migrate a process or task denoted by @leader to @cgrp. If migrating a | ||
| 1902 | * process, the caller must be holding threadgroup_lock of @leader. The | ||
| 1903 | * caller is also responsible for invoking cgroup_migrate_add_src() and | ||
| 1904 | * cgroup_migrate_prepare_dst() on the targets before invoking this | ||
| 1905 | * function and following up with cgroup_migrate_finish(). | ||
| 1906 | * | ||
| 1907 | * As long as a controller's ->can_attach() doesn't fail, this function is | ||
| 1908 | * guaranteed to succeed. This means that, excluding ->can_attach() | ||
| 1909 | * failure, when migrating multiple targets, the success or failure can be | ||
| 1910 | * decided for all targets by invoking cgroup_migrate_prepare_dst() before | ||
| 1911 | * actually starting the migration. | ||
| 1912 | */ | ||
| 1913 | static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader, | ||
| 1914 | bool threadgroup) | ||
| 1915 | { | ||
| 1916 | struct cgroup_taskset tset = { | ||
| 1917 | .src_csets = LIST_HEAD_INIT(tset.src_csets), | ||
| 1918 | .dst_csets = LIST_HEAD_INIT(tset.dst_csets), | ||
| 1919 | .csets = &tset.src_csets, | ||
| 1920 | }; | ||
| 1921 | struct cgroup_subsys_state *css, *failed_css = NULL; | ||
| 1922 | struct css_set *cset, *tmp_cset; | ||
| 1923 | struct task_struct *task, *tmp_task; | ||
| 1924 | int i, ret; | ||
| 1995 | 1925 | ||
| 1996 | i = 0; | ||
| 1997 | /* | 1926 | /* |
| 1998 | * Prevent freeing of tasks while we take a snapshot. Tasks that are | 1927 | * Prevent freeing of tasks while we take a snapshot. Tasks that are |
| 1999 | * already PF_EXITING could be freed from underneath us unless we | 1928 | * already PF_EXITING could be freed from underneath us unless we |
| 2000 | * take an rcu_read_lock. | 1929 | * take an rcu_read_lock. |
| 2001 | */ | 1930 | */ |
| 1931 | down_write(&css_set_rwsem); | ||
| 2002 | rcu_read_lock(); | 1932 | rcu_read_lock(); |
| 1933 | task = leader; | ||
| 2003 | do { | 1934 | do { |
| 2004 | struct task_and_cgroup ent; | 1935 | /* @task either already exited or can't exit until the end */ |
| 1936 | if (task->flags & PF_EXITING) | ||
| 1937 | goto next; | ||
| 2005 | 1938 | ||
| 2006 | /* @tsk either already exited or can't exit until the end */ | 1939 | /* leave @task alone if post_fork() hasn't linked it yet */ |
| 2007 | if (tsk->flags & PF_EXITING) | 1940 | if (list_empty(&task->cg_list)) |
| 2008 | goto next; | 1941 | goto next; |
| 2009 | 1942 | ||
| 2010 | /* as per above, nr_threads may decrease, but not increase. */ | 1943 | cset = task_css_set(task); |
| 2011 | BUG_ON(i >= group_size); | 1944 | if (!cset->mg_src_cgrp) |
| 2012 | ent.task = tsk; | ||
| 2013 | ent.cgrp = task_cgroup_from_root(tsk, root); | ||
| 2014 | /* nothing to do if this task is already in the cgroup */ | ||
| 2015 | if (ent.cgrp == cgrp) | ||
| 2016 | goto next; | 1945 | goto next; |
| 1946 | |||
| 2017 | /* | 1947 | /* |
| 2018 | * saying GFP_ATOMIC has no effect here because we did prealloc | 1948 | * cgroup_taskset_first() must always return the leader. |
| 2019 | * earlier, but it's good form to communicate our expectations. | 1949 | * Take care to avoid disturbing the ordering. |
| 2020 | */ | 1950 | */ |
| 2021 | retval = flex_array_put(group, i, &ent, GFP_ATOMIC); | 1951 | list_move_tail(&task->cg_list, &cset->mg_tasks); |
| 2022 | BUG_ON(retval != 0); | 1952 | if (list_empty(&cset->mg_node)) |
| 2023 | i++; | 1953 | list_add_tail(&cset->mg_node, &tset.src_csets); |
| 1954 | if (list_empty(&cset->mg_dst_cset->mg_node)) | ||
| 1955 | list_move_tail(&cset->mg_dst_cset->mg_node, | ||
| 1956 | &tset.dst_csets); | ||
| 2024 | next: | 1957 | next: |
| 2025 | if (!threadgroup) | 1958 | if (!threadgroup) |
| 2026 | break; | 1959 | break; |
| 2027 | } while_each_thread(leader, tsk); | 1960 | } while_each_thread(leader, task); |
| 2028 | rcu_read_unlock(); | 1961 | rcu_read_unlock(); |
| 2029 | /* remember the number of threads in the array for later. */ | 1962 | up_write(&css_set_rwsem); |
| 2030 | group_size = i; | ||
| 2031 | tset.tc_array = group; | ||
| 2032 | tset.tc_array_len = group_size; | ||
| 2033 | 1963 | ||
| 2034 | /* methods shouldn't be called if no task is actually migrating */ | 1964 | /* methods shouldn't be called if no task is actually migrating */ |
| 2035 | retval = 0; | 1965 | if (list_empty(&tset.src_csets)) |
| 2036 | if (!group_size) | 1966 | return 0; |
| 2037 | goto out_free_group_list; | ||
| 2038 | 1967 | ||
| 2039 | /* | 1968 | /* check that we can legitimately attach to the cgroup */ |
| 2040 | * step 1: check that we can legitimately attach to the cgroup. | ||
| 2041 | */ | ||
| 2042 | for_each_css(css, i, cgrp) { | 1969 | for_each_css(css, i, cgrp) { |
| 2043 | if (css->ss->can_attach) { | 1970 | if (css->ss->can_attach) { |
| 2044 | retval = css->ss->can_attach(css, &tset); | 1971 | ret = css->ss->can_attach(css, &tset); |
| 2045 | if (retval) { | 1972 | if (ret) { |
| 2046 | failed_css = css; | 1973 | failed_css = css; |
| 2047 | goto out_cancel_attach; | 1974 | goto out_cancel_attach; |
| 2048 | } | 1975 | } |
| @@ -2050,70 +1977,91 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, | |||
| 2050 | } | 1977 | } |
| 2051 | 1978 | ||
| 2052 | /* | 1979 | /* |
| 2053 | * step 2: make sure css_sets exist for all threads to be migrated. | 1980 | * Now that we're guaranteed success, proceed to move all tasks to |
| 2054 | * we use find_css_set, which allocates a new one if necessary. | 1981 | * the new cgroup. There are no failure cases after here, so this |
| 1982 | * is the commit point. | ||
| 2055 | */ | 1983 | */ |
| 2056 | for (i = 0; i < group_size; i++) { | 1984 | down_write(&css_set_rwsem); |
| 2057 | struct css_set *old_cset; | 1985 | list_for_each_entry(cset, &tset.src_csets, mg_node) { |
| 2058 | 1986 | list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) | |
| 2059 | tc = flex_array_get(group, i); | 1987 | cgroup_task_migrate(cset->mg_src_cgrp, task, |
| 2060 | old_cset = task_css_set(tc->task); | 1988 | cset->mg_dst_cset); |
| 2061 | tc->cset = find_css_set(old_cset, cgrp); | ||
| 2062 | if (!tc->cset) { | ||
| 2063 | retval = -ENOMEM; | ||
| 2064 | goto out_put_css_set_refs; | ||
| 2065 | } | ||
| 2066 | } | 1989 | } |
| 1990 | up_write(&css_set_rwsem); | ||
| 2067 | 1991 | ||
| 2068 | /* | 1992 | /* |
| 2069 | * step 3: now that we're guaranteed success wrt the css_sets, | 1993 | * Migration is committed, all target tasks are now on dst_csets. |
| 2070 | * proceed to move all tasks to the new cgroup. There are no | 1994 | * Nothing is sensitive to fork() after this point. Notify |
| 2071 | * failure cases after here, so this is the commit point. | 1995 | * controllers that migration is complete. |
| 2072 | */ | 1996 | */ |
| 2073 | for (i = 0; i < group_size; i++) { | 1997 | tset.csets = &tset.dst_csets; |
| 2074 | tc = flex_array_get(group, i); | ||
| 2075 | cgroup_task_migrate(tc->cgrp, tc->task, tc->cset); | ||
| 2076 | } | ||
| 2077 | /* nothing is sensitive to fork() after this point. */ | ||
| 2078 | 1998 | ||
| 2079 | /* | ||
| 2080 | * step 4: do subsystem attach callbacks. | ||
| 2081 | */ | ||
| 2082 | for_each_css(css, i, cgrp) | 1999 | for_each_css(css, i, cgrp) |
| 2083 | if (css->ss->attach) | 2000 | if (css->ss->attach) |
| 2084 | css->ss->attach(css, &tset); | 2001 | css->ss->attach(css, &tset); |
| 2085 | 2002 | ||
| 2086 | /* | 2003 | ret = 0; |
| 2087 | * step 5: success! and cleanup | 2004 | goto out_release_tset; |
| 2088 | */ | 2005 | |
| 2089 | retval = 0; | ||
| 2090 | out_put_css_set_refs: | ||
| 2091 | if (retval) { | ||
| 2092 | for (i = 0; i < group_size; i++) { | ||
| 2093 | tc = flex_array_get(group, i); | ||
| 2094 | if (!tc->cset) | ||
| 2095 | break; | ||
| 2096 | put_css_set(tc->cset); | ||
| 2097 | } | ||
| 2098 | } | ||
| 2099 | out_cancel_attach: | 2006 | out_cancel_attach: |
| 2100 | if (retval) { | 2007 | for_each_css(css, i, cgrp) { |
| 2101 | for_each_css(css, i, cgrp) { | 2008 | if (css == failed_css) |
| 2102 | if (css == failed_css) | 2009 | break; |
| 2103 | break; | 2010 | if (css->ss->cancel_attach) |
| 2104 | if (css->ss->cancel_attach) | 2011 | css->ss->cancel_attach(css, &tset); |
| 2105 | css->ss->cancel_attach(css, &tset); | ||
| 2106 | } | ||
| 2107 | } | 2012 | } |
| 2108 | out_free_group_list: | 2013 | out_release_tset: |
| 2109 | flex_array_free(group); | 2014 | down_write(&css_set_rwsem); |
| 2110 | return retval; | 2015 | list_splice_init(&tset.dst_csets, &tset.src_csets); |
| 2016 | list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) { | ||
| 2017 | list_splice_tail_init(&cset->mg_tasks, &cset->tasks); | ||
| 2018 | list_del_init(&cset->mg_node); | ||
| 2019 | } | ||
| 2020 | up_write(&css_set_rwsem); | ||
| 2021 | return ret; | ||
| 2022 | } | ||
| 2023 | |||
| 2024 | /** | ||
| 2025 | * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup | ||
| 2026 | * @dst_cgrp: the cgroup to attach to | ||
| 2027 | * @leader: the task or the leader of the threadgroup to be attached | ||
| 2028 | * @threadgroup: attach the whole threadgroup? | ||
| 2029 | * | ||
| 2030 | * Call holding cgroup_mutex and threadgroup_lock of @leader. | ||
| 2031 | */ | ||
| 2032 | static int cgroup_attach_task(struct cgroup *dst_cgrp, | ||
| 2033 | struct task_struct *leader, bool threadgroup) | ||
| 2034 | { | ||
| 2035 | LIST_HEAD(preloaded_csets); | ||
| 2036 | struct task_struct *task; | ||
| 2037 | int ret; | ||
| 2038 | |||
| 2039 | /* look up all src csets */ | ||
| 2040 | down_read(&css_set_rwsem); | ||
| 2041 | rcu_read_lock(); | ||
| 2042 | task = leader; | ||
| 2043 | do { | ||
| 2044 | cgroup_migrate_add_src(task_css_set(task), dst_cgrp, | ||
| 2045 | &preloaded_csets); | ||
| 2046 | if (!threadgroup) | ||
| 2047 | break; | ||
| 2048 | } while_each_thread(leader, task); | ||
| 2049 | rcu_read_unlock(); | ||
| 2050 | up_read(&css_set_rwsem); | ||
| 2051 | |||
| 2052 | /* prepare dst csets and commit */ | ||
| 2053 | ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets); | ||
| 2054 | if (!ret) | ||
| 2055 | ret = cgroup_migrate(dst_cgrp, leader, threadgroup); | ||
| 2056 | |||
| 2057 | cgroup_migrate_finish(&preloaded_csets); | ||
| 2058 | return ret; | ||
| 2111 | } | 2059 | } |
| 2112 | 2060 | ||
| 2113 | /* | 2061 | /* |
| 2114 | * Find the task_struct of the task to attach by vpid and pass it along to the | 2062 | * Find the task_struct of the task to attach by vpid and pass it along to the |
| 2115 | * function to attach either it or all tasks in its threadgroup. Will lock | 2063 | * function to attach either it or all tasks in its threadgroup. Will lock |
| 2116 | * cgroup_mutex and threadgroup; may take task_lock of task. | 2064 | * cgroup_mutex and threadgroup. |
| 2117 | */ | 2065 | */ |
| 2118 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) | 2066 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) |
| 2119 | { | 2067 | { |
| @@ -2198,12 +2146,19 @@ out_unlock_cgroup: | |||
| 2198 | */ | 2146 | */ |
| 2199 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) | 2147 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) |
| 2200 | { | 2148 | { |
| 2201 | struct cgroupfs_root *root; | 2149 | struct cgroup_root *root; |
| 2202 | int retval = 0; | 2150 | int retval = 0; |
| 2203 | 2151 | ||
| 2204 | mutex_lock(&cgroup_mutex); | 2152 | mutex_lock(&cgroup_mutex); |
| 2205 | for_each_active_root(root) { | 2153 | for_each_root(root) { |
| 2206 | struct cgroup *from_cgrp = task_cgroup_from_root(from, root); | 2154 | struct cgroup *from_cgrp; |
| 2155 | |||
| 2156 | if (root == &cgrp_dfl_root) | ||
| 2157 | continue; | ||
| 2158 | |||
| 2159 | down_read(&css_set_rwsem); | ||
| 2160 | from_cgrp = task_cgroup_from_root(from, root); | ||
| 2161 | up_read(&css_set_rwsem); | ||
| 2207 | 2162 | ||
| 2208 | retval = cgroup_attach_task(from_cgrp, tsk, false); | 2163 | retval = cgroup_attach_task(from_cgrp, tsk, false); |
| 2209 | if (retval) | 2164 | if (retval) |
| @@ -2228,16 +2183,17 @@ static int cgroup_procs_write(struct cgroup_subsys_state *css, | |||
| 2228 | } | 2183 | } |
| 2229 | 2184 | ||
| 2230 | static int cgroup_release_agent_write(struct cgroup_subsys_state *css, | 2185 | static int cgroup_release_agent_write(struct cgroup_subsys_state *css, |
| 2231 | struct cftype *cft, const char *buffer) | 2186 | struct cftype *cft, char *buffer) |
| 2232 | { | 2187 | { |
| 2233 | BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX); | 2188 | struct cgroup_root *root = css->cgroup->root; |
| 2234 | if (strlen(buffer) >= PATH_MAX) | 2189 | |
| 2235 | return -EINVAL; | 2190 | BUILD_BUG_ON(sizeof(root->release_agent_path) < PATH_MAX); |
| 2236 | if (!cgroup_lock_live_group(css->cgroup)) | 2191 | if (!cgroup_lock_live_group(css->cgroup)) |
| 2237 | return -ENODEV; | 2192 | return -ENODEV; |
| 2238 | mutex_lock(&cgroup_root_mutex); | 2193 | spin_lock(&release_agent_path_lock); |
| 2239 | strcpy(css->cgroup->root->release_agent_path, buffer); | 2194 | strlcpy(root->release_agent_path, buffer, |
| 2240 | mutex_unlock(&cgroup_root_mutex); | 2195 | sizeof(root->release_agent_path)); |
| 2196 | spin_unlock(&release_agent_path_lock); | ||
| 2241 | mutex_unlock(&cgroup_mutex); | 2197 | mutex_unlock(&cgroup_mutex); |
| 2242 | return 0; | 2198 | return 0; |
| 2243 | } | 2199 | } |
| @@ -2262,32 +2218,23 @@ static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) | |||
| 2262 | return 0; | 2218 | return 0; |
| 2263 | } | 2219 | } |
| 2264 | 2220 | ||
| 2265 | /* A buffer size big enough for numbers or short strings */ | 2221 | static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, |
| 2266 | #define CGROUP_LOCAL_BUFFER_SIZE 64 | 2222 | size_t nbytes, loff_t off) |
| 2267 | |||
| 2268 | static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf, | ||
| 2269 | size_t nbytes, loff_t *ppos) | ||
| 2270 | { | 2223 | { |
| 2271 | struct cfent *cfe = __d_cfe(file->f_dentry); | 2224 | struct cgroup *cgrp = of->kn->parent->priv; |
| 2272 | struct cftype *cft = __d_cft(file->f_dentry); | 2225 | struct cftype *cft = of->kn->priv; |
| 2273 | struct cgroup_subsys_state *css = cfe->css; | 2226 | struct cgroup_subsys_state *css; |
| 2274 | size_t max_bytes = cft->max_write_len ?: CGROUP_LOCAL_BUFFER_SIZE - 1; | ||
| 2275 | char *buf; | ||
| 2276 | int ret; | 2227 | int ret; |
| 2277 | 2228 | ||
| 2278 | if (nbytes >= max_bytes) | 2229 | /* |
| 2279 | return -E2BIG; | 2230 | * kernfs guarantees that a file isn't deleted with operations in |
| 2280 | 2231 | * flight, which means that the matching css is and stays alive and | |
| 2281 | buf = kmalloc(nbytes + 1, GFP_KERNEL); | 2232 | * doesn't need to be pinned. The RCU locking is not necessary |
| 2282 | if (!buf) | 2233 | * either. It's just for the convenience of using cgroup_css(). |
| 2283 | return -ENOMEM; | 2234 | */ |
| 2284 | 2235 | rcu_read_lock(); | |
| 2285 | if (copy_from_user(buf, userbuf, nbytes)) { | 2236 | css = cgroup_css(cgrp, cft->ss); |
| 2286 | ret = -EFAULT; | 2237 | rcu_read_unlock(); |
| 2287 | goto out_free; | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | buf[nbytes] = '\0'; | ||
| 2291 | 2238 | ||
| 2292 | if (cft->write_string) { | 2239 | if (cft->write_string) { |
| 2293 | ret = cft->write_string(css, cft, strstrip(buf)); | 2240 | ret = cft->write_string(css, cft, strstrip(buf)); |
| @@ -2306,53 +2253,23 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf, | |||
| 2306 | } else { | 2253 | } else { |
| 2307 | ret = -EINVAL; | 2254 | ret = -EINVAL; |
| 2308 | } | 2255 | } |
| 2309 | out_free: | 2256 | |
| 2310 | kfree(buf); | ||
| 2311 | return ret ?: nbytes; | 2257 | return ret ?: nbytes; |
| 2312 | } | 2258 | } |
| 2313 | 2259 | ||
| 2314 | /* | ||
| 2315 | * seqfile ops/methods for returning structured data. Currently just | ||
| 2316 | * supports string->u64 maps, but can be extended in future. | ||
| 2317 | */ | ||
| 2318 | |||
| 2319 | static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) | 2260 | static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) |
| 2320 | { | 2261 | { |
| 2321 | struct cftype *cft = seq_cft(seq); | 2262 | return seq_cft(seq)->seq_start(seq, ppos); |
| 2322 | |||
| 2323 | if (cft->seq_start) { | ||
| 2324 | return cft->seq_start(seq, ppos); | ||
| 2325 | } else { | ||
| 2326 | /* | ||
| 2327 | * The same behavior and code as single_open(). Returns | ||
| 2328 | * !NULL if pos is at the beginning; otherwise, NULL. | ||
| 2329 | */ | ||
| 2330 | return NULL + !*ppos; | ||
| 2331 | } | ||
| 2332 | } | 2263 | } |
| 2333 | 2264 | ||
| 2334 | static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) | 2265 | static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) |
| 2335 | { | 2266 | { |
| 2336 | struct cftype *cft = seq_cft(seq); | 2267 | return seq_cft(seq)->seq_next(seq, v, ppos); |
| 2337 | |||
| 2338 | if (cft->seq_next) { | ||
| 2339 | return cft->seq_next(seq, v, ppos); | ||
| 2340 | } else { | ||
| 2341 | /* | ||
| 2342 | * The same behavior and code as single_open(), always | ||
| 2343 | * terminate after the initial read. | ||
| 2344 | */ | ||
| 2345 | ++*ppos; | ||
| 2346 | return NULL; | ||
| 2347 | } | ||
| 2348 | } | 2268 | } |
| 2349 | 2269 | ||
| 2350 | static void cgroup_seqfile_stop(struct seq_file *seq, void *v) | 2270 | static void cgroup_seqfile_stop(struct seq_file *seq, void *v) |
| 2351 | { | 2271 | { |
| 2352 | struct cftype *cft = seq_cft(seq); | 2272 | seq_cft(seq)->seq_stop(seq, v); |
| 2353 | |||
| 2354 | if (cft->seq_stop) | ||
| 2355 | cft->seq_stop(seq, v); | ||
| 2356 | } | 2273 | } |
| 2357 | 2274 | ||
| 2358 | static int cgroup_seqfile_show(struct seq_file *m, void *arg) | 2275 | static int cgroup_seqfile_show(struct seq_file *m, void *arg) |
| @@ -2372,96 +2289,35 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg) | |||
| 2372 | return 0; | 2289 | return 0; |
| 2373 | } | 2290 | } |
| 2374 | 2291 | ||
| 2375 | static struct seq_operations cgroup_seq_operations = { | 2292 | static struct kernfs_ops cgroup_kf_single_ops = { |
| 2376 | .start = cgroup_seqfile_start, | 2293 | .atomic_write_len = PAGE_SIZE, |
| 2377 | .next = cgroup_seqfile_next, | 2294 | .write = cgroup_file_write, |
| 2378 | .stop = cgroup_seqfile_stop, | 2295 | .seq_show = cgroup_seqfile_show, |
| 2379 | .show = cgroup_seqfile_show, | ||
| 2380 | }; | 2296 | }; |
| 2381 | 2297 | ||
| 2382 | static int cgroup_file_open(struct inode *inode, struct file *file) | 2298 | static struct kernfs_ops cgroup_kf_ops = { |
| 2383 | { | 2299 | .atomic_write_len = PAGE_SIZE, |
| 2384 | struct cfent *cfe = __d_cfe(file->f_dentry); | 2300 | .write = cgroup_file_write, |
| 2385 | struct cftype *cft = __d_cft(file->f_dentry); | 2301 | .seq_start = cgroup_seqfile_start, |
| 2386 | struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent); | 2302 | .seq_next = cgroup_seqfile_next, |
| 2387 | struct cgroup_subsys_state *css; | 2303 | .seq_stop = cgroup_seqfile_stop, |
| 2388 | struct cgroup_open_file *of; | 2304 | .seq_show = cgroup_seqfile_show, |
| 2389 | int err; | 2305 | }; |
| 2390 | |||
| 2391 | err = generic_file_open(inode, file); | ||
| 2392 | if (err) | ||
| 2393 | return err; | ||
| 2394 | |||
| 2395 | /* | ||
| 2396 | * If the file belongs to a subsystem, pin the css. Will be | ||
| 2397 | * unpinned either on open failure or release. This ensures that | ||
| 2398 | * @css stays alive for all file operations. | ||
| 2399 | */ | ||
| 2400 | rcu_read_lock(); | ||
| 2401 | css = cgroup_css(cgrp, cft->ss); | ||
| 2402 | if (cft->ss && !css_tryget(css)) | ||
| 2403 | css = NULL; | ||
| 2404 | rcu_read_unlock(); | ||
| 2405 | |||
| 2406 | if (!css) | ||
| 2407 | return -ENODEV; | ||
| 2408 | |||
| 2409 | /* | ||
| 2410 | * @cfe->css is used by read/write/close to determine the | ||
| 2411 | * associated css. @file->private_data would be a better place but | ||
| 2412 | * that's already used by seqfile. Multiple accessors may use it | ||
| 2413 | * simultaneously which is okay as the association never changes. | ||
| 2414 | */ | ||
| 2415 | WARN_ON_ONCE(cfe->css && cfe->css != css); | ||
| 2416 | cfe->css = css; | ||
| 2417 | |||
| 2418 | of = __seq_open_private(file, &cgroup_seq_operations, | ||
| 2419 | sizeof(struct cgroup_open_file)); | ||
| 2420 | if (of) { | ||
| 2421 | of->cfe = cfe; | ||
| 2422 | return 0; | ||
| 2423 | } | ||
| 2424 | |||
| 2425 | if (css->ss) | ||
| 2426 | css_put(css); | ||
| 2427 | return -ENOMEM; | ||
| 2428 | } | ||
| 2429 | |||
| 2430 | static int cgroup_file_release(struct inode *inode, struct file *file) | ||
| 2431 | { | ||
| 2432 | struct cfent *cfe = __d_cfe(file->f_dentry); | ||
| 2433 | struct cgroup_subsys_state *css = cfe->css; | ||
| 2434 | |||
| 2435 | if (css->ss) | ||
| 2436 | css_put(css); | ||
| 2437 | return seq_release_private(inode, file); | ||
| 2438 | } | ||
| 2439 | 2306 | ||
| 2440 | /* | 2307 | /* |
| 2441 | * cgroup_rename - Only allow simple rename of directories in place. | 2308 | * cgroup_rename - Only allow simple rename of directories in place. |
| 2442 | */ | 2309 | */ |
| 2443 | static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, | 2310 | static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, |
| 2444 | struct inode *new_dir, struct dentry *new_dentry) | 2311 | const char *new_name_str) |
| 2445 | { | 2312 | { |
| 2313 | struct cgroup *cgrp = kn->priv; | ||
| 2446 | int ret; | 2314 | int ret; |
| 2447 | struct cgroup_name *name, *old_name; | ||
| 2448 | struct cgroup *cgrp; | ||
| 2449 | |||
| 2450 | /* | ||
| 2451 | * It's convenient to use parent dir's i_mutex to protect | ||
| 2452 | * cgrp->name. | ||
| 2453 | */ | ||
| 2454 | lockdep_assert_held(&old_dir->i_mutex); | ||
| 2455 | 2315 | ||
| 2456 | if (!S_ISDIR(old_dentry->d_inode->i_mode)) | 2316 | if (kernfs_type(kn) != KERNFS_DIR) |
| 2457 | return -ENOTDIR; | 2317 | return -ENOTDIR; |
| 2458 | if (new_dentry->d_inode) | 2318 | if (kn->parent != new_parent) |
| 2459 | return -EEXIST; | ||
| 2460 | if (old_dir != new_dir) | ||
| 2461 | return -EIO; | 2319 | return -EIO; |
| 2462 | 2320 | ||
| 2463 | cgrp = __d_cgrp(old_dentry); | ||
| 2464 | |||
| 2465 | /* | 2321 | /* |
| 2466 | * This isn't a proper migration and its usefulness is very | 2322 | * This isn't a proper migration and its usefulness is very |
| 2467 | * limited. Disallow if sane_behavior. | 2323 | * limited. Disallow if sane_behavior. |
| @@ -2469,218 +2325,61 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 2469 | if (cgroup_sane_behavior(cgrp)) | 2325 | if (cgroup_sane_behavior(cgrp)) |
| 2470 | return -EPERM; | 2326 | return -EPERM; |
| 2471 | 2327 | ||
| 2472 | name = cgroup_alloc_name(new_dentry); | 2328 | /* |
| 2473 | if (!name) | 2329 | * We're gonna grab cgroup_tree_mutex which nests outside kernfs |
| 2474 | return -ENOMEM; | 2330 | * active_ref. kernfs_rename() doesn't require active_ref |
| 2475 | 2331 | * protection. Break them before grabbing cgroup_tree_mutex. | |
| 2476 | ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry); | 2332 | */ |
| 2477 | if (ret) { | 2333 | kernfs_break_active_protection(new_parent); |
| 2478 | kfree(name); | 2334 | kernfs_break_active_protection(kn); |
| 2479 | return ret; | ||
| 2480 | } | ||
| 2481 | |||
| 2482 | old_name = rcu_dereference_protected(cgrp->name, true); | ||
| 2483 | rcu_assign_pointer(cgrp->name, name); | ||
| 2484 | |||
| 2485 | kfree_rcu(old_name, rcu_head); | ||
| 2486 | return 0; | ||
| 2487 | } | ||
| 2488 | |||
| 2489 | static struct simple_xattrs *__d_xattrs(struct dentry *dentry) | ||
| 2490 | { | ||
| 2491 | if (S_ISDIR(dentry->d_inode->i_mode)) | ||
| 2492 | return &__d_cgrp(dentry)->xattrs; | ||
| 2493 | else | ||
| 2494 | return &__d_cfe(dentry)->xattrs; | ||
| 2495 | } | ||
| 2496 | |||
| 2497 | static inline int xattr_enabled(struct dentry *dentry) | ||
| 2498 | { | ||
| 2499 | struct cgroupfs_root *root = dentry->d_sb->s_fs_info; | ||
| 2500 | return root->flags & CGRP_ROOT_XATTR; | ||
| 2501 | } | ||
| 2502 | |||
| 2503 | static bool is_valid_xattr(const char *name) | ||
| 2504 | { | ||
| 2505 | if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || | ||
| 2506 | !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) | ||
| 2507 | return true; | ||
| 2508 | return false; | ||
| 2509 | } | ||
| 2510 | |||
| 2511 | static int cgroup_setxattr(struct dentry *dentry, const char *name, | ||
| 2512 | const void *val, size_t size, int flags) | ||
| 2513 | { | ||
| 2514 | if (!xattr_enabled(dentry)) | ||
| 2515 | return -EOPNOTSUPP; | ||
| 2516 | if (!is_valid_xattr(name)) | ||
| 2517 | return -EINVAL; | ||
| 2518 | return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags); | ||
| 2519 | } | ||
| 2520 | |||
| 2521 | static int cgroup_removexattr(struct dentry *dentry, const char *name) | ||
| 2522 | { | ||
| 2523 | if (!xattr_enabled(dentry)) | ||
| 2524 | return -EOPNOTSUPP; | ||
| 2525 | if (!is_valid_xattr(name)) | ||
| 2526 | return -EINVAL; | ||
| 2527 | return simple_xattr_remove(__d_xattrs(dentry), name); | ||
| 2528 | } | ||
| 2529 | |||
| 2530 | static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name, | ||
| 2531 | void *buf, size_t size) | ||
| 2532 | { | ||
| 2533 | if (!xattr_enabled(dentry)) | ||
| 2534 | return -EOPNOTSUPP; | ||
| 2535 | if (!is_valid_xattr(name)) | ||
| 2536 | return -EINVAL; | ||
| 2537 | return simple_xattr_get(__d_xattrs(dentry), name, buf, size); | ||
| 2538 | } | ||
| 2539 | |||
| 2540 | static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size) | ||
| 2541 | { | ||
| 2542 | if (!xattr_enabled(dentry)) | ||
| 2543 | return -EOPNOTSUPP; | ||
| 2544 | return simple_xattr_list(__d_xattrs(dentry), buf, size); | ||
| 2545 | } | ||
| 2546 | |||
| 2547 | static const struct file_operations cgroup_file_operations = { | ||
| 2548 | .read = seq_read, | ||
| 2549 | .write = cgroup_file_write, | ||
| 2550 | .llseek = generic_file_llseek, | ||
| 2551 | .open = cgroup_file_open, | ||
| 2552 | .release = cgroup_file_release, | ||
| 2553 | }; | ||
| 2554 | |||
| 2555 | static const struct inode_operations cgroup_file_inode_operations = { | ||
| 2556 | .setxattr = cgroup_setxattr, | ||
| 2557 | .getxattr = cgroup_getxattr, | ||
| 2558 | .listxattr = cgroup_listxattr, | ||
| 2559 | .removexattr = cgroup_removexattr, | ||
| 2560 | }; | ||
| 2561 | |||
| 2562 | static const struct inode_operations cgroup_dir_inode_operations = { | ||
| 2563 | .lookup = simple_lookup, | ||
| 2564 | .mkdir = cgroup_mkdir, | ||
| 2565 | .rmdir = cgroup_rmdir, | ||
| 2566 | .rename = cgroup_rename, | ||
| 2567 | .setxattr = cgroup_setxattr, | ||
| 2568 | .getxattr = cgroup_getxattr, | ||
| 2569 | .listxattr = cgroup_listxattr, | ||
| 2570 | .removexattr = cgroup_removexattr, | ||
| 2571 | }; | ||
| 2572 | |||
| 2573 | static int cgroup_create_file(struct dentry *dentry, umode_t mode, | ||
| 2574 | struct super_block *sb) | ||
| 2575 | { | ||
| 2576 | struct inode *inode; | ||
| 2577 | |||
| 2578 | if (!dentry) | ||
| 2579 | return -ENOENT; | ||
| 2580 | if (dentry->d_inode) | ||
| 2581 | return -EEXIST; | ||
| 2582 | 2335 | ||
| 2583 | inode = cgroup_new_inode(mode, sb); | 2336 | mutex_lock(&cgroup_tree_mutex); |
| 2584 | if (!inode) | 2337 | mutex_lock(&cgroup_mutex); |
| 2585 | return -ENOMEM; | ||
| 2586 | 2338 | ||
| 2587 | if (S_ISDIR(mode)) { | 2339 | ret = kernfs_rename(kn, new_parent, new_name_str); |
| 2588 | inode->i_op = &cgroup_dir_inode_operations; | ||
| 2589 | inode->i_fop = &simple_dir_operations; | ||
| 2590 | 2340 | ||
| 2591 | /* start off with i_nlink == 2 (for "." entry) */ | 2341 | mutex_unlock(&cgroup_mutex); |
| 2592 | inc_nlink(inode); | 2342 | mutex_unlock(&cgroup_tree_mutex); |
| 2593 | inc_nlink(dentry->d_parent->d_inode); | ||
| 2594 | 2343 | ||
| 2595 | /* | 2344 | kernfs_unbreak_active_protection(kn); |
| 2596 | * Control reaches here with cgroup_mutex held. | 2345 | kernfs_unbreak_active_protection(new_parent); |
| 2597 | * @inode->i_mutex should nest outside cgroup_mutex but we | 2346 | return ret; |
| 2598 | * want to populate it immediately without releasing | ||
| 2599 | * cgroup_mutex. As @inode isn't visible to anyone else | ||
| 2600 | * yet, trylock will always succeed without affecting | ||
| 2601 | * lockdep checks. | ||
| 2602 | */ | ||
| 2603 | WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex)); | ||
| 2604 | } else if (S_ISREG(mode)) { | ||
| 2605 | inode->i_size = 0; | ||
| 2606 | inode->i_fop = &cgroup_file_operations; | ||
| 2607 | inode->i_op = &cgroup_file_inode_operations; | ||
| 2608 | } | ||
| 2609 | d_instantiate(dentry, inode); | ||
| 2610 | dget(dentry); /* Extra count - pin the dentry in core */ | ||
| 2611 | return 0; | ||
| 2612 | } | 2347 | } |
| 2613 | 2348 | ||
| 2614 | /** | 2349 | /* set uid and gid of cgroup dirs and files to that of the creator */ |
| 2615 | * cgroup_file_mode - deduce file mode of a control file | 2350 | static int cgroup_kn_set_ugid(struct kernfs_node *kn) |
| 2616 | * @cft: the control file in question | ||
| 2617 | * | ||
| 2618 | * returns cft->mode if ->mode is not 0 | ||
| 2619 | * returns S_IRUGO|S_IWUSR if it has both a read and a write handler | ||
| 2620 | * returns S_IRUGO if it has only a read handler | ||
| 2621 | * returns S_IWUSR if it has only a write handler | ||
| 2622 | */ | ||
| 2623 | static umode_t cgroup_file_mode(const struct cftype *cft) | ||
| 2624 | { | 2351 | { |
| 2625 | umode_t mode = 0; | 2352 | struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, |
| 2626 | 2353 | .ia_uid = current_fsuid(), | |
| 2627 | if (cft->mode) | 2354 | .ia_gid = current_fsgid(), }; |
| 2628 | return cft->mode; | ||
| 2629 | |||
| 2630 | if (cft->read_u64 || cft->read_s64 || cft->seq_show) | ||
| 2631 | mode |= S_IRUGO; | ||
| 2632 | 2355 | ||
| 2633 | if (cft->write_u64 || cft->write_s64 || cft->write_string || | 2356 | if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && |
| 2634 | cft->trigger) | 2357 | gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) |
| 2635 | mode |= S_IWUSR; | 2358 | return 0; |
| 2636 | 2359 | ||
| 2637 | return mode; | 2360 | return kernfs_setattr(kn, &iattr); |
| 2638 | } | 2361 | } |
| 2639 | 2362 | ||
| 2640 | static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) | 2363 | static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) |
| 2641 | { | 2364 | { |
| 2642 | struct dentry *dir = cgrp->dentry; | 2365 | char name[CGROUP_FILE_NAME_MAX]; |
| 2643 | struct cgroup *parent = __d_cgrp(dir); | 2366 | struct kernfs_node *kn; |
| 2644 | struct dentry *dentry; | 2367 | struct lock_class_key *key = NULL; |
| 2645 | struct cfent *cfe; | 2368 | int ret; |
| 2646 | int error; | ||
| 2647 | umode_t mode; | ||
| 2648 | char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; | ||
| 2649 | |||
| 2650 | if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && | ||
| 2651 | !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) { | ||
| 2652 | strcpy(name, cft->ss->name); | ||
| 2653 | strcat(name, "."); | ||
| 2654 | } | ||
| 2655 | strcat(name, cft->name); | ||
| 2656 | |||
| 2657 | BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); | ||
| 2658 | |||
| 2659 | cfe = kzalloc(sizeof(*cfe), GFP_KERNEL); | ||
| 2660 | if (!cfe) | ||
| 2661 | return -ENOMEM; | ||
| 2662 | |||
| 2663 | dentry = lookup_one_len(name, dir, strlen(name)); | ||
| 2664 | if (IS_ERR(dentry)) { | ||
| 2665 | error = PTR_ERR(dentry); | ||
| 2666 | goto out; | ||
| 2667 | } | ||
| 2668 | 2369 | ||
| 2669 | cfe->type = (void *)cft; | 2370 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 2670 | cfe->dentry = dentry; | 2371 | key = &cft->lockdep_key; |
| 2671 | dentry->d_fsdata = cfe; | 2372 | #endif |
| 2672 | simple_xattrs_init(&cfe->xattrs); | 2373 | kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), |
| 2374 | cgroup_file_mode(cft), 0, cft->kf_ops, cft, | ||
| 2375 | NULL, false, key); | ||
| 2376 | if (IS_ERR(kn)) | ||
| 2377 | return PTR_ERR(kn); | ||
| 2673 | 2378 | ||
| 2674 | mode = cgroup_file_mode(cft); | 2379 | ret = cgroup_kn_set_ugid(kn); |
| 2675 | error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb); | 2380 | if (ret) |
| 2676 | if (!error) { | 2381 | kernfs_remove(kn); |
| 2677 | list_add_tail(&cfe->node, &parent->files); | 2382 | return ret; |
| 2678 | cfe = NULL; | ||
| 2679 | } | ||
| 2680 | dput(dentry); | ||
| 2681 | out: | ||
| 2682 | kfree(cfe); | ||
| 2683 | return error; | ||
| 2684 | } | 2383 | } |
| 2685 | 2384 | ||
| 2686 | /** | 2385 | /** |
| @@ -2700,11 +2399,12 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], | |||
| 2700 | struct cftype *cft; | 2399 | struct cftype *cft; |
| 2701 | int ret; | 2400 | int ret; |
| 2702 | 2401 | ||
| 2703 | lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); | 2402 | lockdep_assert_held(&cgroup_tree_mutex); |
| 2704 | lockdep_assert_held(&cgroup_mutex); | ||
| 2705 | 2403 | ||
| 2706 | for (cft = cfts; cft->name[0] != '\0'; cft++) { | 2404 | for (cft = cfts; cft->name[0] != '\0'; cft++) { |
| 2707 | /* does cft->flags tell us to skip this file on @cgrp? */ | 2405 | /* does cft->flags tell us to skip this file on @cgrp? */ |
| 2406 | if ((cft->flags & CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) | ||
| 2407 | continue; | ||
| 2708 | if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp)) | 2408 | if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp)) |
| 2709 | continue; | 2409 | continue; |
| 2710 | if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) | 2410 | if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) |
| @@ -2726,44 +2426,19 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], | |||
| 2726 | return 0; | 2426 | return 0; |
| 2727 | } | 2427 | } |
| 2728 | 2428 | ||
| 2729 | static void cgroup_cfts_prepare(void) | 2429 | static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) |
| 2730 | __acquires(&cgroup_mutex) | ||
| 2731 | { | ||
| 2732 | /* | ||
| 2733 | * Thanks to the entanglement with vfs inode locking, we can't walk | ||
| 2734 | * the existing cgroups under cgroup_mutex and create files. | ||
| 2735 | * Instead, we use css_for_each_descendant_pre() and drop RCU read | ||
| 2736 | * lock before calling cgroup_addrm_files(). | ||
| 2737 | */ | ||
| 2738 | mutex_lock(&cgroup_mutex); | ||
| 2739 | } | ||
| 2740 | |||
| 2741 | static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) | ||
| 2742 | __releases(&cgroup_mutex) | ||
| 2743 | { | 2430 | { |
| 2744 | LIST_HEAD(pending); | 2431 | LIST_HEAD(pending); |
| 2745 | struct cgroup_subsys *ss = cfts[0].ss; | 2432 | struct cgroup_subsys *ss = cfts[0].ss; |
| 2746 | struct cgroup *root = &ss->root->top_cgroup; | 2433 | struct cgroup *root = &ss->root->cgrp; |
| 2747 | struct super_block *sb = ss->root->sb; | ||
| 2748 | struct dentry *prev = NULL; | ||
| 2749 | struct inode *inode; | ||
| 2750 | struct cgroup_subsys_state *css; | 2434 | struct cgroup_subsys_state *css; |
| 2751 | u64 update_before; | ||
| 2752 | int ret = 0; | 2435 | int ret = 0; |
| 2753 | 2436 | ||
| 2754 | /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ | 2437 | lockdep_assert_held(&cgroup_tree_mutex); |
| 2755 | if (!cfts || ss->root == &cgroup_dummy_root || | ||
| 2756 | !atomic_inc_not_zero(&sb->s_active)) { | ||
| 2757 | mutex_unlock(&cgroup_mutex); | ||
| 2758 | return 0; | ||
| 2759 | } | ||
| 2760 | 2438 | ||
| 2761 | /* | 2439 | /* don't bother if @ss isn't attached */ |
| 2762 | * All cgroups which are created after we drop cgroup_mutex will | 2440 | if (ss->root == &cgrp_dfl_root) |
| 2763 | * have the updated set of files, so we only need to update the | 2441 | return 0; |
| 2764 | * cgroups created before the current @cgroup_serial_nr_next. | ||
| 2765 | */ | ||
| 2766 | update_before = cgroup_serial_nr_next; | ||
| 2767 | 2442 | ||
| 2768 | /* add/rm files for all cgroups created before */ | 2443 | /* add/rm files for all cgroups created before */ |
| 2769 | css_for_each_descendant_pre(css, cgroup_css(root, ss)) { | 2444 | css_for_each_descendant_pre(css, cgroup_css(root, ss)) { |
| @@ -2772,62 +2447,75 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) | |||
| 2772 | if (cgroup_is_dead(cgrp)) | 2447 | if (cgroup_is_dead(cgrp)) |
| 2773 | continue; | 2448 | continue; |
| 2774 | 2449 | ||
| 2775 | inode = cgrp->dentry->d_inode; | 2450 | ret = cgroup_addrm_files(cgrp, cfts, is_add); |
| 2776 | dget(cgrp->dentry); | ||
| 2777 | dput(prev); | ||
| 2778 | prev = cgrp->dentry; | ||
| 2779 | |||
| 2780 | mutex_unlock(&cgroup_mutex); | ||
| 2781 | mutex_lock(&inode->i_mutex); | ||
| 2782 | mutex_lock(&cgroup_mutex); | ||
| 2783 | if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) | ||
| 2784 | ret = cgroup_addrm_files(cgrp, cfts, is_add); | ||
| 2785 | mutex_unlock(&inode->i_mutex); | ||
| 2786 | if (ret) | 2451 | if (ret) |
| 2787 | break; | 2452 | break; |
| 2788 | } | 2453 | } |
| 2789 | mutex_unlock(&cgroup_mutex); | 2454 | |
| 2790 | dput(prev); | 2455 | if (is_add && !ret) |
| 2791 | deactivate_super(sb); | 2456 | kernfs_activate(root->kn); |
| 2792 | return ret; | 2457 | return ret; |
| 2793 | } | 2458 | } |
| 2794 | 2459 | ||
| 2795 | /** | 2460 | static void cgroup_exit_cftypes(struct cftype *cfts) |
| 2796 | * cgroup_add_cftypes - add an array of cftypes to a subsystem | ||
| 2797 | * @ss: target cgroup subsystem | ||
| 2798 | * @cfts: zero-length name terminated array of cftypes | ||
| 2799 | * | ||
| 2800 | * Register @cfts to @ss. Files described by @cfts are created for all | ||
| 2801 | * existing cgroups to which @ss is attached and all future cgroups will | ||
| 2802 | * have them too. This function can be called anytime whether @ss is | ||
| 2803 | * attached or not. | ||
| 2804 | * | ||
| 2805 | * Returns 0 on successful registration, -errno on failure. Note that this | ||
| 2806 | * function currently returns 0 as long as @cfts registration is successful | ||
| 2807 | * even if some file creation attempts on existing cgroups fail. | ||
| 2808 | */ | ||
| 2809 | int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) | ||
| 2810 | { | 2461 | { |
| 2811 | struct cftype_set *set; | ||
| 2812 | struct cftype *cft; | 2462 | struct cftype *cft; |
| 2813 | int ret; | ||
| 2814 | 2463 | ||
| 2815 | set = kzalloc(sizeof(*set), GFP_KERNEL); | 2464 | for (cft = cfts; cft->name[0] != '\0'; cft++) { |
| 2816 | if (!set) | 2465 | /* free copy for custom atomic_write_len, see init_cftypes() */ |
| 2817 | return -ENOMEM; | 2466 | if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) |
| 2467 | kfree(cft->kf_ops); | ||
| 2468 | cft->kf_ops = NULL; | ||
| 2469 | cft->ss = NULL; | ||
| 2470 | } | ||
| 2471 | } | ||
| 2472 | |||
| 2473 | static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) | ||
| 2474 | { | ||
| 2475 | struct cftype *cft; | ||
| 2476 | |||
| 2477 | for (cft = cfts; cft->name[0] != '\0'; cft++) { | ||
| 2478 | struct kernfs_ops *kf_ops; | ||
| 2818 | 2479 | ||
| 2819 | for (cft = cfts; cft->name[0] != '\0'; cft++) | 2480 | WARN_ON(cft->ss || cft->kf_ops); |
| 2481 | |||
| 2482 | if (cft->seq_start) | ||
| 2483 | kf_ops = &cgroup_kf_ops; | ||
| 2484 | else | ||
| 2485 | kf_ops = &cgroup_kf_single_ops; | ||
| 2486 | |||
| 2487 | /* | ||
| 2488 | * Ugh... if @cft wants a custom max_write_len, we need to | ||
| 2489 | * make a copy of kf_ops to set its atomic_write_len. | ||
| 2490 | */ | ||
| 2491 | if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) { | ||
| 2492 | kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL); | ||
| 2493 | if (!kf_ops) { | ||
| 2494 | cgroup_exit_cftypes(cfts); | ||
| 2495 | return -ENOMEM; | ||
| 2496 | } | ||
| 2497 | kf_ops->atomic_write_len = cft->max_write_len; | ||
| 2498 | } | ||
| 2499 | |||
| 2500 | cft->kf_ops = kf_ops; | ||
| 2820 | cft->ss = ss; | 2501 | cft->ss = ss; |
| 2502 | } | ||
| 2821 | 2503 | ||
| 2822 | cgroup_cfts_prepare(); | 2504 | return 0; |
| 2823 | set->cfts = cfts; | 2505 | } |
| 2824 | list_add_tail(&set->node, &ss->cftsets); | 2506 | |
| 2825 | ret = cgroup_cfts_commit(cfts, true); | 2507 | static int cgroup_rm_cftypes_locked(struct cftype *cfts) |
| 2826 | if (ret) | 2508 | { |
| 2827 | cgroup_rm_cftypes(cfts); | 2509 | lockdep_assert_held(&cgroup_tree_mutex); |
| 2828 | return ret; | 2510 | |
| 2511 | if (!cfts || !cfts[0].ss) | ||
| 2512 | return -ENOENT; | ||
| 2513 | |||
| 2514 | list_del(&cfts->node); | ||
| 2515 | cgroup_apply_cftypes(cfts, false); | ||
| 2516 | cgroup_exit_cftypes(cfts); | ||
| 2517 | return 0; | ||
| 2829 | } | 2518 | } |
| 2830 | EXPORT_SYMBOL_GPL(cgroup_add_cftypes); | ||
| 2831 | 2519 | ||
| 2832 | /** | 2520 | /** |
| 2833 | * cgroup_rm_cftypes - remove an array of cftypes from a subsystem | 2521 | * cgroup_rm_cftypes - remove an array of cftypes from a subsystem |
| @@ -2842,24 +2530,48 @@ EXPORT_SYMBOL_GPL(cgroup_add_cftypes); | |||
| 2842 | */ | 2530 | */ |
| 2843 | int cgroup_rm_cftypes(struct cftype *cfts) | 2531 | int cgroup_rm_cftypes(struct cftype *cfts) |
| 2844 | { | 2532 | { |
| 2845 | struct cftype_set *set; | 2533 | int ret; |
| 2846 | 2534 | ||
| 2847 | if (!cfts || !cfts[0].ss) | 2535 | mutex_lock(&cgroup_tree_mutex); |
| 2848 | return -ENOENT; | 2536 | ret = cgroup_rm_cftypes_locked(cfts); |
| 2537 | mutex_unlock(&cgroup_tree_mutex); | ||
| 2538 | return ret; | ||
| 2539 | } | ||
| 2849 | 2540 | ||
| 2850 | cgroup_cfts_prepare(); | 2541 | /** |
| 2542 | * cgroup_add_cftypes - add an array of cftypes to a subsystem | ||
| 2543 | * @ss: target cgroup subsystem | ||
| 2544 | * @cfts: zero-length name terminated array of cftypes | ||
| 2545 | * | ||
| 2546 | * Register @cfts to @ss. Files described by @cfts are created for all | ||
| 2547 | * existing cgroups to which @ss is attached and all future cgroups will | ||
| 2548 | * have them too. This function can be called anytime whether @ss is | ||
| 2549 | * attached or not. | ||
| 2550 | * | ||
| 2551 | * Returns 0 on successful registration, -errno on failure. Note that this | ||
| 2552 | * function currently returns 0 as long as @cfts registration is successful | ||
| 2553 | * even if some file creation attempts on existing cgroups fail. | ||
| 2554 | */ | ||
| 2555 | int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) | ||
| 2556 | { | ||
| 2557 | int ret; | ||
| 2851 | 2558 | ||
| 2852 | list_for_each_entry(set, &cfts[0].ss->cftsets, node) { | 2559 | if (!cfts || cfts[0].name[0] == '\0') |
| 2853 | if (set->cfts == cfts) { | 2560 | return 0; |
| 2854 | list_del(&set->node); | ||
| 2855 | kfree(set); | ||
| 2856 | cgroup_cfts_commit(cfts, false); | ||
| 2857 | return 0; | ||
| 2858 | } | ||
| 2859 | } | ||
| 2860 | 2561 | ||
| 2861 | cgroup_cfts_commit(NULL, false); | 2562 | ret = cgroup_init_cftypes(ss, cfts); |
| 2862 | return -ENOENT; | 2563 | if (ret) |
| 2564 | return ret; | ||
| 2565 | |||
| 2566 | mutex_lock(&cgroup_tree_mutex); | ||
| 2567 | |||
| 2568 | list_add_tail(&cfts->node, &ss->cfts); | ||
| 2569 | ret = cgroup_apply_cftypes(cfts, true); | ||
| 2570 | if (ret) | ||
| 2571 | cgroup_rm_cftypes_locked(cfts); | ||
| 2572 | |||
| 2573 | mutex_unlock(&cgroup_tree_mutex); | ||
| 2574 | return ret; | ||
| 2863 | } | 2575 | } |
| 2864 | 2576 | ||
| 2865 | /** | 2577 | /** |
| @@ -2868,57 +2580,18 @@ int cgroup_rm_cftypes(struct cftype *cfts) | |||
| 2868 | * | 2580 | * |
| 2869 | * Return the number of tasks in the cgroup. | 2581 | * Return the number of tasks in the cgroup. |
| 2870 | */ | 2582 | */ |
| 2871 | int cgroup_task_count(const struct cgroup *cgrp) | 2583 | static int cgroup_task_count(const struct cgroup *cgrp) |
| 2872 | { | 2584 | { |
| 2873 | int count = 0; | 2585 | int count = 0; |
| 2874 | struct cgrp_cset_link *link; | 2586 | struct cgrp_cset_link *link; |
| 2875 | 2587 | ||
| 2876 | read_lock(&css_set_lock); | 2588 | down_read(&css_set_rwsem); |
| 2877 | list_for_each_entry(link, &cgrp->cset_links, cset_link) | 2589 | list_for_each_entry(link, &cgrp->cset_links, cset_link) |
| 2878 | count += atomic_read(&link->cset->refcount); | 2590 | count += atomic_read(&link->cset->refcount); |
| 2879 | read_unlock(&css_set_lock); | 2591 | up_read(&css_set_rwsem); |
| 2880 | return count; | 2592 | return count; |
| 2881 | } | 2593 | } |
| 2882 | 2594 | ||
| 2883 | /* | ||
| 2884 | * To reduce the fork() overhead for systems that are not actually using | ||
| 2885 | * their cgroups capability, we don't maintain the lists running through | ||
| 2886 | * each css_set to its tasks until we see the list actually used - in other | ||
| 2887 | * words after the first call to css_task_iter_start(). | ||
| 2888 | */ | ||
| 2889 | static void cgroup_enable_task_cg_lists(void) | ||
| 2890 | { | ||
| 2891 | struct task_struct *p, *g; | ||
| 2892 | write_lock(&css_set_lock); | ||
| 2893 | use_task_css_set_links = 1; | ||
| 2894 | /* | ||
| 2895 | * We need tasklist_lock because RCU is not safe against | ||
| 2896 | * while_each_thread(). Besides, a forking task that has passed | ||
| 2897 | * cgroup_post_fork() without seeing use_task_css_set_links = 1 | ||
| 2898 | * is not guaranteed to have its child immediately visible in the | ||
| 2899 | * tasklist if we walk through it with RCU. | ||
| 2900 | */ | ||
| 2901 | read_lock(&tasklist_lock); | ||
| 2902 | do_each_thread(g, p) { | ||
| 2903 | task_lock(p); | ||
| 2904 | /* | ||
| 2905 | * We should check if the process is exiting, otherwise | ||
| 2906 | * it will race with cgroup_exit() in that the list | ||
| 2907 | * entry won't be deleted though the process has exited. | ||
| 2908 | * Do it while holding siglock so that we don't end up | ||
| 2909 | * racing against cgroup_exit(). | ||
| 2910 | */ | ||
| 2911 | spin_lock_irq(&p->sighand->siglock); | ||
| 2912 | if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) | ||
| 2913 | list_add(&p->cg_list, &task_css_set(p)->tasks); | ||
| 2914 | spin_unlock_irq(&p->sighand->siglock); | ||
| 2915 | |||
| 2916 | task_unlock(p); | ||
| 2917 | } while_each_thread(g, p); | ||
| 2918 | read_unlock(&tasklist_lock); | ||
| 2919 | write_unlock(&css_set_lock); | ||
| 2920 | } | ||
| 2921 | |||
| 2922 | /** | 2595 | /** |
| 2923 | * css_next_child - find the next child of a given css | 2596 | * css_next_child - find the next child of a given css |
| 2924 | * @pos_css: the current position (%NULL to initiate traversal) | 2597 | * @pos_css: the current position (%NULL to initiate traversal) |
| @@ -2937,7 +2610,7 @@ css_next_child(struct cgroup_subsys_state *pos_css, | |||
| 2937 | struct cgroup *cgrp = parent_css->cgroup; | 2610 | struct cgroup *cgrp = parent_css->cgroup; |
| 2938 | struct cgroup *next; | 2611 | struct cgroup *next; |
| 2939 | 2612 | ||
| 2940 | cgroup_assert_mutex_or_rcu_locked(); | 2613 | cgroup_assert_mutexes_or_rcu_locked(); |
| 2941 | 2614 | ||
| 2942 | /* | 2615 | /* |
| 2943 | * @pos could already have been removed. Once a cgroup is removed, | 2616 | * @pos could already have been removed. Once a cgroup is removed, |
| @@ -2973,7 +2646,6 @@ css_next_child(struct cgroup_subsys_state *pos_css, | |||
| 2973 | 2646 | ||
| 2974 | return cgroup_css(next, parent_css->ss); | 2647 | return cgroup_css(next, parent_css->ss); |
| 2975 | } | 2648 | } |
| 2976 | EXPORT_SYMBOL_GPL(css_next_child); | ||
| 2977 | 2649 | ||
| 2978 | /** | 2650 | /** |
| 2979 | * css_next_descendant_pre - find the next descendant for pre-order walk | 2651 | * css_next_descendant_pre - find the next descendant for pre-order walk |
| @@ -2995,7 +2667,7 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos, | |||
| 2995 | { | 2667 | { |
| 2996 | struct cgroup_subsys_state *next; | 2668 | struct cgroup_subsys_state *next; |
| 2997 | 2669 | ||
| 2998 | cgroup_assert_mutex_or_rcu_locked(); | 2670 | cgroup_assert_mutexes_or_rcu_locked(); |
| 2999 | 2671 | ||
| 3000 | /* if first iteration, visit @root */ | 2672 | /* if first iteration, visit @root */ |
| 3001 | if (!pos) | 2673 | if (!pos) |
| @@ -3016,7 +2688,6 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos, | |||
| 3016 | 2688 | ||
| 3017 | return NULL; | 2689 | return NULL; |
| 3018 | } | 2690 | } |
| 3019 | EXPORT_SYMBOL_GPL(css_next_descendant_pre); | ||
| 3020 | 2691 | ||
| 3021 | /** | 2692 | /** |
| 3022 | * css_rightmost_descendant - return the rightmost descendant of a css | 2693 | * css_rightmost_descendant - return the rightmost descendant of a css |
| @@ -3036,7 +2707,7 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos) | |||
| 3036 | { | 2707 | { |
| 3037 | struct cgroup_subsys_state *last, *tmp; | 2708 | struct cgroup_subsys_state *last, *tmp; |
| 3038 | 2709 | ||
| 3039 | cgroup_assert_mutex_or_rcu_locked(); | 2710 | cgroup_assert_mutexes_or_rcu_locked(); |
| 3040 | 2711 | ||
| 3041 | do { | 2712 | do { |
| 3042 | last = pos; | 2713 | last = pos; |
| @@ -3048,7 +2719,6 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos) | |||
| 3048 | 2719 | ||
| 3049 | return last; | 2720 | return last; |
| 3050 | } | 2721 | } |
| 3051 | EXPORT_SYMBOL_GPL(css_rightmost_descendant); | ||
| 3052 | 2722 | ||
| 3053 | static struct cgroup_subsys_state * | 2723 | static struct cgroup_subsys_state * |
| 3054 | css_leftmost_descendant(struct cgroup_subsys_state *pos) | 2724 | css_leftmost_descendant(struct cgroup_subsys_state *pos) |
| @@ -3084,7 +2754,7 @@ css_next_descendant_post(struct cgroup_subsys_state *pos, | |||
| 3084 | { | 2754 | { |
| 3085 | struct cgroup_subsys_state *next; | 2755 | struct cgroup_subsys_state *next; |
| 3086 | 2756 | ||
| 3087 | cgroup_assert_mutex_or_rcu_locked(); | 2757 | cgroup_assert_mutexes_or_rcu_locked(); |
| 3088 | 2758 | ||
| 3089 | /* if first iteration, visit leftmost descendant which may be @root */ | 2759 | /* if first iteration, visit leftmost descendant which may be @root */ |
| 3090 | if (!pos) | 2760 | if (!pos) |
| @@ -3102,7 +2772,6 @@ css_next_descendant_post(struct cgroup_subsys_state *pos, | |||
| 3102 | /* no sibling left, visit parent */ | 2772 | /* no sibling left, visit parent */ |
| 3103 | return css_parent(pos); | 2773 | return css_parent(pos); |
| 3104 | } | 2774 | } |
| 3105 | EXPORT_SYMBOL_GPL(css_next_descendant_post); | ||
| 3106 | 2775 | ||
| 3107 | /** | 2776 | /** |
| 3108 | * css_advance_task_iter - advance a task itererator to the next css_set | 2777 | * css_advance_task_iter - advance a task itererator to the next css_set |
| @@ -3125,9 +2794,14 @@ static void css_advance_task_iter(struct css_task_iter *it) | |||
| 3125 | } | 2794 | } |
| 3126 | link = list_entry(l, struct cgrp_cset_link, cset_link); | 2795 | link = list_entry(l, struct cgrp_cset_link, cset_link); |
| 3127 | cset = link->cset; | 2796 | cset = link->cset; |
| 3128 | } while (list_empty(&cset->tasks)); | 2797 | } while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks)); |
| 2798 | |||
| 3129 | it->cset_link = l; | 2799 | it->cset_link = l; |
| 3130 | it->task = cset->tasks.next; | 2800 | |
| 2801 | if (!list_empty(&cset->tasks)) | ||
| 2802 | it->task = cset->tasks.next; | ||
| 2803 | else | ||
| 2804 | it->task = cset->mg_tasks.next; | ||
| 3131 | } | 2805 | } |
| 3132 | 2806 | ||
| 3133 | /** | 2807 | /** |
| @@ -3146,17 +2820,12 @@ static void css_advance_task_iter(struct css_task_iter *it) | |||
| 3146 | */ | 2820 | */ |
| 3147 | void css_task_iter_start(struct cgroup_subsys_state *css, | 2821 | void css_task_iter_start(struct cgroup_subsys_state *css, |
| 3148 | struct css_task_iter *it) | 2822 | struct css_task_iter *it) |
| 3149 | __acquires(css_set_lock) | 2823 | __acquires(css_set_rwsem) |
| 3150 | { | 2824 | { |
| 3151 | /* | 2825 | /* no one should try to iterate before mounting cgroups */ |
| 3152 | * The first time anyone tries to iterate across a css, we need to | 2826 | WARN_ON_ONCE(!use_task_css_set_links); |
| 3153 | * enable the list linking each css_set to its tasks, and fix up | ||
| 3154 | * all existing tasks. | ||
| 3155 | */ | ||
| 3156 | if (!use_task_css_set_links) | ||
| 3157 | cgroup_enable_task_cg_lists(); | ||
| 3158 | 2827 | ||
| 3159 | read_lock(&css_set_lock); | 2828 | down_read(&css_set_rwsem); |
| 3160 | 2829 | ||
| 3161 | it->origin_css = css; | 2830 | it->origin_css = css; |
| 3162 | it->cset_link = &css->cgroup->cset_links; | 2831 | it->cset_link = &css->cgroup->cset_links; |
| @@ -3176,24 +2845,29 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it) | |||
| 3176 | { | 2845 | { |
| 3177 | struct task_struct *res; | 2846 | struct task_struct *res; |
| 3178 | struct list_head *l = it->task; | 2847 | struct list_head *l = it->task; |
| 3179 | struct cgrp_cset_link *link; | 2848 | struct cgrp_cset_link *link = list_entry(it->cset_link, |
| 2849 | struct cgrp_cset_link, cset_link); | ||
| 3180 | 2850 | ||
| 3181 | /* If the iterator cg is NULL, we have no tasks */ | 2851 | /* If the iterator cg is NULL, we have no tasks */ |
| 3182 | if (!it->cset_link) | 2852 | if (!it->cset_link) |
| 3183 | return NULL; | 2853 | return NULL; |
| 3184 | res = list_entry(l, struct task_struct, cg_list); | 2854 | res = list_entry(l, struct task_struct, cg_list); |
| 3185 | /* Advance iterator to find next entry */ | 2855 | |
| 2856 | /* | ||
| 2857 | * Advance iterator to find next entry. cset->tasks is consumed | ||
| 2858 | * first and then ->mg_tasks. After ->mg_tasks, we move onto the | ||
| 2859 | * next cset. | ||
| 2860 | */ | ||
| 3186 | l = l->next; | 2861 | l = l->next; |
| 3187 | link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link); | 2862 | |
| 3188 | if (l == &link->cset->tasks) { | 2863 | if (l == &link->cset->tasks) |
| 3189 | /* | 2864 | l = link->cset->mg_tasks.next; |
| 3190 | * We reached the end of this task list - move on to the | 2865 | |
| 3191 | * next cgrp_cset_link. | 2866 | if (l == &link->cset->mg_tasks) |
| 3192 | */ | ||
| 3193 | css_advance_task_iter(it); | 2867 | css_advance_task_iter(it); |
| 3194 | } else { | 2868 | else |
| 3195 | it->task = l; | 2869 | it->task = l; |
| 3196 | } | 2870 | |
| 3197 | return res; | 2871 | return res; |
| 3198 | } | 2872 | } |
| 3199 | 2873 | ||
| @@ -3204,191 +2878,62 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it) | |||
| 3204 | * Finish task iteration started by css_task_iter_start(). | 2878 | * Finish task iteration started by css_task_iter_start(). |
| 3205 | */ | 2879 | */ |
| 3206 | void css_task_iter_end(struct css_task_iter *it) | 2880 | void css_task_iter_end(struct css_task_iter *it) |
| 3207 | __releases(css_set_lock) | 2881 | __releases(css_set_rwsem) |
| 3208 | { | ||
| 3209 | read_unlock(&css_set_lock); | ||
| 3210 | } | ||
| 3211 | |||
| 3212 | static inline int started_after_time(struct task_struct *t1, | ||
| 3213 | struct timespec *time, | ||
| 3214 | struct task_struct *t2) | ||
| 3215 | { | ||
| 3216 | int start_diff = timespec_compare(&t1->start_time, time); | ||
| 3217 | if (start_diff > 0) { | ||
| 3218 | return 1; | ||
| 3219 | } else if (start_diff < 0) { | ||
| 3220 | return 0; | ||
| 3221 | } else { | ||
| 3222 | /* | ||
| 3223 | * Arbitrarily, if two processes started at the same | ||
| 3224 | * time, we'll say that the lower pointer value | ||
| 3225 | * started first. Note that t2 may have exited by now | ||
| 3226 | * so this may not be a valid pointer any longer, but | ||
| 3227 | * that's fine - it still serves to distinguish | ||
| 3228 | * between two tasks started (effectively) simultaneously. | ||
| 3229 | */ | ||
| 3230 | return t1 > t2; | ||
| 3231 | } | ||
| 3232 | } | ||
| 3233 | |||
| 3234 | /* | ||
| 3235 | * This function is a callback from heap_insert() and is used to order | ||
| 3236 | * the heap. | ||
| 3237 | * In this case we order the heap in descending task start time. | ||
| 3238 | */ | ||
| 3239 | static inline int started_after(void *p1, void *p2) | ||
| 3240 | { | 2882 | { |
| 3241 | struct task_struct *t1 = p1; | 2883 | up_read(&css_set_rwsem); |
| 3242 | struct task_struct *t2 = p2; | ||
| 3243 | return started_after_time(t1, &t2->start_time, t2); | ||
| 3244 | } | 2884 | } |
| 3245 | 2885 | ||
| 3246 | /** | 2886 | /** |
| 3247 | * css_scan_tasks - iterate through all the tasks in a css | 2887 | * cgroup_transfer_tasks - move tasks from one cgroup to another |
| 3248 | * @css: the css to iterate tasks of | 2888 | * @to: cgroup to which the tasks will be moved |
| 3249 | * @test: optional test callback | 2889 | * @from: cgroup in which the tasks currently reside |
| 3250 | * @process: process callback | ||
| 3251 | * @data: data passed to @test and @process | ||
| 3252 | * @heap: optional pre-allocated heap used for task iteration | ||
| 3253 | * | ||
| 3254 | * Iterate through all the tasks in @css, calling @test for each, and if it | ||
| 3255 | * returns %true, call @process for it also. | ||
| 3256 | * | ||
| 3257 | * @test may be NULL, meaning always true (select all tasks), which | ||
| 3258 | * effectively duplicates css_task_iter_{start,next,end}() but does not | ||
| 3259 | * lock css_set_lock for the call to @process. | ||
| 3260 | * | ||
| 3261 | * It is guaranteed that @process will act on every task that is a member | ||
| 3262 | * of @css for the duration of this call. This function may or may not | ||
| 3263 | * call @process for tasks that exit or move to a different css during the | ||
| 3264 | * call, or are forked or move into the css during the call. | ||
| 3265 | * | ||
| 3266 | * Note that @test may be called with locks held, and may in some | ||
| 3267 | * situations be called multiple times for the same task, so it should be | ||
| 3268 | * cheap. | ||
| 3269 | * | 2890 | * |
| 3270 | * If @heap is non-NULL, a heap has been pre-allocated and will be used for | 2891 | * Locking rules between cgroup_post_fork() and the migration path |
| 3271 | * heap operations (and its "gt" member will be overwritten), else a | 2892 | * guarantee that, if a task is forking while being migrated, the new child |
| 3272 | * temporary heap will be used (allocation of which may cause this function | 2893 | * is guaranteed to be either visible in the source cgroup after the |
| 3273 | * to fail). | 2894 | * parent's migration is complete or put into the target cgroup. No task |
| 2895 | * can slip out of migration through forking. | ||
| 3274 | */ | 2896 | */ |
| 3275 | int css_scan_tasks(struct cgroup_subsys_state *css, | 2897 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) |
| 3276 | bool (*test)(struct task_struct *, void *), | ||
| 3277 | void (*process)(struct task_struct *, void *), | ||
| 3278 | void *data, struct ptr_heap *heap) | ||
| 3279 | { | 2898 | { |
| 3280 | int retval, i; | 2899 | LIST_HEAD(preloaded_csets); |
| 2900 | struct cgrp_cset_link *link; | ||
| 3281 | struct css_task_iter it; | 2901 | struct css_task_iter it; |
| 3282 | struct task_struct *p, *dropped; | 2902 | struct task_struct *task; |
| 3283 | /* Never dereference latest_task, since it's not refcounted */ | 2903 | int ret; |
| 3284 | struct task_struct *latest_task = NULL; | ||
| 3285 | struct ptr_heap tmp_heap; | ||
| 3286 | struct timespec latest_time = { 0, 0 }; | ||
| 3287 | |||
| 3288 | if (heap) { | ||
| 3289 | /* The caller supplied our heap and pre-allocated its memory */ | ||
| 3290 | heap->gt = &started_after; | ||
| 3291 | } else { | ||
| 3292 | /* We need to allocate our own heap memory */ | ||
| 3293 | heap = &tmp_heap; | ||
| 3294 | retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after); | ||
| 3295 | if (retval) | ||
| 3296 | /* cannot allocate the heap */ | ||
| 3297 | return retval; | ||
| 3298 | } | ||
| 3299 | 2904 | ||
| 3300 | again: | 2905 | mutex_lock(&cgroup_mutex); |
| 3301 | /* | ||
| 3302 | * Scan tasks in the css, using the @test callback to determine | ||
| 3303 | * which are of interest, and invoking @process callback on the | ||
| 3304 | * ones which need an update. Since we don't want to hold any | ||
| 3305 | * locks during the task updates, gather tasks to be processed in a | ||
| 3306 | * heap structure. The heap is sorted by descending task start | ||
| 3307 | * time. If the statically-sized heap fills up, we overflow tasks | ||
| 3308 | * that started later, and in future iterations only consider tasks | ||
| 3309 | * that started after the latest task in the previous pass. This | ||
| 3310 | * guarantees forward progress and that we don't miss any tasks. | ||
| 3311 | */ | ||
| 3312 | heap->size = 0; | ||
| 3313 | css_task_iter_start(css, &it); | ||
| 3314 | while ((p = css_task_iter_next(&it))) { | ||
| 3315 | /* | ||
| 3316 | * Only affect tasks that qualify per the caller's callback, | ||
| 3317 | * if he provided one | ||
| 3318 | */ | ||
| 3319 | if (test && !test(p, data)) | ||
| 3320 | continue; | ||
| 3321 | /* | ||
| 3322 | * Only process tasks that started after the last task | ||
| 3323 | * we processed | ||
| 3324 | */ | ||
| 3325 | if (!started_after_time(p, &latest_time, latest_task)) | ||
| 3326 | continue; | ||
| 3327 | dropped = heap_insert(heap, p); | ||
| 3328 | if (dropped == NULL) { | ||
| 3329 | /* | ||
| 3330 | * The new task was inserted; the heap wasn't | ||
| 3331 | * previously full | ||
| 3332 | */ | ||
| 3333 | get_task_struct(p); | ||
| 3334 | } else if (dropped != p) { | ||
| 3335 | /* | ||
| 3336 | * The new task was inserted, and pushed out a | ||
| 3337 | * different task | ||
| 3338 | */ | ||
| 3339 | get_task_struct(p); | ||
| 3340 | put_task_struct(dropped); | ||
| 3341 | } | ||
| 3342 | /* | ||
| 3343 | * Else the new task was newer than anything already in | ||
| 3344 | * the heap and wasn't inserted | ||
| 3345 | */ | ||
| 3346 | } | ||
| 3347 | css_task_iter_end(&it); | ||
| 3348 | 2906 | ||
| 3349 | if (heap->size) { | 2907 | /* all tasks in @from are being moved, all csets are source */ |
| 3350 | for (i = 0; i < heap->size; i++) { | 2908 | down_read(&css_set_rwsem); |
| 3351 | struct task_struct *q = heap->ptrs[i]; | 2909 | list_for_each_entry(link, &from->cset_links, cset_link) |
| 3352 | if (i == 0) { | 2910 | cgroup_migrate_add_src(link->cset, to, &preloaded_csets); |
| 3353 | latest_time = q->start_time; | 2911 | up_read(&css_set_rwsem); |
| 3354 | latest_task = q; | ||
| 3355 | } | ||
| 3356 | /* Process the task per the caller's callback */ | ||
| 3357 | process(q, data); | ||
| 3358 | put_task_struct(q); | ||
| 3359 | } | ||
| 3360 | /* | ||
| 3361 | * If we had to process any tasks at all, scan again | ||
| 3362 | * in case some of them were in the middle of forking | ||
| 3363 | * children that didn't get processed. | ||
| 3364 | * Not the most efficient way to do it, but it avoids | ||
| 3365 | * having to take callback_mutex in the fork path | ||
| 3366 | */ | ||
| 3367 | goto again; | ||
| 3368 | } | ||
| 3369 | if (heap == &tmp_heap) | ||
| 3370 | heap_free(&tmp_heap); | ||
| 3371 | return 0; | ||
| 3372 | } | ||
| 3373 | 2912 | ||
| 3374 | static void cgroup_transfer_one_task(struct task_struct *task, void *data) | 2913 | ret = cgroup_migrate_prepare_dst(to, &preloaded_csets); |
| 3375 | { | 2914 | if (ret) |
| 3376 | struct cgroup *new_cgroup = data; | 2915 | goto out_err; |
| 3377 | 2916 | ||
| 3378 | mutex_lock(&cgroup_mutex); | 2917 | /* |
| 3379 | cgroup_attach_task(new_cgroup, task, false); | 2918 | * Migrate tasks one-by-one until @from is empty. This fails iff |
| 2919 | * ->can_attach() fails. | ||
| 2920 | */ | ||
| 2921 | do { | ||
| 2922 | css_task_iter_start(&from->dummy_css, &it); | ||
| 2923 | task = css_task_iter_next(&it); | ||
| 2924 | if (task) | ||
| 2925 | get_task_struct(task); | ||
| 2926 | css_task_iter_end(&it); | ||
| 2927 | |||
| 2928 | if (task) { | ||
| 2929 | ret = cgroup_migrate(to, task, false); | ||
| 2930 | put_task_struct(task); | ||
| 2931 | } | ||
| 2932 | } while (task && !ret); | ||
| 2933 | out_err: | ||
| 2934 | cgroup_migrate_finish(&preloaded_csets); | ||
| 3380 | mutex_unlock(&cgroup_mutex); | 2935 | mutex_unlock(&cgroup_mutex); |
| 3381 | } | 2936 | return ret; |
| 3382 | |||
| 3383 | /** | ||
| 3384 | * cgroup_transfer_tasks - move tasks from one cgroup to another | ||
| 3385 | * @to: cgroup to which the tasks will be moved | ||
| 3386 | * @from: cgroup in which the tasks currently reside | ||
| 3387 | */ | ||
| 3388 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) | ||
| 3389 | { | ||
| 3390 | return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task, | ||
| 3391 | to, NULL); | ||
| 3392 | } | 2937 | } |
| 3393 | 2938 | ||
| 3394 | /* | 2939 | /* |
| @@ -3687,21 +3232,31 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, | |||
| 3687 | */ | 3232 | */ |
| 3688 | int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) | 3233 | int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) |
| 3689 | { | 3234 | { |
| 3690 | int ret = -EINVAL; | 3235 | struct kernfs_node *kn = kernfs_node_from_dentry(dentry); |
| 3691 | struct cgroup *cgrp; | 3236 | struct cgroup *cgrp; |
| 3692 | struct css_task_iter it; | 3237 | struct css_task_iter it; |
| 3693 | struct task_struct *tsk; | 3238 | struct task_struct *tsk; |
| 3694 | 3239 | ||
| 3240 | /* it should be kernfs_node belonging to cgroupfs and is a directory */ | ||
| 3241 | if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || | ||
| 3242 | kernfs_type(kn) != KERNFS_DIR) | ||
| 3243 | return -EINVAL; | ||
| 3244 | |||
| 3245 | mutex_lock(&cgroup_mutex); | ||
| 3246 | |||
| 3695 | /* | 3247 | /* |
| 3696 | * Validate dentry by checking the superblock operations, | 3248 | * We aren't being called from kernfs and there's no guarantee on |
| 3697 | * and make sure it's a directory. | 3249 | * @kn->priv's validity. For this and css_tryget_from_dir(), |
| 3250 | * @kn->priv is RCU safe. Let's do the RCU dancing. | ||
| 3698 | */ | 3251 | */ |
| 3699 | if (dentry->d_sb->s_op != &cgroup_ops || | 3252 | rcu_read_lock(); |
| 3700 | !S_ISDIR(dentry->d_inode->i_mode)) | 3253 | cgrp = rcu_dereference(kn->priv); |
| 3701 | goto err; | 3254 | if (!cgrp || cgroup_is_dead(cgrp)) { |
| 3702 | 3255 | rcu_read_unlock(); | |
| 3703 | ret = 0; | 3256 | mutex_unlock(&cgroup_mutex); |
| 3704 | cgrp = dentry->d_fsdata; | 3257 | return -ENOENT; |
| 3258 | } | ||
| 3259 | rcu_read_unlock(); | ||
| 3705 | 3260 | ||
| 3706 | css_task_iter_start(&cgrp->dummy_css, &it); | 3261 | css_task_iter_start(&cgrp->dummy_css, &it); |
| 3707 | while ((tsk = css_task_iter_next(&it))) { | 3262 | while ((tsk = css_task_iter_next(&it))) { |
| @@ -3726,8 +3281,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) | |||
| 3726 | } | 3281 | } |
| 3727 | css_task_iter_end(&it); | 3282 | css_task_iter_end(&it); |
| 3728 | 3283 | ||
| 3729 | err: | 3284 | mutex_unlock(&cgroup_mutex); |
| 3730 | return ret; | 3285 | return 0; |
| 3731 | } | 3286 | } |
| 3732 | 3287 | ||
| 3733 | 3288 | ||
| @@ -3745,7 +3300,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) | |||
| 3745 | * after a seek to the start). Use a binary-search to find the | 3300 | * after a seek to the start). Use a binary-search to find the |
| 3746 | * next pid to display, if any | 3301 | * next pid to display, if any |
| 3747 | */ | 3302 | */ |
| 3748 | struct cgroup_open_file *of = s->private; | 3303 | struct kernfs_open_file *of = s->private; |
| 3749 | struct cgroup *cgrp = seq_css(s)->cgroup; | 3304 | struct cgroup *cgrp = seq_css(s)->cgroup; |
| 3750 | struct cgroup_pidlist *l; | 3305 | struct cgroup_pidlist *l; |
| 3751 | enum cgroup_filetype type = seq_cft(s)->private; | 3306 | enum cgroup_filetype type = seq_cft(s)->private; |
| @@ -3800,7 +3355,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) | |||
| 3800 | 3355 | ||
| 3801 | static void cgroup_pidlist_stop(struct seq_file *s, void *v) | 3356 | static void cgroup_pidlist_stop(struct seq_file *s, void *v) |
| 3802 | { | 3357 | { |
| 3803 | struct cgroup_open_file *of = s->private; | 3358 | struct kernfs_open_file *of = s->private; |
| 3804 | struct cgroup_pidlist *l = of->priv; | 3359 | struct cgroup_pidlist *l = of->priv; |
| 3805 | 3360 | ||
| 3806 | if (l) | 3361 | if (l) |
| @@ -3811,7 +3366,7 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v) | |||
| 3811 | 3366 | ||
| 3812 | static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) | 3367 | static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) |
| 3813 | { | 3368 | { |
| 3814 | struct cgroup_open_file *of = s->private; | 3369 | struct kernfs_open_file *of = s->private; |
| 3815 | struct cgroup_pidlist *l = of->priv; | 3370 | struct cgroup_pidlist *l = of->priv; |
| 3816 | pid_t *p = v; | 3371 | pid_t *p = v; |
| 3817 | pid_t *end = l->list + l->length; | 3372 | pid_t *end = l->list + l->length; |
| @@ -3861,23 +3416,6 @@ static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css, | |||
| 3861 | return 0; | 3416 | return 0; |
| 3862 | } | 3417 | } |
| 3863 | 3418 | ||
| 3864 | /* | ||
| 3865 | * When dput() is called asynchronously, if umount has been done and | ||
| 3866 | * then deactivate_super() in cgroup_free_fn() kills the superblock, | ||
| 3867 | * there's a small window that vfs will see the root dentry with non-zero | ||
| 3868 | * refcnt and trigger BUG(). | ||
| 3869 | * | ||
| 3870 | * That's why we hold a reference before dput() and drop it right after. | ||
| 3871 | */ | ||
| 3872 | static void cgroup_dput(struct cgroup *cgrp) | ||
| 3873 | { | ||
| 3874 | struct super_block *sb = cgrp->root->sb; | ||
| 3875 | |||
| 3876 | atomic_inc(&sb->s_active); | ||
| 3877 | dput(cgrp->dentry); | ||
| 3878 | deactivate_super(sb); | ||
| 3879 | } | ||
| 3880 | |||
| 3881 | static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, | 3419 | static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, |
| 3882 | struct cftype *cft) | 3420 | struct cftype *cft) |
| 3883 | { | 3421 | { |
| @@ -3944,7 +3482,7 @@ static struct cftype cgroup_base_files[] = { | |||
| 3944 | .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT, | 3482 | .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT, |
| 3945 | .seq_show = cgroup_release_agent_show, | 3483 | .seq_show = cgroup_release_agent_show, |
| 3946 | .write_string = cgroup_release_agent_write, | 3484 | .write_string = cgroup_release_agent_write, |
| 3947 | .max_write_len = PATH_MAX, | 3485 | .max_write_len = PATH_MAX - 1, |
| 3948 | }, | 3486 | }, |
| 3949 | { } /* terminate */ | 3487 | { } /* terminate */ |
| 3950 | }; | 3488 | }; |
| @@ -3963,13 +3501,13 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask) | |||
| 3963 | 3501 | ||
| 3964 | /* process cftsets of each subsystem */ | 3502 | /* process cftsets of each subsystem */ |
| 3965 | for_each_subsys(ss, i) { | 3503 | for_each_subsys(ss, i) { |
| 3966 | struct cftype_set *set; | 3504 | struct cftype *cfts; |
| 3967 | 3505 | ||
| 3968 | if (!test_bit(i, &subsys_mask)) | 3506 | if (!test_bit(i, &subsys_mask)) |
| 3969 | continue; | 3507 | continue; |
| 3970 | 3508 | ||
| 3971 | list_for_each_entry(set, &ss->cftsets, node) { | 3509 | list_for_each_entry(cfts, &ss->cfts, node) { |
| 3972 | ret = cgroup_addrm_files(cgrp, set->cfts, true); | 3510 | ret = cgroup_addrm_files(cgrp, cfts, true); |
| 3973 | if (ret < 0) | 3511 | if (ret < 0) |
| 3974 | goto err; | 3512 | goto err; |
| 3975 | } | 3513 | } |
| @@ -4012,7 +3550,7 @@ static void css_free_work_fn(struct work_struct *work) | |||
| 4012 | css_put(css->parent); | 3550 | css_put(css->parent); |
| 4013 | 3551 | ||
| 4014 | css->ss->css_free(css); | 3552 | css->ss->css_free(css); |
| 4015 | cgroup_dput(cgrp); | 3553 | cgroup_put(cgrp); |
| 4016 | } | 3554 | } |
| 4017 | 3555 | ||
| 4018 | static void css_free_rcu_fn(struct rcu_head *rcu_head) | 3556 | static void css_free_rcu_fn(struct rcu_head *rcu_head) |
| @@ -4020,10 +3558,6 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head) | |||
| 4020 | struct cgroup_subsys_state *css = | 3558 | struct cgroup_subsys_state *css = |
| 4021 | container_of(rcu_head, struct cgroup_subsys_state, rcu_head); | 3559 | container_of(rcu_head, struct cgroup_subsys_state, rcu_head); |
| 4022 | 3560 | ||
| 4023 | /* | ||
| 4024 | * css holds an extra ref to @cgrp->dentry which is put on the last | ||
| 4025 | * css_put(). dput() requires process context which we don't have. | ||
| 4026 | */ | ||
| 4027 | INIT_WORK(&css->destroy_work, css_free_work_fn); | 3561 | INIT_WORK(&css->destroy_work, css_free_work_fn); |
| 4028 | queue_work(cgroup_destroy_wq, &css->destroy_work); | 3562 | queue_work(cgroup_destroy_wq, &css->destroy_work); |
| 4029 | } | 3563 | } |
| @@ -4033,7 +3567,7 @@ static void css_release(struct percpu_ref *ref) | |||
| 4033 | struct cgroup_subsys_state *css = | 3567 | struct cgroup_subsys_state *css = |
| 4034 | container_of(ref, struct cgroup_subsys_state, refcnt); | 3568 | container_of(ref, struct cgroup_subsys_state, refcnt); |
| 4035 | 3569 | ||
| 4036 | rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL); | 3570 | RCU_INIT_POINTER(css->cgroup->subsys[css->ss->id], NULL); |
| 4037 | call_rcu(&css->rcu_head, css_free_rcu_fn); | 3571 | call_rcu(&css->rcu_head, css_free_rcu_fn); |
| 4038 | } | 3572 | } |
| 4039 | 3573 | ||
| @@ -4058,6 +3592,7 @@ static int online_css(struct cgroup_subsys_state *css) | |||
| 4058 | struct cgroup_subsys *ss = css->ss; | 3592 | struct cgroup_subsys *ss = css->ss; |
| 4059 | int ret = 0; | 3593 | int ret = 0; |
| 4060 | 3594 | ||
| 3595 | lockdep_assert_held(&cgroup_tree_mutex); | ||
| 4061 | lockdep_assert_held(&cgroup_mutex); | 3596 | lockdep_assert_held(&cgroup_mutex); |
| 4062 | 3597 | ||
| 4063 | if (ss->css_online) | 3598 | if (ss->css_online) |
| @@ -4065,7 +3600,7 @@ static int online_css(struct cgroup_subsys_state *css) | |||
| 4065 | if (!ret) { | 3600 | if (!ret) { |
| 4066 | css->flags |= CSS_ONLINE; | 3601 | css->flags |= CSS_ONLINE; |
| 4067 | css->cgroup->nr_css++; | 3602 | css->cgroup->nr_css++; |
| 4068 | rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css); | 3603 | rcu_assign_pointer(css->cgroup->subsys[ss->id], css); |
| 4069 | } | 3604 | } |
| 4070 | return ret; | 3605 | return ret; |
| 4071 | } | 3606 | } |
| @@ -4075,6 +3610,7 @@ static void offline_css(struct cgroup_subsys_state *css) | |||
| 4075 | { | 3610 | { |
| 4076 | struct cgroup_subsys *ss = css->ss; | 3611 | struct cgroup_subsys *ss = css->ss; |
| 4077 | 3612 | ||
| 3613 | lockdep_assert_held(&cgroup_tree_mutex); | ||
| 4078 | lockdep_assert_held(&cgroup_mutex); | 3614 | lockdep_assert_held(&cgroup_mutex); |
| 4079 | 3615 | ||
| 4080 | if (!(css->flags & CSS_ONLINE)) | 3616 | if (!(css->flags & CSS_ONLINE)) |
| @@ -4085,7 +3621,7 @@ static void offline_css(struct cgroup_subsys_state *css) | |||
| 4085 | 3621 | ||
| 4086 | css->flags &= ~CSS_ONLINE; | 3622 | css->flags &= ~CSS_ONLINE; |
| 4087 | css->cgroup->nr_css--; | 3623 | css->cgroup->nr_css--; |
| 4088 | RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css); | 3624 | RCU_INIT_POINTER(css->cgroup->subsys[ss->id], css); |
| 4089 | } | 3625 | } |
| 4090 | 3626 | ||
| 4091 | /** | 3627 | /** |
| @@ -4103,7 +3639,6 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
| 4103 | struct cgroup_subsys_state *css; | 3639 | struct cgroup_subsys_state *css; |
| 4104 | int err; | 3640 | int err; |
| 4105 | 3641 | ||
| 4106 | lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); | ||
| 4107 | lockdep_assert_held(&cgroup_mutex); | 3642 | lockdep_assert_held(&cgroup_mutex); |
| 4108 | 3643 | ||
| 4109 | css = ss->css_alloc(cgroup_css(parent, ss)); | 3644 | css = ss->css_alloc(cgroup_css(parent, ss)); |
| @@ -4112,21 +3647,23 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
| 4112 | 3647 | ||
| 4113 | err = percpu_ref_init(&css->refcnt, css_release); | 3648 | err = percpu_ref_init(&css->refcnt, css_release); |
| 4114 | if (err) | 3649 | if (err) |
| 4115 | goto err_free; | 3650 | goto err_free_css; |
| 4116 | 3651 | ||
| 4117 | init_css(css, ss, cgrp); | 3652 | init_css(css, ss, cgrp); |
| 4118 | 3653 | ||
| 4119 | err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id); | 3654 | err = cgroup_populate_dir(cgrp, 1 << ss->id); |
| 4120 | if (err) | 3655 | if (err) |
| 4121 | goto err_free; | 3656 | goto err_free_percpu_ref; |
| 4122 | 3657 | ||
| 4123 | err = online_css(css); | 3658 | err = online_css(css); |
| 4124 | if (err) | 3659 | if (err) |
| 4125 | goto err_free; | 3660 | goto err_clear_dir; |
| 4126 | 3661 | ||
| 4127 | dget(cgrp->dentry); | 3662 | cgroup_get(cgrp); |
| 4128 | css_get(css->parent); | 3663 | css_get(css->parent); |
| 4129 | 3664 | ||
| 3665 | cgrp->subsys_mask |= 1 << ss->id; | ||
| 3666 | |||
| 4130 | if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && | 3667 | if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && |
| 4131 | parent->parent) { | 3668 | parent->parent) { |
| 4132 | pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", | 3669 | pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", |
| @@ -4138,41 +3675,43 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
| 4138 | 3675 | ||
| 4139 | return 0; | 3676 | return 0; |
| 4140 | 3677 | ||
| 4141 | err_free: | 3678 | err_clear_dir: |
| 3679 | cgroup_clear_dir(css->cgroup, 1 << css->ss->id); | ||
| 3680 | err_free_percpu_ref: | ||
| 4142 | percpu_ref_cancel_init(&css->refcnt); | 3681 | percpu_ref_cancel_init(&css->refcnt); |
| 3682 | err_free_css: | ||
| 4143 | ss->css_free(css); | 3683 | ss->css_free(css); |
| 4144 | return err; | 3684 | return err; |
| 4145 | } | 3685 | } |
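The single err_free label in create_css() becomes a three-step unwind ladder (err_clear_dir, err_free_percpu_ref, err_free_css) so each failure point only undoes what has already succeeded. A generic sketch of this goto-cleanup idiom; step_a/b/c and undo_a/b are hypothetical helpers, not cgroup functions:

```c
/* Hypothetical helpers standing in for the real setup/teardown steps. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_a(void) { }
static void undo_b(void) { }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;		/* nothing to undo yet */

	err = step_b();
	if (err)
		goto err_undo_a;	/* undo step_a only */

	err = step_c();
	if (err)
		goto err_undo_b;	/* undo step_b, then step_a */

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
out:
	return err;
}
```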
| 4146 | 3686 | ||
| 4147 | /* | 3687 | /** |
| 4148 | * cgroup_create - create a cgroup | 3688 | * cgroup_create - create a cgroup |
| 4149 | * @parent: cgroup that will be parent of the new cgroup | 3689 | * @parent: cgroup that will be parent of the new cgroup |
| 4150 | * @dentry: dentry of the new cgroup | 3690 | * @name: name of the new cgroup |
| 4151 | * @mode: mode to set on new inode | 3691 | * @mode: mode to set on new cgroup |
| 4152 | * | ||
| 4153 | * Must be called with the mutex on the parent inode held | ||
| 4154 | */ | 3692 | */ |
| 4155 | static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | 3693 | static long cgroup_create(struct cgroup *parent, const char *name, |
| 4156 | umode_t mode) | 3694 | umode_t mode) |
| 4157 | { | 3695 | { |
| 4158 | struct cgroup *cgrp; | 3696 | struct cgroup *cgrp; |
| 4159 | struct cgroup_name *name; | 3697 | struct cgroup_root *root = parent->root; |
| 4160 | struct cgroupfs_root *root = parent->root; | ||
| 4161 | int ssid, err; | 3698 | int ssid, err; |
| 4162 | struct cgroup_subsys *ss; | 3699 | struct cgroup_subsys *ss; |
| 4163 | struct super_block *sb = root->sb; | 3700 | struct kernfs_node *kn; |
| 3701 | |||
| 3702 | /* | ||
| 3703 | * XXX: The default hierarchy isn't fully implemented yet. Block | ||
| 3704 | * !root cgroup creation on it for now. | ||
| 3705 | */ | ||
| 3706 | if (root == &cgrp_dfl_root) | ||
| 3707 | return -EINVAL; | ||
| 4164 | 3708 | ||
| 4165 | /* allocate the cgroup and its ID, 0 is reserved for the root */ | 3709 | /* allocate the cgroup and its ID, 0 is reserved for the root */ |
| 4166 | cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); | 3710 | cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); |
| 4167 | if (!cgrp) | 3711 | if (!cgrp) |
| 4168 | return -ENOMEM; | 3712 | return -ENOMEM; |
| 4169 | 3713 | ||
| 4170 | name = cgroup_alloc_name(dentry); | 3714 | mutex_lock(&cgroup_tree_mutex); |
| 4171 | if (!name) { | ||
| 4172 | err = -ENOMEM; | ||
| 4173 | goto err_free_cgrp; | ||
| 4174 | } | ||
| 4175 | rcu_assign_pointer(cgrp->name, name); | ||
| 4176 | 3715 | ||
| 4177 | /* | 3716 | /* |
| 4178 | * Only live parents can have children. Note that the liveliness | 3717 | * Only live parents can have children. Note that the liveliness |
| @@ -4183,7 +3722,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4183 | */ | 3722 | */ |
| 4184 | if (!cgroup_lock_live_group(parent)) { | 3723 | if (!cgroup_lock_live_group(parent)) { |
| 4185 | err = -ENODEV; | 3724 | err = -ENODEV; |
| 4186 | goto err_free_name; | 3725 | goto err_unlock_tree; |
| 4187 | } | 3726 | } |
| 4188 | 3727 | ||
| 4189 | /* | 3728 | /* |
| @@ -4196,18 +3735,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4196 | goto err_unlock; | 3735 | goto err_unlock; |
| 4197 | } | 3736 | } |
| 4198 | 3737 | ||
| 4199 | /* Grab a reference on the superblock so the hierarchy doesn't | ||
| 4200 | * get deleted on unmount if there are child cgroups. This | ||
| 4201 | * can be done outside cgroup_mutex, since the sb can't | ||
| 4202 | * disappear while someone has an open control file on the | ||
| 4203 | * fs */ | ||
| 4204 | atomic_inc(&sb->s_active); | ||
| 4205 | |||
| 4206 | init_cgroup_housekeeping(cgrp); | 3738 | init_cgroup_housekeeping(cgrp); |
| 4207 | 3739 | ||
| 4208 | dentry->d_fsdata = cgrp; | ||
| 4209 | cgrp->dentry = dentry; | ||
| 4210 | |||
| 4211 | cgrp->parent = parent; | 3740 | cgrp->parent = parent; |
| 4212 | cgrp->dummy_css.parent = &parent->dummy_css; | 3741 | cgrp->dummy_css.parent = &parent->dummy_css; |
| 4213 | cgrp->root = parent->root; | 3742 | cgrp->root = parent->root; |
| @@ -4218,24 +3747,26 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4218 | if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) | 3747 | if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) |
| 4219 | set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); | 3748 | set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); |
| 4220 | 3749 | ||
| 3750 | /* create the directory */ | ||
| 3751 | kn = kernfs_create_dir(parent->kn, name, mode, cgrp); | ||
| 3752 | if (IS_ERR(kn)) { | ||
| 3753 | err = PTR_ERR(kn); | ||
| 3754 | goto err_free_id; | ||
| 3755 | } | ||
| 3756 | cgrp->kn = kn; | ||
| 3757 | |||
| 4221 | /* | 3758 | /* |
| 4222 | * Create directory. cgroup_create_file() returns with the new | 3759 | * This extra ref will be put in cgroup_free_fn() and guarantees |
| 4223 | * directory locked on success so that it can be populated without | 3760 | * that @cgrp->kn is always accessible. |
| 4224 | * dropping cgroup_mutex. | ||
| 4225 | */ | 3761 | */ |
| 4226 | err = cgroup_create_file(dentry, S_IFDIR | mode, sb); | 3762 | kernfs_get(kn); |
| 4227 | if (err < 0) | ||
| 4228 | goto err_free_id; | ||
| 4229 | lockdep_assert_held(&dentry->d_inode->i_mutex); | ||
| 4230 | 3763 | ||
| 4231 | cgrp->serial_nr = cgroup_serial_nr_next++; | 3764 | cgrp->serial_nr = cgroup_serial_nr_next++; |
| 4232 | 3765 | ||
| 4233 | /* allocation complete, commit to creation */ | 3766 | /* allocation complete, commit to creation */ |
| 4234 | list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); | 3767 | list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); |
| 4235 | root->number_of_cgroups++; | 3768 | atomic_inc(&root->nr_cgrps); |
| 4236 | 3769 | cgroup_get(parent); | |
| 4237 | /* hold a ref to the parent's dentry */ | ||
| 4238 | dget(parent->dentry); | ||
| 4239 | 3770 | ||
| 4240 | /* | 3771 | /* |
| 4241 | * @cgrp is now fully operational. If something fails after this | 3772 | * @cgrp is now fully operational. If something fails after this |
| @@ -4243,49 +3774,66 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4243 | */ | 3774 | */ |
| 4244 | idr_replace(&root->cgroup_idr, cgrp, cgrp->id); | 3775 | idr_replace(&root->cgroup_idr, cgrp, cgrp->id); |
| 4245 | 3776 | ||
| 3777 | err = cgroup_kn_set_ugid(kn); | ||
| 3778 | if (err) | ||
| 3779 | goto err_destroy; | ||
| 3780 | |||
| 4246 | err = cgroup_addrm_files(cgrp, cgroup_base_files, true); | 3781 | err = cgroup_addrm_files(cgrp, cgroup_base_files, true); |
| 4247 | if (err) | 3782 | if (err) |
| 4248 | goto err_destroy; | 3783 | goto err_destroy; |
| 4249 | 3784 | ||
| 4250 | /* let's create and online css's */ | 3785 | /* let's create and online css's */ |
| 4251 | for_each_subsys(ss, ssid) { | 3786 | for_each_subsys(ss, ssid) { |
| 4252 | if (root->subsys_mask & (1 << ssid)) { | 3787 | if (root->cgrp.subsys_mask & (1 << ssid)) { |
| 4253 | err = create_css(cgrp, ss); | 3788 | err = create_css(cgrp, ss); |
| 4254 | if (err) | 3789 | if (err) |
| 4255 | goto err_destroy; | 3790 | goto err_destroy; |
| 4256 | } | 3791 | } |
| 4257 | } | 3792 | } |
| 4258 | 3793 | ||
| 3794 | kernfs_activate(kn); | ||
| 3795 | |||
| 4259 | mutex_unlock(&cgroup_mutex); | 3796 | mutex_unlock(&cgroup_mutex); |
| 4260 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 3797 | mutex_unlock(&cgroup_tree_mutex); |
| 4261 | 3798 | ||
| 4262 | return 0; | 3799 | return 0; |
| 4263 | 3800 | ||
| 4264 | err_free_id: | 3801 | err_free_id: |
| 4265 | idr_remove(&root->cgroup_idr, cgrp->id); | 3802 | idr_remove(&root->cgroup_idr, cgrp->id); |
| 4266 | /* Release the reference count that we took on the superblock */ | ||
| 4267 | deactivate_super(sb); | ||
| 4268 | err_unlock: | 3803 | err_unlock: |
| 4269 | mutex_unlock(&cgroup_mutex); | 3804 | mutex_unlock(&cgroup_mutex); |
| 4270 | err_free_name: | 3805 | err_unlock_tree: |
| 4271 | kfree(rcu_dereference_raw(cgrp->name)); | 3806 | mutex_unlock(&cgroup_tree_mutex); |
| 4272 | err_free_cgrp: | ||
| 4273 | kfree(cgrp); | 3807 | kfree(cgrp); |
| 4274 | return err; | 3808 | return err; |
| 4275 | 3809 | ||
| 4276 | err_destroy: | 3810 | err_destroy: |
| 4277 | cgroup_destroy_locked(cgrp); | 3811 | cgroup_destroy_locked(cgrp); |
| 4278 | mutex_unlock(&cgroup_mutex); | 3812 | mutex_unlock(&cgroup_mutex); |
| 4279 | mutex_unlock(&dentry->d_inode->i_mutex); | 3813 | mutex_unlock(&cgroup_tree_mutex); |
| 4280 | return err; | 3814 | return err; |
| 4281 | } | 3815 | } |
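cgroup_create() no longer receives a VFS dentry; it creates the directory itself through kernfs and takes an extra reference so cgrp->kn stays usable until the cgroup is freed. A hedged sketch of that creation pattern, with make_dir() as an invented wrapper and priv standing for whatever the caller later wants back from kn->priv:

```c
#include <linux/err.h>
#include <linux/kernfs.h>

/* Invented wrapper: create a kernfs directory, pin it, and make it
 * visible. Mirrors the shape used by the new cgroup_create(). */
static struct kernfs_node *make_dir(struct kernfs_node *parent,
				    const char *name, umode_t mode,
				    void *priv)
{
	struct kernfs_node *kn;

	kn = kernfs_create_dir(parent, name, mode, priv);
	if (IS_ERR(kn))
		return kn;		/* caller handles PTR_ERR(kn) */

	kernfs_get(kn);			/* extra ref: kn outlives removal */
	kernfs_activate(kn);		/* new nodes start deactivated */
	return kn;
}
```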
| 4282 | 3816 | ||
| 4283 | static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | 3817 | static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, |
| 3818 | umode_t mode) | ||
| 4284 | { | 3819 | { |
| 4285 | struct cgroup *c_parent = dentry->d_parent->d_fsdata; | 3820 | struct cgroup *parent = parent_kn->priv; |
| 3821 | int ret; | ||
| 3822 | |||
| 3823 | /* | ||
| 3824 | * cgroup_create() grabs cgroup_tree_mutex which nests outside | ||
| 3825 | * kernfs active_ref and cgroup_create() already synchronizes | ||
| 3826 | * properly against removal through cgroup_lock_live_group(). | ||
| 3827 | * Break it before calling cgroup_create(). | ||
| 3828 | */ | ||
| 3829 | cgroup_get(parent); | ||
| 3830 | kernfs_break_active_protection(parent_kn); | ||
| 3831 | |||
| 3832 | ret = cgroup_create(parent, name, mode); | ||
| 4286 | 3833 | ||
| 4287 | /* the vfs holds inode->i_mutex already */ | 3834 | kernfs_unbreak_active_protection(parent_kn); |
| 4288 | return cgroup_create(c_parent, dentry, mode | S_IFDIR); | 3835 | cgroup_put(parent); |
| 3836 | return ret; | ||
| 4289 | } | 3837 | } |
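cgroup_mkdir() runs inside a kernfs operation that holds an active reference on the parent node, while cgroup_create() takes cgroup_tree_mutex, which nests outside active references; the callback therefore pins the parent and breaks active protection for the duration. A condensed sketch of that bracket, with pin_object()/unpin_object()/do_heavy_work() as placeholders:

```c
#include <linux/kernfs.h>

/* Placeholders for the object lifetime and for work that needs locks
 * nesting outside kernfs active references. */
static void pin_object(void *priv) { }
static void unpin_object(void *priv) { }
static int do_heavy_work(void *priv) { return 0; }

static int my_kernfs_op(struct kernfs_node *kn)
{
	int ret;

	pin_object(kn->priv);			/* keep priv alive ... */
	kernfs_break_active_protection(kn);	/* ... then drop active ref */

	ret = do_heavy_work(kn->priv);		/* may take outer mutexes */

	kernfs_unbreak_active_protection(kn);
	unpin_object(kn->priv);
	return ret;
}
```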
| 4290 | 3838 | ||
| 4291 | /* | 3839 | /* |
| @@ -4298,6 +3846,7 @@ static void css_killed_work_fn(struct work_struct *work) | |||
| 4298 | container_of(work, struct cgroup_subsys_state, destroy_work); | 3846 | container_of(work, struct cgroup_subsys_state, destroy_work); |
| 4299 | struct cgroup *cgrp = css->cgroup; | 3847 | struct cgroup *cgrp = css->cgroup; |
| 4300 | 3848 | ||
| 3849 | mutex_lock(&cgroup_tree_mutex); | ||
| 4301 | mutex_lock(&cgroup_mutex); | 3850 | mutex_lock(&cgroup_mutex); |
| 4302 | 3851 | ||
| 4303 | /* | 3852 | /* |
| @@ -4315,6 +3864,7 @@ static void css_killed_work_fn(struct work_struct *work) | |||
| 4315 | cgroup_destroy_css_killed(cgrp); | 3864 | cgroup_destroy_css_killed(cgrp); |
| 4316 | 3865 | ||
| 4317 | mutex_unlock(&cgroup_mutex); | 3866 | mutex_unlock(&cgroup_mutex); |
| 3867 | mutex_unlock(&cgroup_tree_mutex); | ||
| 4318 | 3868 | ||
| 4319 | /* | 3869 | /* |
| 4320 | * Put the css refs from kill_css(). Each css holds an extra | 3870 | * Put the css refs from kill_css(). Each css holds an extra |
| @@ -4336,18 +3886,15 @@ static void css_killed_ref_fn(struct percpu_ref *ref) | |||
| 4336 | queue_work(cgroup_destroy_wq, &css->destroy_work); | 3886 | queue_work(cgroup_destroy_wq, &css->destroy_work); |
| 4337 | } | 3887 | } |
| 4338 | 3888 | ||
| 4339 | /** | 3889 | static void __kill_css(struct cgroup_subsys_state *css) |
| 4340 | * kill_css - destroy a css | ||
| 4341 | * @css: css to destroy | ||
| 4342 | * | ||
| 4343 | * This function initiates destruction of @css by removing cgroup interface | ||
| 4344 | * files and putting its base reference. ->css_offline() will be invoked | ||
| 4345 | * asynchronously once css_tryget() is guaranteed to fail and when the | ||
| 4346 | * reference count reaches zero, @css will be released. | ||
| 4347 | */ | ||
| 4348 | static void kill_css(struct cgroup_subsys_state *css) | ||
| 4349 | { | 3890 | { |
| 4350 | cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id); | 3891 | lockdep_assert_held(&cgroup_tree_mutex); |
| 3892 | |||
| 3893 | /* | ||
| 3894 | * This must happen before css is disassociated with its cgroup. | ||
| 3895 | * See seq_css() for details. | ||
| 3896 | */ | ||
| 3897 | cgroup_clear_dir(css->cgroup, 1 << css->ss->id); | ||
| 4351 | 3898 | ||
| 4352 | /* | 3899 | /* |
| 4353 | * Killing would put the base ref, but we need to keep it alive | 3900 | * Killing would put the base ref, but we need to keep it alive |
| @@ -4369,6 +3916,28 @@ static void kill_css(struct cgroup_subsys_state *css) | |||
| 4369 | } | 3916 | } |
| 4370 | 3917 | ||
| 4371 | /** | 3918 | /** |
| 3919 | * kill_css - destroy a css | ||
| 3920 | * @css: css to destroy | ||
| 3921 | * | ||
| 3922 | * This function initiates destruction of @css by removing cgroup interface | ||
| 3923 | * files and putting its base reference. ->css_offline() will be invoked | ||
| 3924 | * asynchronously once css_tryget() is guaranteed to fail and when the | ||
| 3925 | * reference count reaches zero, @css will be released. | ||
| 3926 | */ | ||
| 3927 | static void kill_css(struct cgroup_subsys_state *css) | ||
| 3928 | { | ||
| 3929 | struct cgroup *cgrp = css->cgroup; | ||
| 3930 | |||
| 3931 | lockdep_assert_held(&cgroup_tree_mutex); | ||
| 3932 | |||
| 3933 | /* if already killed, noop */ | ||
| 3934 | if (cgrp->subsys_mask & (1 << css->ss->id)) { | ||
| 3935 | cgrp->subsys_mask &= ~(1 << css->ss->id); | ||
| 3936 | __kill_css(css); | ||
| 3937 | } | ||
| 3938 | } | ||
| 3939 | |||
| 3940 | /** | ||
| 4372 | * cgroup_destroy_locked - the first stage of cgroup destruction | 3941 | * cgroup_destroy_locked - the first stage of cgroup destruction |
| 4373 | * @cgrp: cgroup to be destroyed | 3942 | * @cgrp: cgroup to be destroyed |
| 4374 | * | 3943 | * |
| @@ -4395,22 +3964,21 @@ static void kill_css(struct cgroup_subsys_state *css) | |||
| 4395 | static int cgroup_destroy_locked(struct cgroup *cgrp) | 3964 | static int cgroup_destroy_locked(struct cgroup *cgrp) |
| 4396 | __releases(&cgroup_mutex) __acquires(&cgroup_mutex) | 3965 | __releases(&cgroup_mutex) __acquires(&cgroup_mutex) |
| 4397 | { | 3966 | { |
| 4398 | struct dentry *d = cgrp->dentry; | ||
| 4399 | struct cgroup_subsys_state *css; | ||
| 4400 | struct cgroup *child; | 3967 | struct cgroup *child; |
| 3968 | struct cgroup_subsys_state *css; | ||
| 4401 | bool empty; | 3969 | bool empty; |
| 4402 | int ssid; | 3970 | int ssid; |
| 4403 | 3971 | ||
| 4404 | lockdep_assert_held(&d->d_inode->i_mutex); | 3972 | lockdep_assert_held(&cgroup_tree_mutex); |
| 4405 | lockdep_assert_held(&cgroup_mutex); | 3973 | lockdep_assert_held(&cgroup_mutex); |
| 4406 | 3974 | ||
| 4407 | /* | 3975 | /* |
| 4408 | * css_set_lock synchronizes access to ->cset_links and prevents | 3976 | * css_set_rwsem synchronizes access to ->cset_links and prevents |
| 4409 | * @cgrp from being removed while __put_css_set() is in progress. | 3977 | * @cgrp from being removed while put_css_set() is in progress. |
| 4410 | */ | 3978 | */ |
| 4411 | read_lock(&css_set_lock); | 3979 | down_read(&css_set_rwsem); |
| 4412 | empty = list_empty(&cgrp->cset_links); | 3980 | empty = list_empty(&cgrp->cset_links); |
| 4413 | read_unlock(&css_set_lock); | 3981 | up_read(&css_set_rwsem); |
| 4414 | if (!empty) | 3982 | if (!empty) |
| 4415 | return -EBUSY; | 3983 | return -EBUSY; |
| 4416 | 3984 | ||
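cgroup_destroy_locked() now guards ->cset_links with css_set_rwsem instead of the css_set_lock rwlock, so the read-side section can block. A before/after sketch of the two locking styles with illustrative names:

```c
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(old_lock);		/* spinning rwlock (old style) */
static DECLARE_RWSEM(new_rwsem);	/* sleeping rwsem (new style) */

static bool links_empty_old(struct list_head *links)
{
	bool empty;

	read_lock(&old_lock);		/* critical section may not sleep */
	empty = list_empty(links);
	read_unlock(&old_lock);
	return empty;
}

static bool links_empty_new(struct list_head *links)
{
	bool empty;

	down_read(&new_rwsem);		/* readers may now block */
	empty = list_empty(links);
	up_read(&new_rwsem);
	return empty;
}
```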
| @@ -4431,14 +3999,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4431 | return -EBUSY; | 3999 | return -EBUSY; |
| 4432 | 4000 | ||
| 4433 | /* | 4001 | /* |
| 4434 | * Initiate massacre of all css's. cgroup_destroy_css_killed() | ||
| 4435 | * will be invoked to perform the rest of destruction once the | ||
| 4436 | * percpu refs of all css's are confirmed to be killed. | ||
| 4437 | */ | ||
| 4438 | for_each_css(css, ssid, cgrp) | ||
| 4439 | kill_css(css); | ||
| 4440 | |||
| 4441 | /* | ||
| 4442 | * Mark @cgrp dead. This prevents further task migration and child | 4002 | * Mark @cgrp dead. This prevents further task migration and child |
| 4443 | * creation by disabling cgroup_lock_live_group(). Note that | 4003 | * creation by disabling cgroup_lock_live_group(). Note that |
| 4444 | * CGRP_DEAD assertion is depended upon by css_next_child() to | 4004 | * CGRP_DEAD assertion is depended upon by css_next_child() to |
| @@ -4447,6 +4007,17 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4447 | */ | 4007 | */ |
| 4448 | set_bit(CGRP_DEAD, &cgrp->flags); | 4008 | set_bit(CGRP_DEAD, &cgrp->flags); |
| 4449 | 4009 | ||
| 4010 | /* | ||
| 4011 | * Initiate massacre of all css's. cgroup_destroy_css_killed() | ||
| 4012 | * will be invoked to perform the rest of destruction once the | ||
| 4013 | * percpu refs of all css's are confirmed to be killed. This | ||
| 4014 | * involves removing the subsystem's files, drop cgroup_mutex. | ||
| 4015 | */ | ||
| 4016 | mutex_unlock(&cgroup_mutex); | ||
| 4017 | for_each_css(css, ssid, cgrp) | ||
| 4018 | kill_css(css); | ||
| 4019 | mutex_lock(&cgroup_mutex); | ||
| 4020 | |||
| 4450 | /* CGRP_DEAD is set, remove from ->release_list for the last time */ | 4021 | /* CGRP_DEAD is set, remove from ->release_list for the last time */ |
| 4451 | raw_spin_lock(&release_list_lock); | 4022 | raw_spin_lock(&release_list_lock); |
| 4452 | if (!list_empty(&cgrp->release_list)) | 4023 | if (!list_empty(&cgrp->release_list)) |
| @@ -4462,14 +4033,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4462 | if (!cgrp->nr_css) | 4033 | if (!cgrp->nr_css) |
| 4463 | cgroup_destroy_css_killed(cgrp); | 4034 | cgroup_destroy_css_killed(cgrp); |
| 4464 | 4035 | ||
| 4036 | /* remove @cgrp directory along with the base files */ | ||
| 4037 | mutex_unlock(&cgroup_mutex); | ||
| 4038 | |||
| 4465 | /* | 4039 | /* |
| 4466 | * Clear the base files and remove @cgrp directory. The removal | 4040 | * There are two control paths which try to determine cgroup from |
| 4467 | * puts the base ref but we aren't quite done with @cgrp yet, so | 4041 | * dentry without going through kernfs - cgroupstats_build() and |
| 4468 | * hold onto it. | 4042 | * css_tryget_from_dir(). Those are supported by RCU protecting |
| 4043 | * clearing of cgrp->kn->priv backpointer, which should happen | ||
| 4044 | * after all files under it have been removed. | ||
| 4469 | */ | 4045 | */ |
| 4470 | cgroup_addrm_files(cgrp, cgroup_base_files, false); | 4046 | kernfs_remove(cgrp->kn); /* @cgrp has an extra ref on its kn */ |
| 4471 | dget(d); | 4047 | RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL); |
| 4472 | cgroup_d_remove_dir(d); | 4048 | |
| 4049 | mutex_lock(&cgroup_mutex); | ||
| 4473 | 4050 | ||
| 4474 | return 0; | 4051 | return 0; |
| 4475 | }; | 4052 | }; |
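The tail of cgroup_destroy_locked() drops cgroup_mutex, removes the directory through kernfs, and only then clears the kn->priv backpointer under RCU, so the dentry-based lookups named in the comment see either the live cgroup or NULL. A short sketch of that ordering; remove_dir() is an invented name, and the extra kernfs reference is assumed to be held by the owning object:

```c
#include <linux/kernfs.h>
#include <linux/rcupdate.h>

/* Sketch: tear down a kernfs directory whose ->priv is read under RCU
 * by out-of-band lookups; the owner holds its own kernfs_get() ref. */
static void remove_dir(struct kernfs_node *kn)
{
	kernfs_remove(kn);	/* removes kn and everything beneath it */

	/* Clear the backpointer only after the files are gone, so RCU
	 * readers that still reach kn observe a consistent NULL. */
	RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL);
}
```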
| @@ -4486,72 +4063,82 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4486 | static void cgroup_destroy_css_killed(struct cgroup *cgrp) | 4063 | static void cgroup_destroy_css_killed(struct cgroup *cgrp) |
| 4487 | { | 4064 | { |
| 4488 | struct cgroup *parent = cgrp->parent; | 4065 | struct cgroup *parent = cgrp->parent; |
| 4489 | struct dentry *d = cgrp->dentry; | ||
| 4490 | 4066 | ||
| 4067 | lockdep_assert_held(&cgroup_tree_mutex); | ||
| 4491 | lockdep_assert_held(&cgroup_mutex); | 4068 | lockdep_assert_held(&cgroup_mutex); |
| 4492 | 4069 | ||
| 4493 | /* delete this cgroup from parent->children */ | 4070 | /* delete this cgroup from parent->children */ |
| 4494 | list_del_rcu(&cgrp->sibling); | 4071 | list_del_rcu(&cgrp->sibling); |
| 4495 | 4072 | ||
| 4496 | dput(d); | 4073 | cgroup_put(cgrp); |
| 4497 | 4074 | ||
| 4498 | set_bit(CGRP_RELEASABLE, &parent->flags); | 4075 | set_bit(CGRP_RELEASABLE, &parent->flags); |
| 4499 | check_for_release(parent); | 4076 | check_for_release(parent); |
| 4500 | } | 4077 | } |
| 4501 | 4078 | ||
| 4502 | static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) | 4079 | static int cgroup_rmdir(struct kernfs_node *kn) |
| 4503 | { | 4080 | { |
| 4504 | int ret; | 4081 | struct cgroup *cgrp = kn->priv; |
| 4505 | 4082 | int ret = 0; | |
| 4506 | mutex_lock(&cgroup_mutex); | ||
| 4507 | ret = cgroup_destroy_locked(dentry->d_fsdata); | ||
| 4508 | mutex_unlock(&cgroup_mutex); | ||
| 4509 | 4083 | ||
| 4510 | return ret; | 4084 | /* |
| 4511 | } | 4085 | * This is self-destruction but @kn can't be removed while this |
| 4086 | * callback is in progress. Let's break active protection. Once | ||
| 4087 | * the protection is broken, @cgrp can be destroyed at any point. | ||
| 4088 | * Pin it so that it stays accessible. | ||
| 4089 | */ | ||
| 4090 | cgroup_get(cgrp); | ||
| 4091 | kernfs_break_active_protection(kn); | ||
| 4512 | 4092 | ||
| 4513 | static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss) | 4093 | mutex_lock(&cgroup_tree_mutex); |
| 4514 | { | 4094 | mutex_lock(&cgroup_mutex); |
| 4515 | INIT_LIST_HEAD(&ss->cftsets); | ||
| 4516 | 4095 | ||
| 4517 | /* | 4096 | /* |
| 4518 | * base_cftset is embedded in subsys itself, no need to worry about | 4097 | * @cgrp might already have been destroyed while we're trying to |
| 4519 | * deregistration. | 4098 | * grab the mutexes. |
| 4520 | */ | 4099 | */ |
| 4521 | if (ss->base_cftypes) { | 4100 | if (!cgroup_is_dead(cgrp)) |
| 4522 | struct cftype *cft; | 4101 | ret = cgroup_destroy_locked(cgrp); |
| 4523 | 4102 | ||
| 4524 | for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++) | 4103 | mutex_unlock(&cgroup_mutex); |
| 4525 | cft->ss = ss; | 4104 | mutex_unlock(&cgroup_tree_mutex); |
| 4526 | 4105 | ||
| 4527 | ss->base_cftset.cfts = ss->base_cftypes; | 4106 | kernfs_unbreak_active_protection(kn); |
| 4528 | list_add_tail(&ss->base_cftset.node, &ss->cftsets); | 4107 | cgroup_put(cgrp); |
| 4529 | } | 4108 | return ret; |
| 4530 | } | 4109 | } |
| 4531 | 4110 | ||
| 4111 | static struct kernfs_syscall_ops cgroup_kf_syscall_ops = { | ||
| 4112 | .remount_fs = cgroup_remount, | ||
| 4113 | .show_options = cgroup_show_options, | ||
| 4114 | .mkdir = cgroup_mkdir, | ||
| 4115 | .rmdir = cgroup_rmdir, | ||
| 4116 | .rename = cgroup_rename, | ||
| 4117 | }; | ||
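The VFS directory operations are replaced by this kernfs_syscall_ops table; kernfs invokes the callbacks when userspace performs the corresponding syscalls on the mounted hierarchy. A minimal sketch of such a table with stub handlers (the demo_* names are invented; the callback signatures match the ones used above):

```c
#include <linux/errno.h>
#include <linux/kernfs.h>

static int demo_mkdir(struct kernfs_node *parent, const char *name,
		      umode_t mode)
{
	return -ENOSYS;		/* a real handler would create an object */
}

static int demo_rmdir(struct kernfs_node *kn)
{
	return -ENOSYS;		/* a real handler would destroy kn->priv */
}

static struct kernfs_syscall_ops demo_syscall_ops = {
	.mkdir	= demo_mkdir,
	.rmdir	= demo_rmdir,
};
```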
| 4118 | |||
| 4532 | static void __init cgroup_init_subsys(struct cgroup_subsys *ss) | 4119 | static void __init cgroup_init_subsys(struct cgroup_subsys *ss) |
| 4533 | { | 4120 | { |
| 4534 | struct cgroup_subsys_state *css; | 4121 | struct cgroup_subsys_state *css; |
| 4535 | 4122 | ||
| 4536 | printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); | 4123 | printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); |
| 4537 | 4124 | ||
| 4125 | mutex_lock(&cgroup_tree_mutex); | ||
| 4538 | mutex_lock(&cgroup_mutex); | 4126 | mutex_lock(&cgroup_mutex); |
| 4539 | 4127 | ||
| 4540 | /* init base cftset */ | 4128 | INIT_LIST_HEAD(&ss->cfts); |
| 4541 | cgroup_init_cftsets(ss); | ||
| 4542 | 4129 | ||
| 4543 | /* Create the top cgroup state for this subsystem */ | 4130 | /* Create the root cgroup state for this subsystem */ |
| 4544 | ss->root = &cgroup_dummy_root; | 4131 | ss->root = &cgrp_dfl_root; |
| 4545 | css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss)); | 4132 | css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss)); |
| 4546 | /* We don't handle early failures gracefully */ | 4133 | /* We don't handle early failures gracefully */ |
| 4547 | BUG_ON(IS_ERR(css)); | 4134 | BUG_ON(IS_ERR(css)); |
| 4548 | init_css(css, ss, cgroup_dummy_top); | 4135 | init_css(css, ss, &cgrp_dfl_root.cgrp); |
| 4549 | 4136 | ||
| 4550 | /* Update the init_css_set to contain a subsys | 4137 | /* Update the init_css_set to contain a subsys |
| 4551 | * pointer to this state - since the subsystem is | 4138 | * pointer to this state - since the subsystem is |
| 4552 | * newly registered, all tasks and hence the | 4139 | * newly registered, all tasks and hence the |
| 4553 | * init_css_set is in the subsystem's top cgroup. */ | 4140 | * init_css_set is in the subsystem's root cgroup. */ |
| 4554 | init_css_set.subsys[ss->subsys_id] = css; | 4141 | init_css_set.subsys[ss->id] = css; |
| 4555 | 4142 | ||
| 4556 | need_forkexit_callback |= ss->fork || ss->exit; | 4143 | need_forkexit_callback |= ss->fork || ss->exit; |
| 4557 | 4144 | ||
| @@ -4562,185 +4149,11 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) | |||
| 4562 | 4149 | ||
| 4563 | BUG_ON(online_css(css)); | 4150 | BUG_ON(online_css(css)); |
| 4564 | 4151 | ||
| 4565 | mutex_unlock(&cgroup_mutex); | 4152 | cgrp_dfl_root.cgrp.subsys_mask |= 1 << ss->id; |
| 4566 | |||
| 4567 | /* this function shouldn't be used with modular subsystems, since they | ||
| 4568 | * need to register a subsys_id, among other things */ | ||
| 4569 | BUG_ON(ss->module); | ||
| 4570 | } | ||
| 4571 | |||
| 4572 | /** | ||
| 4573 | * cgroup_load_subsys: load and register a modular subsystem at runtime | ||
| 4574 | * @ss: the subsystem to load | ||
| 4575 | * | ||
| 4576 | * This function should be called in a modular subsystem's initcall. If the | ||
| 4577 | * subsystem is built as a module, it will be assigned a new subsys_id and set | ||
| 4578 | * up for use. If the subsystem is built-in anyway, work is delegated to the | ||
| 4579 | * simpler cgroup_init_subsys. | ||
| 4580 | */ | ||
| 4581 | int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) | ||
| 4582 | { | ||
| 4583 | struct cgroup_subsys_state *css; | ||
| 4584 | int i, ret; | ||
| 4585 | struct hlist_node *tmp; | ||
| 4586 | struct css_set *cset; | ||
| 4587 | unsigned long key; | ||
| 4588 | |||
| 4589 | /* check name and function validity */ | ||
| 4590 | if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN || | ||
| 4591 | ss->css_alloc == NULL || ss->css_free == NULL) | ||
| 4592 | return -EINVAL; | ||
| 4593 | |||
| 4594 | /* | ||
| 4595 | * we don't support callbacks in modular subsystems. this check is | ||
| 4596 | * before the ss->module check for consistency; a subsystem that could | ||
| 4597 | * be a module should still have no callbacks even if the user isn't | ||
| 4598 | * compiling it as one. | ||
| 4599 | */ | ||
| 4600 | if (ss->fork || ss->exit) | ||
| 4601 | return -EINVAL; | ||
| 4602 | |||
| 4603 | /* | ||
| 4604 | * an optionally modular subsystem is built-in: we want to do nothing, | ||
| 4605 | * since cgroup_init_subsys will have already taken care of it. | ||
| 4606 | */ | ||
| 4607 | if (ss->module == NULL) { | ||
| 4608 | /* a sanity check */ | ||
| 4609 | BUG_ON(cgroup_subsys[ss->subsys_id] != ss); | ||
| 4610 | return 0; | ||
| 4611 | } | ||
| 4612 | |||
| 4613 | /* init base cftset */ | ||
| 4614 | cgroup_init_cftsets(ss); | ||
| 4615 | |||
| 4616 | mutex_lock(&cgroup_mutex); | ||
| 4617 | mutex_lock(&cgroup_root_mutex); | ||
| 4618 | cgroup_subsys[ss->subsys_id] = ss; | ||
| 4619 | |||
| 4620 | /* | ||
| 4621 | * no ss->css_alloc seems to need anything important in the ss | ||
| 4622 | * struct, so this can happen first (i.e. before the dummy root | ||
| 4623 | * attachment). | ||
| 4624 | */ | ||
| 4625 | css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss)); | ||
| 4626 | if (IS_ERR(css)) { | ||
| 4627 | /* failure case - need to deassign the cgroup_subsys[] slot. */ | ||
| 4628 | cgroup_subsys[ss->subsys_id] = NULL; | ||
| 4629 | mutex_unlock(&cgroup_root_mutex); | ||
| 4630 | mutex_unlock(&cgroup_mutex); | ||
| 4631 | return PTR_ERR(css); | ||
| 4632 | } | ||
| 4633 | |||
| 4634 | ss->root = &cgroup_dummy_root; | ||
| 4635 | |||
| 4636 | /* our new subsystem will be attached to the dummy hierarchy. */ | ||
| 4637 | init_css(css, ss, cgroup_dummy_top); | ||
| 4638 | |||
| 4639 | /* | ||
| 4640 | * Now we need to entangle the css into the existing css_sets. unlike | ||
| 4641 | * in cgroup_init_subsys, there are now multiple css_sets, so each one | ||
| 4642 | * will need a new pointer to it; done by iterating the css_set_table. | ||
| 4643 | * furthermore, modifying the existing css_sets will corrupt the hash | ||
| 4644 | * table state, so each changed css_set will need its hash recomputed. | ||
| 4645 | * this is all done under the css_set_lock. | ||
| 4646 | */ | ||
| 4647 | write_lock(&css_set_lock); | ||
| 4648 | hash_for_each_safe(css_set_table, i, tmp, cset, hlist) { | ||
| 4649 | /* skip entries that we already rehashed */ | ||
| 4650 | if (cset->subsys[ss->subsys_id]) | ||
| 4651 | continue; | ||
| 4652 | /* remove existing entry */ | ||
| 4653 | hash_del(&cset->hlist); | ||
| 4654 | /* set new value */ | ||
| 4655 | cset->subsys[ss->subsys_id] = css; | ||
| 4656 | /* recompute hash and restore entry */ | ||
| 4657 | key = css_set_hash(cset->subsys); | ||
| 4658 | hash_add(css_set_table, &cset->hlist, key); | ||
| 4659 | } | ||
| 4660 | write_unlock(&css_set_lock); | ||
| 4661 | |||
| 4662 | ret = online_css(css); | ||
| 4663 | if (ret) { | ||
| 4664 | ss->css_free(css); | ||
| 4665 | goto err_unload; | ||
| 4666 | } | ||
| 4667 | |||
| 4668 | /* success! */ | ||
| 4669 | mutex_unlock(&cgroup_root_mutex); | ||
| 4670 | mutex_unlock(&cgroup_mutex); | ||
| 4671 | return 0; | ||
| 4672 | |||
| 4673 | err_unload: | ||
| 4674 | mutex_unlock(&cgroup_root_mutex); | ||
| 4675 | mutex_unlock(&cgroup_mutex); | ||
| 4676 | /* @ss can't be mounted here as try_module_get() would fail */ | ||
| 4677 | cgroup_unload_subsys(ss); | ||
| 4678 | return ret; | ||
| 4679 | } | ||
| 4680 | EXPORT_SYMBOL_GPL(cgroup_load_subsys); | ||
| 4681 | |||
| 4682 | /** | ||
| 4683 | * cgroup_unload_subsys: unload a modular subsystem | ||
| 4684 | * @ss: the subsystem to unload | ||
| 4685 | * | ||
| 4686 | * This function should be called in a modular subsystem's exitcall. When this | ||
| 4687 | * function is invoked, the refcount on the subsystem's module will be 0, so | ||
| 4688 | * the subsystem will not be attached to any hierarchy. | ||
| 4689 | */ | ||
| 4690 | void cgroup_unload_subsys(struct cgroup_subsys *ss) | ||
| 4691 | { | ||
| 4692 | struct cgrp_cset_link *link; | ||
| 4693 | struct cgroup_subsys_state *css; | ||
| 4694 | |||
| 4695 | BUG_ON(ss->module == NULL); | ||
| 4696 | |||
| 4697 | /* | ||
| 4698 | * we shouldn't be called if the subsystem is in use, and the use of | ||
| 4699 | * try_module_get() in rebind_subsystems() should ensure that it | ||
| 4700 | * doesn't start being used while we're killing it off. | ||
| 4701 | */ | ||
| 4702 | BUG_ON(ss->root != &cgroup_dummy_root); | ||
| 4703 | |||
| 4704 | mutex_lock(&cgroup_mutex); | ||
| 4705 | mutex_lock(&cgroup_root_mutex); | ||
| 4706 | |||
| 4707 | css = cgroup_css(cgroup_dummy_top, ss); | ||
| 4708 | if (css) | ||
| 4709 | offline_css(css); | ||
| 4710 | 4153 | ||
| 4711 | /* deassign the subsys_id */ | ||
| 4712 | cgroup_subsys[ss->subsys_id] = NULL; | ||
| 4713 | |||
| 4714 | /* | ||
| 4715 | * disentangle the css from all css_sets attached to the dummy | ||
| 4716 | * top. as in loading, we need to pay our respects to the hashtable | ||
| 4717 | * gods. | ||
| 4718 | */ | ||
| 4719 | write_lock(&css_set_lock); | ||
| 4720 | list_for_each_entry(link, &cgroup_dummy_top->cset_links, cset_link) { | ||
| 4721 | struct css_set *cset = link->cset; | ||
| 4722 | unsigned long key; | ||
| 4723 | |||
| 4724 | hash_del(&cset->hlist); | ||
| 4725 | cset->subsys[ss->subsys_id] = NULL; | ||
| 4726 | key = css_set_hash(cset->subsys); | ||
| 4727 | hash_add(css_set_table, &cset->hlist, key); | ||
| 4728 | } | ||
| 4729 | write_unlock(&css_set_lock); | ||
| 4730 | |||
| 4731 | /* | ||
| 4732 | * remove subsystem's css from the cgroup_dummy_top and free it - | ||
| 4733 | * need to free before marking as null because ss->css_free needs | ||
| 4734 | * the cgrp->subsys pointer to find their state. | ||
| 4735 | */ | ||
| 4736 | if (css) | ||
| 4737 | ss->css_free(css); | ||
| 4738 | RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL); | ||
| 4739 | |||
| 4740 | mutex_unlock(&cgroup_root_mutex); | ||
| 4741 | mutex_unlock(&cgroup_mutex); | 4154 | mutex_unlock(&cgroup_mutex); |
| 4155 | mutex_unlock(&cgroup_tree_mutex); | ||
| 4742 | } | 4156 | } |
| 4743 | EXPORT_SYMBOL_GPL(cgroup_unload_subsys); | ||
| 4744 | 4157 | ||
| 4745 | /** | 4158 | /** |
| 4746 | * cgroup_init_early - cgroup initialization at system boot | 4159 | * cgroup_init_early - cgroup initialization at system boot |
| @@ -4750,34 +4163,24 @@ EXPORT_SYMBOL_GPL(cgroup_unload_subsys); | |||
| 4750 | */ | 4163 | */ |
| 4751 | int __init cgroup_init_early(void) | 4164 | int __init cgroup_init_early(void) |
| 4752 | { | 4165 | { |
| 4166 | static struct cgroup_sb_opts __initdata opts = | ||
| 4167 | { .flags = CGRP_ROOT_SANE_BEHAVIOR }; | ||
| 4753 | struct cgroup_subsys *ss; | 4168 | struct cgroup_subsys *ss; |
| 4754 | int i; | 4169 | int i; |
| 4755 | 4170 | ||
| 4756 | atomic_set(&init_css_set.refcount, 1); | 4171 | init_cgroup_root(&cgrp_dfl_root, &opts); |
| 4757 | INIT_LIST_HEAD(&init_css_set.cgrp_links); | ||
| 4758 | INIT_LIST_HEAD(&init_css_set.tasks); | ||
| 4759 | INIT_HLIST_NODE(&init_css_set.hlist); | ||
| 4760 | css_set_count = 1; | ||
| 4761 | init_cgroup_root(&cgroup_dummy_root); | ||
| 4762 | cgroup_root_count = 1; | ||
| 4763 | RCU_INIT_POINTER(init_task.cgroups, &init_css_set); | 4172 | RCU_INIT_POINTER(init_task.cgroups, &init_css_set); |
| 4764 | 4173 | ||
| 4765 | init_cgrp_cset_link.cset = &init_css_set; | 4174 | for_each_subsys(ss, i) { |
| 4766 | init_cgrp_cset_link.cgrp = cgroup_dummy_top; | 4175 | WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, |
| 4767 | list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); | 4176 | "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n", |
| 4768 | list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); | 4177 | i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, |
| 4769 | 4178 | ss->id, ss->name); | |
| 4770 | /* at bootup time, we don't worry about modular subsystems */ | 4179 | WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, |
| 4771 | for_each_builtin_subsys(ss, i) { | 4180 | "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); |
| 4772 | BUG_ON(!ss->name); | 4181 | |
| 4773 | BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); | 4182 | ss->id = i; |
| 4774 | BUG_ON(!ss->css_alloc); | 4183 | ss->name = cgroup_subsys_name[i]; |
| 4775 | BUG_ON(!ss->css_free); | ||
| 4776 | if (ss->subsys_id != i) { | ||
| 4777 | printk(KERN_ERR "cgroup: Subsys %s id == %d\n", | ||
| 4778 | ss->name, ss->subsys_id); | ||
| 4779 | BUG(); | ||
| 4780 | } | ||
| 4781 | 4184 | ||
| 4782 | if (ss->early_init) | 4185 | if (ss->early_init) |
| 4783 | cgroup_init_subsys(ss); | 4186 | cgroup_init_subsys(ss); |
| @@ -4795,53 +4198,46 @@ int __init cgroup_init(void) | |||
| 4795 | { | 4198 | { |
| 4796 | struct cgroup_subsys *ss; | 4199 | struct cgroup_subsys *ss; |
| 4797 | unsigned long key; | 4200 | unsigned long key; |
| 4798 | int i, err; | 4201 | int ssid, err; |
| 4799 | 4202 | ||
| 4800 | err = bdi_init(&cgroup_backing_dev_info); | 4203 | BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); |
| 4801 | if (err) | ||
| 4802 | return err; | ||
| 4803 | 4204 | ||
| 4804 | for_each_builtin_subsys(ss, i) { | 4205 | mutex_lock(&cgroup_tree_mutex); |
| 4805 | if (!ss->early_init) | ||
| 4806 | cgroup_init_subsys(ss); | ||
| 4807 | } | ||
| 4808 | |||
| 4809 | /* allocate id for the dummy hierarchy */ | ||
| 4810 | mutex_lock(&cgroup_mutex); | 4206 | mutex_lock(&cgroup_mutex); |
| 4811 | mutex_lock(&cgroup_root_mutex); | ||
| 4812 | 4207 | ||
| 4813 | /* Add init_css_set to the hash table */ | 4208 | /* Add init_css_set to the hash table */ |
| 4814 | key = css_set_hash(init_css_set.subsys); | 4209 | key = css_set_hash(init_css_set.subsys); |
| 4815 | hash_add(css_set_table, &init_css_set.hlist, key); | 4210 | hash_add(css_set_table, &init_css_set.hlist, key); |
| 4816 | 4211 | ||
| 4817 | BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1)); | 4212 | BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0)); |
| 4818 | 4213 | ||
| 4819 | err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top, | ||
| 4820 | 0, 1, GFP_KERNEL); | ||
| 4821 | BUG_ON(err < 0); | ||
| 4822 | |||
| 4823 | mutex_unlock(&cgroup_root_mutex); | ||
| 4824 | mutex_unlock(&cgroup_mutex); | 4214 | mutex_unlock(&cgroup_mutex); |
| 4215 | mutex_unlock(&cgroup_tree_mutex); | ||
| 4825 | 4216 | ||
| 4826 | cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); | 4217 | for_each_subsys(ss, ssid) { |
| 4827 | if (!cgroup_kobj) { | 4218 | if (!ss->early_init) |
| 4828 | err = -ENOMEM; | 4219 | cgroup_init_subsys(ss); |
| 4829 | goto out; | 4220 | |
| 4221 | /* | ||
| 4222 | * cftype registration needs kmalloc and can't be done | ||
| 4223 | * during early_init. Register base cftypes separately. | ||
| 4224 | */ | ||
| 4225 | if (ss->base_cftypes) | ||
| 4226 | WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes)); | ||
| 4830 | } | 4227 | } |
| 4831 | 4228 | ||
| 4229 | cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); | ||
| 4230 | if (!cgroup_kobj) | ||
| 4231 | return -ENOMEM; | ||
| 4232 | |||
| 4832 | err = register_filesystem(&cgroup_fs_type); | 4233 | err = register_filesystem(&cgroup_fs_type); |
| 4833 | if (err < 0) { | 4234 | if (err < 0) { |
| 4834 | kobject_put(cgroup_kobj); | 4235 | kobject_put(cgroup_kobj); |
| 4835 | goto out; | 4236 | return err; |
| 4836 | } | 4237 | } |
| 4837 | 4238 | ||
| 4838 | proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations); | 4239 | proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations); |
| 4839 | 4240 | return 0; | |
| 4840 | out: | ||
| 4841 | if (err) | ||
| 4842 | bdi_destroy(&cgroup_backing_dev_info); | ||
| 4843 | |||
| 4844 | return err; | ||
| 4845 | } | 4241 | } |
| 4846 | 4242 | ||
| 4847 | static int __init cgroup_wq_init(void) | 4243 | static int __init cgroup_wq_init(void) |
| @@ -4873,12 +4269,6 @@ core_initcall(cgroup_wq_init); | |||
| 4873 | * proc_cgroup_show() | 4269 | * proc_cgroup_show() |
| 4874 | * - Print task's cgroup paths into seq_file, one line for each hierarchy | 4270 | * - Print task's cgroup paths into seq_file, one line for each hierarchy |
| 4875 | * - Used for /proc/<pid>/cgroup. | 4271 | * - Used for /proc/<pid>/cgroup. |
| 4876 | * - No need to task_lock(tsk) on this tsk->cgroup reference, as it | ||
| 4877 | * doesn't really matter if tsk->cgroup changes after we read it, | ||
| 4878 | * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it | ||
| 4879 | * anyway. No need to check that tsk->cgroup != NULL, thanks to | ||
| 4880 | * the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks | ||
| 4881 | * cgroup to top_cgroup. | ||
| 4882 | */ | 4272 | */ |
| 4883 | 4273 | ||
| 4884 | /* TODO: Use a proper seq_file iterator */ | 4274 | /* TODO: Use a proper seq_file iterator */ |
| @@ -4886,12 +4276,12 @@ int proc_cgroup_show(struct seq_file *m, void *v) | |||
| 4886 | { | 4276 | { |
| 4887 | struct pid *pid; | 4277 | struct pid *pid; |
| 4888 | struct task_struct *tsk; | 4278 | struct task_struct *tsk; |
| 4889 | char *buf; | 4279 | char *buf, *path; |
| 4890 | int retval; | 4280 | int retval; |
| 4891 | struct cgroupfs_root *root; | 4281 | struct cgroup_root *root; |
| 4892 | 4282 | ||
| 4893 | retval = -ENOMEM; | 4283 | retval = -ENOMEM; |
| 4894 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 4284 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
| 4895 | if (!buf) | 4285 | if (!buf) |
| 4896 | goto out; | 4286 | goto out; |
| 4897 | 4287 | ||
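proc_cgroup_show() switches from a PAGE_SIZE buffer plus an integer return to a PATH_MAX buffer where cgroup_path() hands back a pointer into the buffer, or NULL if the path did not fit. A hedged usage sketch; print_cgroup_path() is an invented helper, and the return-pointer convention is taken from the calls in this hunk:

```c
#include <linux/cgroup.h>
#include <linux/limits.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Invented helper showing the new calling convention: the path may be
 * built from the end of the buffer, so always use the returned pointer
 * rather than assuming it equals buf. */
static int print_cgroup_path(struct cgroup *cgrp)
{
	char *buf, *path;
	int ret = 0;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	path = cgroup_path(cgrp, buf, PATH_MAX);	/* NULL: didn't fit */
	if (path)
		pr_info("cgroup path: %s\n", path);
	else
		ret = -ENAMETOOLONG;

	kfree(buf);
	return ret;
}
```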
| @@ -4904,29 +4294,36 @@ int proc_cgroup_show(struct seq_file *m, void *v) | |||
| 4904 | retval = 0; | 4294 | retval = 0; |
| 4905 | 4295 | ||
| 4906 | mutex_lock(&cgroup_mutex); | 4296 | mutex_lock(&cgroup_mutex); |
| 4297 | down_read(&css_set_rwsem); | ||
| 4907 | 4298 | ||
| 4908 | for_each_active_root(root) { | 4299 | for_each_root(root) { |
| 4909 | struct cgroup_subsys *ss; | 4300 | struct cgroup_subsys *ss; |
| 4910 | struct cgroup *cgrp; | 4301 | struct cgroup *cgrp; |
| 4911 | int ssid, count = 0; | 4302 | int ssid, count = 0; |
| 4912 | 4303 | ||
| 4304 | if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible) | ||
| 4305 | continue; | ||
| 4306 | |||
| 4913 | seq_printf(m, "%d:", root->hierarchy_id); | 4307 | seq_printf(m, "%d:", root->hierarchy_id); |
| 4914 | for_each_subsys(ss, ssid) | 4308 | for_each_subsys(ss, ssid) |
| 4915 | if (root->subsys_mask & (1 << ssid)) | 4309 | if (root->cgrp.subsys_mask & (1 << ssid)) |
| 4916 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); | 4310 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); |
| 4917 | if (strlen(root->name)) | 4311 | if (strlen(root->name)) |
| 4918 | seq_printf(m, "%sname=%s", count ? "," : "", | 4312 | seq_printf(m, "%sname=%s", count ? "," : "", |
| 4919 | root->name); | 4313 | root->name); |
| 4920 | seq_putc(m, ':'); | 4314 | seq_putc(m, ':'); |
| 4921 | cgrp = task_cgroup_from_root(tsk, root); | 4315 | cgrp = task_cgroup_from_root(tsk, root); |
| 4922 | retval = cgroup_path(cgrp, buf, PAGE_SIZE); | 4316 | path = cgroup_path(cgrp, buf, PATH_MAX); |
| 4923 | if (retval < 0) | 4317 | if (!path) { |
| 4318 | retval = -ENAMETOOLONG; | ||
| 4924 | goto out_unlock; | 4319 | goto out_unlock; |
| 4925 | seq_puts(m, buf); | 4320 | } |
| 4321 | seq_puts(m, path); | ||
| 4926 | seq_putc(m, '\n'); | 4322 | seq_putc(m, '\n'); |
| 4927 | } | 4323 | } |
| 4928 | 4324 | ||
| 4929 | out_unlock: | 4325 | out_unlock: |
| 4326 | up_read(&css_set_rwsem); | ||
| 4930 | mutex_unlock(&cgroup_mutex); | 4327 | mutex_unlock(&cgroup_mutex); |
| 4931 | put_task_struct(tsk); | 4328 | put_task_struct(tsk); |
| 4932 | out_free: | 4329 | out_free: |
| @@ -4952,7 +4349,7 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v) | |||
| 4952 | for_each_subsys(ss, i) | 4349 | for_each_subsys(ss, i) |
| 4953 | seq_printf(m, "%s\t%d\t%d\t%d\n", | 4350 | seq_printf(m, "%s\t%d\t%d\t%d\n", |
| 4954 | ss->name, ss->root->hierarchy_id, | 4351 | ss->name, ss->root->hierarchy_id, |
| 4955 | ss->root->number_of_cgroups, !ss->disabled); | 4352 | atomic_read(&ss->root->nr_cgrps), !ss->disabled); |
| 4956 | 4353 | ||
| 4957 | mutex_unlock(&cgroup_mutex); | 4354 | mutex_unlock(&cgroup_mutex); |
| 4958 | return 0; | 4355 | return 0; |
| @@ -4971,27 +4368,16 @@ static const struct file_operations proc_cgroupstats_operations = { | |||
| 4971 | }; | 4368 | }; |
| 4972 | 4369 | ||
| 4973 | /** | 4370 | /** |
| 4974 | * cgroup_fork - attach newly forked task to its parents cgroup. | 4371 | * cgroup_fork - initialize cgroup related fields during copy_process() |
| 4975 | * @child: pointer to task_struct of forking parent process. | 4372 | * @child: pointer to task_struct of forking parent process. |
| 4976 | * | 4373 | * |
| 4977 | * Description: A task inherits its parent's cgroup at fork(). | 4374 | * A task is associated with the init_css_set until cgroup_post_fork() |
| 4978 | * | 4375 | * attaches it to the parent's css_set. Empty cg_list indicates that |
| 4979 | * A pointer to the shared css_set was automatically copied in | 4376 | * @child isn't holding reference to its css_set. |
| 4980 | * fork.c by dup_task_struct(). However, we ignore that copy, since | ||
| 4981 | * it was not made under the protection of RCU or cgroup_mutex, so | ||
| 4982 | * might no longer be a valid cgroup pointer. cgroup_attach_task() might | ||
| 4983 | * have already changed current->cgroups, allowing the previously | ||
| 4984 | * referenced cgroup group to be removed and freed. | ||
| 4985 | * | ||
| 4986 | * At the point that cgroup_fork() is called, 'current' is the parent | ||
| 4987 | * task, and the passed argument 'child' points to the child task. | ||
| 4988 | */ | 4377 | */ |
| 4989 | void cgroup_fork(struct task_struct *child) | 4378 | void cgroup_fork(struct task_struct *child) |
| 4990 | { | 4379 | { |
| 4991 | task_lock(current); | 4380 | RCU_INIT_POINTER(child->cgroups, &init_css_set); |
| 4992 | get_css_set(task_css_set(current)); | ||
| 4993 | child->cgroups = current->cgroups; | ||
| 4994 | task_unlock(current); | ||
| 4995 | INIT_LIST_HEAD(&child->cg_list); | 4381 | INIT_LIST_HEAD(&child->cg_list); |
| 4996 | } | 4382 | } |
| 4997 | 4383 | ||
| @@ -5011,23 +4397,37 @@ void cgroup_post_fork(struct task_struct *child) | |||
| 5011 | int i; | 4397 | int i; |
| 5012 | 4398 | ||
| 5013 | /* | 4399 | /* |
| 5014 | * use_task_css_set_links is set to 1 before we walk the tasklist | 4400 | * This may race against cgroup_enable_task_cg_links(). As that |
| 5015 | * under the tasklist_lock and we read it here after we added the child | 4401 | * function sets use_task_css_set_links before grabbing |
| 5016 | * to the tasklist under the tasklist_lock as well. If the child wasn't | 4402 | * tasklist_lock and we just went through tasklist_lock to add |
| 5017 | * yet in the tasklist when we walked through it from | 4403 | * @child, it's guaranteed that either we see the set |
| 5018 | * cgroup_enable_task_cg_lists(), then use_task_css_set_links value | 4404 | * use_task_css_set_links or cgroup_enable_task_cg_lists() sees |
| 5019 | * should be visible now due to the paired locking and barriers implied | 4405 | * @child during its iteration. |
| 5020 | * by LOCK/UNLOCK: it is written before the tasklist_lock unlock | 4406 | * |
| 5021 | * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock | 4407 | * If we won the race, @child is associated with %current's |
| 5022 | * lock on fork. | 4408 | * css_set. Grabbing css_set_rwsem guarantees both that the |
| 4409 | * association is stable, and, on completion of the parent's | ||
| 4410 | * migration, @child is visible in the source of migration or | ||
| 4411 | * already in the destination cgroup. This guarantee is necessary | ||
| 4412 | * when implementing operations which need to migrate all tasks of | ||
| 4413 | * a cgroup to another. | ||
| 4414 | * | ||
| 4415 | * Note that if we lose to cgroup_enable_task_cg_links(), @child | ||
| 4416 | * will remain in init_css_set. This is safe because all tasks are | ||
| 4417 | * in the init_css_set before cg_links is enabled and there's no | ||
| 4418 | * operation which transfers all tasks out of init_css_set. | ||
| 5023 | */ | 4419 | */ |
| 5024 | if (use_task_css_set_links) { | 4420 | if (use_task_css_set_links) { |
| 5025 | write_lock(&css_set_lock); | 4421 | struct css_set *cset; |
| 5026 | task_lock(child); | 4422 | |
| 5027 | if (list_empty(&child->cg_list)) | 4423 | down_write(&css_set_rwsem); |
| 5028 | list_add(&child->cg_list, &task_css_set(child)->tasks); | 4424 | cset = task_css_set(current); |
| 5029 | task_unlock(child); | 4425 | if (list_empty(&child->cg_list)) { |
| 5030 | write_unlock(&css_set_lock); | 4426 | rcu_assign_pointer(child->cgroups, cset); |
| 4427 | list_add(&child->cg_list, &cset->tasks); | ||
| 4428 | get_css_set(cset); | ||
| 4429 | } | ||
| 4430 | up_write(&css_set_rwsem); | ||
| 5031 | } | 4431 | } |
| 5032 | 4432 | ||
| 5033 | /* | 4433 | /* |
| @@ -5036,15 +4436,7 @@ void cgroup_post_fork(struct task_struct *child) | |||
| 5036 | * and addition to css_set. | 4436 | * and addition to css_set. |
| 5037 | */ | 4437 | */ |
| 5038 | if (need_forkexit_callback) { | 4438 | if (need_forkexit_callback) { |
| 5039 | /* | 4439 | for_each_subsys(ss, i) |
| 5040 | * fork/exit callbacks are supported only for builtin | ||
| 5041 | * subsystems, and the builtin section of the subsys | ||
| 5042 | * array is immutable, so we don't need to lock the | ||
| 5043 | * subsys array here. On the other hand, modular section | ||
| 5044 | * of the array can be freed at module unload, so we | ||
| 5045 | * can't touch that. | ||
| 5046 | */ | ||
| 5047 | for_each_builtin_subsys(ss, i) | ||
| 5048 | if (ss->fork) | 4440 | if (ss->fork) |
| 5049 | ss->fork(child); | 4441 | ss->fork(child); |
| 5050 | } | 4442 | } |
| @@ -5053,7 +4445,6 @@ void cgroup_post_fork(struct task_struct *child) | |||
| 5053 | /** | 4445 | /** |
| 5054 | * cgroup_exit - detach cgroup from exiting task | 4446 | * cgroup_exit - detach cgroup from exiting task |
| 5055 | * @tsk: pointer to task_struct of exiting process | 4447 | * @tsk: pointer to task_struct of exiting process |
| 5056 | * @run_callback: run exit callbacks? | ||
| 5057 | * | 4448 | * |
| 5058 | * Description: Detach cgroup from @tsk and release it. | 4449 | * Description: Detach cgroup from @tsk and release it. |
| 5059 | * | 4450 | * |
| @@ -5063,57 +4454,38 @@ void cgroup_post_fork(struct task_struct *child) | |||
| 5063 | * use notify_on_release cgroups where very high task exit scaling | 4454 | * use notify_on_release cgroups where very high task exit scaling |
| 5064 | * is required on large systems. | 4455 | * is required on large systems. |
| 5065 | * | 4456 | * |
| 5066 | * the_top_cgroup_hack: | 4457 | * We set the exiting tasks cgroup to the root cgroup (top_cgroup). We |
| 5067 | * | 4458 | * call cgroup_exit() while the task is still competent to handle |
| 5068 | * Set the exiting tasks cgroup to the root cgroup (top_cgroup). | 4459 | * notify_on_release(), then leave the task attached to the root cgroup in |
| 5069 | * | 4460 | * each hierarchy for the remainder of its exit. No need to bother with |
| 5070 | * We call cgroup_exit() while the task is still competent to | 4461 | * init_css_set refcnting. init_css_set never goes away and we can't race |
| 5071 | * handle notify_on_release(), then leave the task attached to the | 4462 | * with migration path - PF_EXITING is visible to migration path. |
| 5072 | * root cgroup in each hierarchy for the remainder of its exit. | ||
| 5073 | * | ||
| 5074 | * To do this properly, we would increment the reference count on | ||
| 5075 | * top_cgroup, and near the very end of the kernel/exit.c do_exit() | ||
| 5076 | * code we would add a second cgroup function call, to drop that | ||
| 5077 | * reference. This would just create an unnecessary hot spot on | ||
| 5078 | * the top_cgroup reference count, to no avail. | ||
| 5079 | * | ||
| 5080 | * Normally, holding a reference to a cgroup without bumping its | ||
| 5081 | * count is unsafe. The cgroup could go away, or someone could | ||
| 5082 | * attach us to a different cgroup, decrementing the count on | ||
| 5083 | * the first cgroup that we never incremented. But in this case, | ||
| 5084 | * top_cgroup isn't going away, and either task has PF_EXITING set, | ||
| 5085 | * which wards off any cgroup_attach_task() attempts, or task is a failed | ||
| 5086 | * fork, never visible to cgroup_attach_task. | ||
| 5087 | */ | 4463 | */ |
| 5088 | void cgroup_exit(struct task_struct *tsk, int run_callbacks) | 4464 | void cgroup_exit(struct task_struct *tsk) |
| 5089 | { | 4465 | { |
| 5090 | struct cgroup_subsys *ss; | 4466 | struct cgroup_subsys *ss; |
| 5091 | struct css_set *cset; | 4467 | struct css_set *cset; |
| 4468 | bool put_cset = false; | ||
| 5092 | int i; | 4469 | int i; |
| 5093 | 4470 | ||
| 5094 | /* | 4471 | /* |
| 5095 | * Unlink from the css_set task list if necessary. | 4472 | * Unlink from @tsk from its css_set. As migration path can't race |
| 5096 | * Optimistically check cg_list before taking | 4473 | * with us, we can check cg_list without grabbing css_set_rwsem. |
| 5097 | * css_set_lock | ||
| 5098 | */ | 4474 | */ |
| 5099 | if (!list_empty(&tsk->cg_list)) { | 4475 | if (!list_empty(&tsk->cg_list)) { |
| 5100 | write_lock(&css_set_lock); | 4476 | down_write(&css_set_rwsem); |
| 5101 | if (!list_empty(&tsk->cg_list)) | 4477 | list_del_init(&tsk->cg_list); |
| 5102 | list_del_init(&tsk->cg_list); | 4478 | up_write(&css_set_rwsem); |
| 5103 | write_unlock(&css_set_lock); | 4479 | put_cset = true; |
| 5104 | } | 4480 | } |
| 5105 | 4481 | ||
| 5106 | /* Reassign the task to the init_css_set. */ | 4482 | /* Reassign the task to the init_css_set. */ |
| 5107 | task_lock(tsk); | ||
| 5108 | cset = task_css_set(tsk); | 4483 | cset = task_css_set(tsk); |
| 5109 | RCU_INIT_POINTER(tsk->cgroups, &init_css_set); | 4484 | RCU_INIT_POINTER(tsk->cgroups, &init_css_set); |
| 5110 | 4485 | ||
| 5111 | if (run_callbacks && need_forkexit_callback) { | 4486 | if (need_forkexit_callback) { |
| 5112 | /* | 4487 | /* see cgroup_post_fork() for details */ |
| 5113 | * fork/exit callbacks are supported only for builtin | 4488 | for_each_subsys(ss, i) { |
| 5114 | * subsystems, see cgroup_post_fork() for details. | ||
| 5115 | */ | ||
| 5116 | for_each_builtin_subsys(ss, i) { | ||
| 5117 | if (ss->exit) { | 4489 | if (ss->exit) { |
| 5118 | struct cgroup_subsys_state *old_css = cset->subsys[i]; | 4490 | struct cgroup_subsys_state *old_css = cset->subsys[i]; |
| 5119 | struct cgroup_subsys_state *css = task_css(tsk, i); | 4491 | struct cgroup_subsys_state *css = task_css(tsk, i); |
| @@ -5122,9 +4494,9 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) | |||
| 5122 | } | 4494 | } |
| 5123 | } | 4495 | } |
| 5124 | } | 4496 | } |
| 5125 | task_unlock(tsk); | ||
| 5126 | 4497 | ||
| 5127 | put_css_set_taskexit(cset); | 4498 | if (put_cset) |
| 4499 | put_css_set(cset, true); | ||
| 5128 | } | 4500 | } |
| 5129 | 4501 | ||
| 5130 | static void check_for_release(struct cgroup *cgrp) | 4502 | static void check_for_release(struct cgroup *cgrp) |
| @@ -5181,16 +4553,17 @@ static void cgroup_release_agent(struct work_struct *work) | |||
| 5181 | while (!list_empty(&release_list)) { | 4553 | while (!list_empty(&release_list)) { |
| 5182 | char *argv[3], *envp[3]; | 4554 | char *argv[3], *envp[3]; |
| 5183 | int i; | 4555 | int i; |
| 5184 | char *pathbuf = NULL, *agentbuf = NULL; | 4556 | char *pathbuf = NULL, *agentbuf = NULL, *path; |
| 5185 | struct cgroup *cgrp = list_entry(release_list.next, | 4557 | struct cgroup *cgrp = list_entry(release_list.next, |
| 5186 | struct cgroup, | 4558 | struct cgroup, |
| 5187 | release_list); | 4559 | release_list); |
| 5188 | list_del_init(&cgrp->release_list); | 4560 | list_del_init(&cgrp->release_list); |
| 5189 | raw_spin_unlock(&release_list_lock); | 4561 | raw_spin_unlock(&release_list_lock); |
| 5190 | pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 4562 | pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); |
| 5191 | if (!pathbuf) | 4563 | if (!pathbuf) |
| 5192 | goto continue_free; | 4564 | goto continue_free; |
| 5193 | if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) | 4565 | path = cgroup_path(cgrp, pathbuf, PATH_MAX); |
| 4566 | if (!path) | ||
| 5194 | goto continue_free; | 4567 | goto continue_free; |
| 5195 | agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); | 4568 | agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); |
| 5196 | if (!agentbuf) | 4569 | if (!agentbuf) |
| @@ -5198,7 +4571,7 @@ static void cgroup_release_agent(struct work_struct *work) | |||
| 5198 | 4571 | ||
| 5199 | i = 0; | 4572 | i = 0; |
| 5200 | argv[i++] = agentbuf; | 4573 | argv[i++] = agentbuf; |
| 5201 | argv[i++] = pathbuf; | 4574 | argv[i++] = path; |
| 5202 | argv[i] = NULL; | 4575 | argv[i] = NULL; |
| 5203 | 4576 | ||
| 5204 | i = 0; | 4577 | i = 0; |
| @@ -5232,11 +4605,7 @@ static int __init cgroup_disable(char *str) | |||
| 5232 | if (!*token) | 4605 | if (!*token) |
| 5233 | continue; | 4606 | continue; |
| 5234 | 4607 | ||
| 5235 | /* | 4608 | for_each_subsys(ss, i) { |
| 5236 | * cgroup_disable, being at boot time, can't know about | ||
| 5237 | * module subsystems, so we don't worry about them. | ||
| 5238 | */ | ||
| 5239 | for_each_builtin_subsys(ss, i) { | ||
| 5240 | if (!strcmp(token, ss->name)) { | 4609 | if (!strcmp(token, ss->name)) { |
| 5241 | ss->disabled = 1; | 4610 | ss->disabled = 1; |
| 5242 | printk(KERN_INFO "Disabling %s control group" | 4611 | printk(KERN_INFO "Disabling %s control group" |
| @@ -5250,28 +4619,42 @@ static int __init cgroup_disable(char *str) | |||
| 5250 | __setup("cgroup_disable=", cgroup_disable); | 4619 | __setup("cgroup_disable=", cgroup_disable); |
| 5251 | 4620 | ||
| 5252 | /** | 4621 | /** |
| 5253 | * css_from_dir - get corresponding css from the dentry of a cgroup dir | 4622 | * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir |
| 5254 | * @dentry: directory dentry of interest | 4623 | * @dentry: directory dentry of interest |
| 5255 | * @ss: subsystem of interest | 4624 | * @ss: subsystem of interest |
| 5256 | * | 4625 | * |
| 5257 | * Must be called under cgroup_mutex or RCU read lock. The caller is | 4626 | * If @dentry is a directory for a cgroup which has @ss enabled on it, try |
| 5258 | * responsible for pinning the returned css if it needs to be accessed | 4627 | * to get the corresponding css and return it. If such css doesn't exist |
| 5259 | * outside the critical section. | 4628 | * or can't be pinned, an ERR_PTR value is returned. |
| 5260 | */ | 4629 | */ |
| 5261 | struct cgroup_subsys_state *css_from_dir(struct dentry *dentry, | 4630 | struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry, |
| 5262 | struct cgroup_subsys *ss) | 4631 | struct cgroup_subsys *ss) |
| 5263 | { | 4632 | { |
| 4633 | struct kernfs_node *kn = kernfs_node_from_dentry(dentry); | ||
| 4634 | struct cgroup_subsys_state *css = NULL; | ||
| 5264 | struct cgroup *cgrp; | 4635 | struct cgroup *cgrp; |
| 5265 | 4636 | ||
| 5266 | cgroup_assert_mutex_or_rcu_locked(); | ||
| 5267 | |||
| 5268 | /* is @dentry a cgroup dir? */ | 4637 | /* is @dentry a cgroup dir? */ |
| 5269 | if (!dentry->d_inode || | 4638 | if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || |
| 5270 | dentry->d_inode->i_op != &cgroup_dir_inode_operations) | 4639 | kernfs_type(kn) != KERNFS_DIR) |
| 5271 | return ERR_PTR(-EBADF); | 4640 | return ERR_PTR(-EBADF); |
| 5272 | 4641 | ||
| 5273 | cgrp = __d_cgrp(dentry); | 4642 | rcu_read_lock(); |
| 5274 | return cgroup_css(cgrp, ss) ?: ERR_PTR(-ENOENT); | 4643 | |
| 4644 | /* | ||
| 4645 | * This path doesn't originate from kernfs and @kn could already | ||
| 4646 | * have been or be removed at any point. @kn->priv is RCU | ||
| 4647 | * protected for this access. See destroy_locked() for details. | ||
| 4648 | */ | ||
| 4649 | cgrp = rcu_dereference(kn->priv); | ||
| 4650 | if (cgrp) | ||
| 4651 | css = cgroup_css(cgrp, ss); | ||
| 4652 | |||
| 4653 | if (!css || !css_tryget(css)) | ||
| 4654 | css = ERR_PTR(-ENOENT); | ||
| 4655 | |||
| 4656 | rcu_read_unlock(); | ||
| 4657 | return css; | ||
| 5275 | } | 4658 | } |
| 5276 | 4659 | ||
| 5277 | /** | 4660 | /** |
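Per the new comment, css_tryget_from_dir() now hands back a pinned css or an ERR_PTR, so a caller checks with IS_ERR() and drops the reference with css_put() when it is done. A minimal sketch of that calling pattern; the surrounding function and the work done on the css are illustrative, only the cgroup calls come from this hunk:

	static int inspect_cgroup_dir(struct dentry *dentry, struct cgroup_subsys *ss)
	{
		struct cgroup_subsys_state *css;

		css = css_tryget_from_dir(dentry, ss);	/* pins the css on success */
		if (IS_ERR(css))
			return PTR_ERR(css);		/* -EBADF or -ENOENT per the comment */

		/* ... use css while the reference is held ... */

		css_put(css);				/* balance the tryget */
		return 0;
	}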
| @@ -5286,7 +4669,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss) | |||
| 5286 | { | 4669 | { |
| 5287 | struct cgroup *cgrp; | 4670 | struct cgroup *cgrp; |
| 5288 | 4671 | ||
| 5289 | cgroup_assert_mutex_or_rcu_locked(); | 4672 | cgroup_assert_mutexes_or_rcu_locked(); |
| 5290 | 4673 | ||
| 5291 | cgrp = idr_find(&ss->root->cgroup_idr, id); | 4674 | cgrp = idr_find(&ss->root->cgroup_idr, id); |
| 5292 | if (cgrp) | 4675 | if (cgrp) |
| @@ -5338,23 +4721,25 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) | |||
| 5338 | { | 4721 | { |
| 5339 | struct cgrp_cset_link *link; | 4722 | struct cgrp_cset_link *link; |
| 5340 | struct css_set *cset; | 4723 | struct css_set *cset; |
| 4724 | char *name_buf; | ||
| 5341 | 4725 | ||
| 5342 | read_lock(&css_set_lock); | 4726 | name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL); |
| 4727 | if (!name_buf) | ||
| 4728 | return -ENOMEM; | ||
| 4729 | |||
| 4730 | down_read(&css_set_rwsem); | ||
| 5343 | rcu_read_lock(); | 4731 | rcu_read_lock(); |
| 5344 | cset = rcu_dereference(current->cgroups); | 4732 | cset = rcu_dereference(current->cgroups); |
| 5345 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { | 4733 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { |
| 5346 | struct cgroup *c = link->cgrp; | 4734 | struct cgroup *c = link->cgrp; |
| 5347 | const char *name; | ||
| 5348 | 4735 | ||
| 5349 | if (c->dentry) | 4736 | cgroup_name(c, name_buf, NAME_MAX + 1); |
| 5350 | name = c->dentry->d_name.name; | ||
| 5351 | else | ||
| 5352 | name = "?"; | ||
| 5353 | seq_printf(seq, "Root %d group %s\n", | 4737 | seq_printf(seq, "Root %d group %s\n", |
| 5354 | c->root->hierarchy_id, name); | 4738 | c->root->hierarchy_id, name_buf); |
| 5355 | } | 4739 | } |
| 5356 | rcu_read_unlock(); | 4740 | rcu_read_unlock(); |
| 5357 | read_unlock(&css_set_lock); | 4741 | up_read(&css_set_rwsem); |
| 4742 | kfree(name_buf); | ||
| 5358 | return 0; | 4743 | return 0; |
| 5359 | } | 4744 | } |
| 5360 | 4745 | ||
| @@ -5364,23 +4749,30 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v) | |||
| 5364 | struct cgroup_subsys_state *css = seq_css(seq); | 4749 | struct cgroup_subsys_state *css = seq_css(seq); |
| 5365 | struct cgrp_cset_link *link; | 4750 | struct cgrp_cset_link *link; |
| 5366 | 4751 | ||
| 5367 | read_lock(&css_set_lock); | 4752 | down_read(&css_set_rwsem); |
| 5368 | list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { | 4753 | list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { |
| 5369 | struct css_set *cset = link->cset; | 4754 | struct css_set *cset = link->cset; |
| 5370 | struct task_struct *task; | 4755 | struct task_struct *task; |
| 5371 | int count = 0; | 4756 | int count = 0; |
| 4757 | |||
| 5372 | seq_printf(seq, "css_set %p\n", cset); | 4758 | seq_printf(seq, "css_set %p\n", cset); |
| 4759 | |||
| 5373 | list_for_each_entry(task, &cset->tasks, cg_list) { | 4760 | list_for_each_entry(task, &cset->tasks, cg_list) { |
| 5374 | if (count++ > MAX_TASKS_SHOWN_PER_CSS) { | 4761 | if (count++ > MAX_TASKS_SHOWN_PER_CSS) |
| 5375 | seq_puts(seq, " ...\n"); | 4762 | goto overflow; |
| 5376 | break; | 4763 | seq_printf(seq, " task %d\n", task_pid_vnr(task)); |
| 5377 | } else { | 4764 | } |
| 5378 | seq_printf(seq, " task %d\n", | 4765 | |
| 5379 | task_pid_vnr(task)); | 4766 | list_for_each_entry(task, &cset->mg_tasks, cg_list) { |
| 5380 | } | 4767 | if (count++ > MAX_TASKS_SHOWN_PER_CSS) |
| 4768 | goto overflow; | ||
| 4769 | seq_printf(seq, " task %d\n", task_pid_vnr(task)); | ||
| 5381 | } | 4770 | } |
| 4771 | continue; | ||
| 4772 | overflow: | ||
| 4773 | seq_puts(seq, " ...\n"); | ||
| 5382 | } | 4774 | } |
| 5383 | read_unlock(&css_set_lock); | 4775 | up_read(&css_set_rwsem); |
| 5384 | return 0; | 4776 | return 0; |
| 5385 | } | 4777 | } |
| 5386 | 4778 | ||
| @@ -5423,11 +4815,9 @@ static struct cftype debug_files[] = { | |||
| 5423 | { } /* terminate */ | 4815 | { } /* terminate */ |
| 5424 | }; | 4816 | }; |
| 5425 | 4817 | ||
| 5426 | struct cgroup_subsys debug_subsys = { | 4818 | struct cgroup_subsys debug_cgrp_subsys = { |
| 5427 | .name = "debug", | ||
| 5428 | .css_alloc = debug_css_alloc, | 4819 | .css_alloc = debug_css_alloc, |
| 5429 | .css_free = debug_css_free, | 4820 | .css_free = debug_css_free, |
| 5430 | .subsys_id = debug_subsys_id, | ||
| 5431 | .base_cftypes = debug_files, | 4821 | .base_cftypes = debug_files, |
| 5432 | }; | 4822 | }; |
| 5433 | #endif /* CONFIG_CGROUP_DEBUG */ | 4823 | #endif /* CONFIG_CGROUP_DEBUG */ |
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 6c3154e477f6..2bc4a2256444 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c | |||
| @@ -52,7 +52,7 @@ static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) | |||
| 52 | 52 | ||
| 53 | static inline struct freezer *task_freezer(struct task_struct *task) | 53 | static inline struct freezer *task_freezer(struct task_struct *task) |
| 54 | { | 54 | { |
| 55 | return css_freezer(task_css(task, freezer_subsys_id)); | 55 | return css_freezer(task_css(task, freezer_cgrp_id)); |
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | static struct freezer *parent_freezer(struct freezer *freezer) | 58 | static struct freezer *parent_freezer(struct freezer *freezer) |
| @@ -84,8 +84,6 @@ static const char *freezer_state_strs(unsigned int state) | |||
| 84 | return "THAWED"; | 84 | return "THAWED"; |
| 85 | }; | 85 | }; |
| 86 | 86 | ||
| 87 | struct cgroup_subsys freezer_subsys; | ||
| 88 | |||
| 89 | static struct cgroup_subsys_state * | 87 | static struct cgroup_subsys_state * |
| 90 | freezer_css_alloc(struct cgroup_subsys_state *parent_css) | 88 | freezer_css_alloc(struct cgroup_subsys_state *parent_css) |
| 91 | { | 89 | { |
| @@ -189,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, | |||
| 189 | * current state before executing the following - !frozen tasks may | 187 | * current state before executing the following - !frozen tasks may |
| 190 | * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. | 188 | * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. |
| 191 | */ | 189 | */ |
| 192 | cgroup_taskset_for_each(task, new_css, tset) { | 190 | cgroup_taskset_for_each(task, tset) { |
| 193 | if (!(freezer->state & CGROUP_FREEZING)) { | 191 | if (!(freezer->state & CGROUP_FREEZING)) { |
| 194 | __thaw_task(task); | 192 | __thaw_task(task); |
| 195 | } else { | 193 | } else { |
| @@ -216,6 +214,16 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, | |||
| 216 | } | 214 | } |
| 217 | } | 215 | } |
| 218 | 216 | ||
| 217 | /** | ||
| 218 | * freezer_fork - cgroup post fork callback | ||
| 219 | * @task: a task which has just been forked | ||
| 220 | * | ||
| 221 | * @task has just been created and should conform to the current state of | ||
| 222 | * the cgroup_freezer it belongs to. This function may race against | ||
| 223 | * freezer_attach(). Losing to freezer_attach() means that we don't have | ||
| 224 | * to do anything as freezer_attach() will put @task into the appropriate | ||
| 225 | * state. | ||
| 226 | */ | ||
| 219 | static void freezer_fork(struct task_struct *task) | 227 | static void freezer_fork(struct task_struct *task) |
| 220 | { | 228 | { |
| 221 | struct freezer *freezer; | 229 | struct freezer *freezer; |
| @@ -224,14 +232,26 @@ static void freezer_fork(struct task_struct *task) | |||
| 224 | freezer = task_freezer(task); | 232 | freezer = task_freezer(task); |
| 225 | 233 | ||
| 226 | /* | 234 | /* |
| 227 | * The root cgroup is non-freezable, so we can skip the | 235 | * The root cgroup is non-freezable, so we can skip locking the |
| 228 | * following check. | 236 | * freezer. This is safe regardless of race with task migration. |
| 237 | * If we didn't race or won, skipping is obviously the right thing | ||
| 238 | * to do. If we lost and root is the new cgroup, noop is still the | ||
| 239 | * right thing to do. | ||
| 229 | */ | 240 | */ |
| 230 | if (!parent_freezer(freezer)) | 241 | if (!parent_freezer(freezer)) |
| 231 | goto out; | 242 | goto out; |
| 232 | 243 | ||
| 244 | /* | ||
| 245 | * Grab @freezer->lock and freeze @task after verifying @task still | ||
| 246 | * belongs to @freezer and it's freezing. The former is for the | ||
| 247 | * case where we have raced against task migration and lost and | ||
| 248 | * @task is already in a different cgroup which may not be frozen. | ||
| 249 | * This isn't strictly necessary as freeze_task() is allowed to be | ||
| 250 | * called spuriously but let's do it anyway for, if nothing else, | ||
| 251 | * documentation. | ||
| 252 | */ | ||
| 233 | spin_lock_irq(&freezer->lock); | 253 | spin_lock_irq(&freezer->lock); |
| 234 | if (freezer->state & CGROUP_FREEZING) | 254 | if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING)) |
| 235 | freeze_task(task); | 255 | freeze_task(task); |
| 236 | spin_unlock_irq(&freezer->lock); | 256 | spin_unlock_irq(&freezer->lock); |
| 237 | out: | 257 | out: |
| @@ -422,7 +442,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze) | |||
| 422 | } | 442 | } |
| 423 | 443 | ||
| 424 | static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft, | 444 | static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft, |
| 425 | const char *buffer) | 445 | char *buffer) |
| 426 | { | 446 | { |
| 427 | bool freeze; | 447 | bool freeze; |
| 428 | 448 | ||
| @@ -473,13 +493,11 @@ static struct cftype files[] = { | |||
| 473 | { } /* terminate */ | 493 | { } /* terminate */ |
| 474 | }; | 494 | }; |
| 475 | 495 | ||
| 476 | struct cgroup_subsys freezer_subsys = { | 496 | struct cgroup_subsys freezer_cgrp_subsys = { |
| 477 | .name = "freezer", | ||
| 478 | .css_alloc = freezer_css_alloc, | 497 | .css_alloc = freezer_css_alloc, |
| 479 | .css_online = freezer_css_online, | 498 | .css_online = freezer_css_online, |
| 480 | .css_offline = freezer_css_offline, | 499 | .css_offline = freezer_css_offline, |
| 481 | .css_free = freezer_css_free, | 500 | .css_free = freezer_css_free, |
| 482 | .subsys_id = freezer_subsys_id, | ||
| 483 | .attach = freezer_attach, | 501 | .attach = freezer_attach, |
| 484 | .fork = freezer_fork, | 502 | .fork = freezer_fork, |
| 485 | .base_cftypes = files, | 503 | .base_cftypes = files, |
diff --git a/kernel/compat.c b/kernel/compat.c index 0a09e481b70b..e40b0430b562 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
| @@ -30,28 +30,6 @@ | |||
| 30 | 30 | ||
| 31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
| 32 | 32 | ||
| 33 | /* | ||
| 34 | * Get/set struct timeval with struct timespec on the native side | ||
| 35 | */ | ||
| 36 | static int compat_get_timeval_convert(struct timespec *o, | ||
| 37 | struct compat_timeval __user *i) | ||
| 38 | { | ||
| 39 | long usec; | ||
| 40 | |||
| 41 | if (get_user(o->tv_sec, &i->tv_sec) || | ||
| 42 | get_user(usec, &i->tv_usec)) | ||
| 43 | return -EFAULT; | ||
| 44 | o->tv_nsec = usec * 1000; | ||
| 45 | return 0; | ||
| 46 | } | ||
| 47 | |||
| 48 | static int compat_put_timeval_convert(struct compat_timeval __user *o, | ||
| 49 | struct timeval *i) | ||
| 50 | { | ||
| 51 | return (put_user(i->tv_sec, &o->tv_sec) || | ||
| 52 | put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp) | 33 | static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp) |
| 56 | { | 34 | { |
| 57 | memset(txc, 0, sizeof(struct timex)); | 35 | memset(txc, 0, sizeof(struct timex)); |
| @@ -110,13 +88,13 @@ static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc) | |||
| 110 | return 0; | 88 | return 0; |
| 111 | } | 89 | } |
| 112 | 90 | ||
| 113 | asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, | 91 | COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv, |
| 114 | struct timezone __user *tz) | 92 | struct timezone __user *, tz) |
| 115 | { | 93 | { |
| 116 | if (tv) { | 94 | if (tv) { |
| 117 | struct timeval ktv; | 95 | struct timeval ktv; |
| 118 | do_gettimeofday(&ktv); | 96 | do_gettimeofday(&ktv); |
| 119 | if (compat_put_timeval_convert(tv, &ktv)) | 97 | if (compat_put_timeval(&ktv, tv)) |
| 120 | return -EFAULT; | 98 | return -EFAULT; |
| 121 | } | 99 | } |
| 122 | if (tz) { | 100 | if (tz) { |
| @@ -127,62 +105,61 @@ asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, | |||
| 127 | return 0; | 105 | return 0; |
| 128 | } | 106 | } |
| 129 | 107 | ||
| 130 | asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, | 108 | COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv, |
| 131 | struct timezone __user *tz) | 109 | struct timezone __user *, tz) |
| 132 | { | 110 | { |
| 133 | struct timespec kts; | 111 | struct timeval user_tv; |
| 134 | struct timezone ktz; | 112 | struct timespec new_ts; |
| 113 | struct timezone new_tz; | ||
| 135 | 114 | ||
| 136 | if (tv) { | 115 | if (tv) { |
| 137 | if (compat_get_timeval_convert(&kts, tv)) | 116 | if (compat_get_timeval(&user_tv, tv)) |
| 138 | return -EFAULT; | 117 | return -EFAULT; |
| 118 | new_ts.tv_sec = user_tv.tv_sec; | ||
| 119 | new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; | ||
| 139 | } | 120 | } |
| 140 | if (tz) { | 121 | if (tz) { |
| 141 | if (copy_from_user(&ktz, tz, sizeof(ktz))) | 122 | if (copy_from_user(&new_tz, tz, sizeof(*tz))) |
| 142 | return -EFAULT; | 123 | return -EFAULT; |
| 143 | } | 124 | } |
| 144 | 125 | ||
| 145 | return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); | 126 | return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); |
| 146 | } | 127 | } |
| 147 | 128 | ||
| 148 | int get_compat_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) | 129 | static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) |
| 149 | { | 130 | { |
| 150 | return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) || | 131 | return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) || |
| 151 | __get_user(tv->tv_sec, &ctv->tv_sec) || | 132 | __get_user(tv->tv_sec, &ctv->tv_sec) || |
| 152 | __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; | 133 | __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; |
| 153 | } | 134 | } |
| 154 | EXPORT_SYMBOL_GPL(get_compat_timeval); | ||
| 155 | 135 | ||
| 156 | int put_compat_timeval(const struct timeval *tv, struct compat_timeval __user *ctv) | 136 | static int __compat_put_timeval(const struct timeval *tv, struct compat_timeval __user *ctv) |
| 157 | { | 137 | { |
| 158 | return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) || | 138 | return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) || |
| 159 | __put_user(tv->tv_sec, &ctv->tv_sec) || | 139 | __put_user(tv->tv_sec, &ctv->tv_sec) || |
| 160 | __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; | 140 | __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; |
| 161 | } | 141 | } |
| 162 | EXPORT_SYMBOL_GPL(put_compat_timeval); | ||
| 163 | 142 | ||
| 164 | int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts) | 143 | static int __compat_get_timespec(struct timespec *ts, const struct compat_timespec __user *cts) |
| 165 | { | 144 | { |
| 166 | return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || | 145 | return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || |
| 167 | __get_user(ts->tv_sec, &cts->tv_sec) || | 146 | __get_user(ts->tv_sec, &cts->tv_sec) || |
| 168 | __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; | 147 | __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; |
| 169 | } | 148 | } |
| 170 | EXPORT_SYMBOL_GPL(get_compat_timespec); | ||
| 171 | 149 | ||
| 172 | int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts) | 150 | static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts) |
| 173 | { | 151 | { |
| 174 | return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || | 152 | return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || |
| 175 | __put_user(ts->tv_sec, &cts->tv_sec) || | 153 | __put_user(ts->tv_sec, &cts->tv_sec) || |
| 176 | __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; | 154 | __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; |
| 177 | } | 155 | } |
| 178 | EXPORT_SYMBOL_GPL(put_compat_timespec); | ||
| 179 | 156 | ||
| 180 | int compat_get_timeval(struct timeval *tv, const void __user *utv) | 157 | int compat_get_timeval(struct timeval *tv, const void __user *utv) |
| 181 | { | 158 | { |
| 182 | if (COMPAT_USE_64BIT_TIME) | 159 | if (COMPAT_USE_64BIT_TIME) |
| 183 | return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0; | 160 | return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0; |
| 184 | else | 161 | else |
| 185 | return get_compat_timeval(tv, utv); | 162 | return __compat_get_timeval(tv, utv); |
| 186 | } | 163 | } |
| 187 | EXPORT_SYMBOL_GPL(compat_get_timeval); | 164 | EXPORT_SYMBOL_GPL(compat_get_timeval); |
| 188 | 165 | ||
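The rewritten compat_sys_settimeofday() above drops the bespoke converter and widens the compat timeval into a timespec by hand: seconds are copied, microseconds are scaled by NSEC_PER_USEC (1000). A self-contained user-space illustration of that arithmetic, not kernel code:

	#include <stdio.h>
	#include <sys/time.h>	/* struct timeval */
	#include <time.h>	/* struct timespec */

	int main(void)
	{
		struct timeval tv = { .tv_sec = 12, .tv_usec = 500000 };
		struct timespec ts;

		ts.tv_sec  = tv.tv_sec;
		ts.tv_nsec = tv.tv_usec * 1000L;	/* 500000 us -> 500000000 ns */

		printf("%ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);	/* 12.500000000 */
		return 0;
	}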
| @@ -191,7 +168,7 @@ int compat_put_timeval(const struct timeval *tv, void __user *utv) | |||
| 191 | if (COMPAT_USE_64BIT_TIME) | 168 | if (COMPAT_USE_64BIT_TIME) |
| 192 | return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0; | 169 | return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0; |
| 193 | else | 170 | else |
| 194 | return put_compat_timeval(tv, utv); | 171 | return __compat_put_timeval(tv, utv); |
| 195 | } | 172 | } |
| 196 | EXPORT_SYMBOL_GPL(compat_put_timeval); | 173 | EXPORT_SYMBOL_GPL(compat_put_timeval); |
| 197 | 174 | ||
| @@ -200,7 +177,7 @@ int compat_get_timespec(struct timespec *ts, const void __user *uts) | |||
| 200 | if (COMPAT_USE_64BIT_TIME) | 177 | if (COMPAT_USE_64BIT_TIME) |
| 201 | return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0; | 178 | return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0; |
| 202 | else | 179 | else |
| 203 | return get_compat_timespec(ts, uts); | 180 | return __compat_get_timespec(ts, uts); |
| 204 | } | 181 | } |
| 205 | EXPORT_SYMBOL_GPL(compat_get_timespec); | 182 | EXPORT_SYMBOL_GPL(compat_get_timespec); |
| 206 | 183 | ||
| @@ -209,10 +186,33 @@ int compat_put_timespec(const struct timespec *ts, void __user *uts) | |||
| 209 | if (COMPAT_USE_64BIT_TIME) | 186 | if (COMPAT_USE_64BIT_TIME) |
| 210 | return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0; | 187 | return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0; |
| 211 | else | 188 | else |
| 212 | return put_compat_timespec(ts, uts); | 189 | return __compat_put_timespec(ts, uts); |
| 213 | } | 190 | } |
| 214 | EXPORT_SYMBOL_GPL(compat_put_timespec); | 191 | EXPORT_SYMBOL_GPL(compat_put_timespec); |
| 215 | 192 | ||
| 193 | int compat_convert_timespec(struct timespec __user **kts, | ||
| 194 | const void __user *cts) | ||
| 195 | { | ||
| 196 | struct timespec ts; | ||
| 197 | struct timespec __user *uts; | ||
| 198 | |||
| 199 | if (!cts || COMPAT_USE_64BIT_TIME) { | ||
| 200 | *kts = (struct timespec __user *)cts; | ||
| 201 | return 0; | ||
| 202 | } | ||
| 203 | |||
| 204 | uts = compat_alloc_user_space(sizeof(ts)); | ||
| 205 | if (!uts) | ||
| 206 | return -EFAULT; | ||
| 207 | if (compat_get_timespec(&ts, cts)) | ||
| 208 | return -EFAULT; | ||
| 209 | if (copy_to_user(uts, &ts, sizeof(ts))) | ||
| 210 | return -EFAULT; | ||
| 211 | |||
| 212 | *kts = uts; | ||
| 213 | return 0; | ||
| 214 | } | ||
| 215 | |||
| 216 | static long compat_nanosleep_restart(struct restart_block *restart) | 216 | static long compat_nanosleep_restart(struct restart_block *restart) |
| 217 | { | 217 | { |
| 218 | struct compat_timespec __user *rmtp; | 218 | struct compat_timespec __user *rmtp; |
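The new compat_convert_timespec() above lets a compat entry point pass a native struct timespec __user * straight to code that expects one: either the original pointer (NULL, or when 64-bit time is in use) or a converted copy placed in compat_alloc_user_space(). A hypothetical wrapper sketching that intended use; do_native_wait() is a placeholder, not a real kernel function:

	static long compat_wait_example(struct compat_timespec __user *uts32)
	{
		struct timespec __user *uts;

		if (compat_convert_timespec(&uts, uts32))
			return -EFAULT;

		/* uts is NULL if uts32 was NULL, otherwise a native-layout copy */
		return do_native_wait(uts);
	}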
| @@ -229,21 +229,21 @@ static long compat_nanosleep_restart(struct restart_block *restart) | |||
| 229 | if (ret) { | 229 | if (ret) { |
| 230 | rmtp = restart->nanosleep.compat_rmtp; | 230 | rmtp = restart->nanosleep.compat_rmtp; |
| 231 | 231 | ||
| 232 | if (rmtp && put_compat_timespec(&rmt, rmtp)) | 232 | if (rmtp && compat_put_timespec(&rmt, rmtp)) |
| 233 | return -EFAULT; | 233 | return -EFAULT; |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | return ret; | 236 | return ret; |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, | 239 | COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, |
| 240 | struct compat_timespec __user *rmtp) | 240 | struct compat_timespec __user *, rmtp) |
| 241 | { | 241 | { |
| 242 | struct timespec tu, rmt; | 242 | struct timespec tu, rmt; |
| 243 | mm_segment_t oldfs; | 243 | mm_segment_t oldfs; |
| 244 | long ret; | 244 | long ret; |
| 245 | 245 | ||
| 246 | if (get_compat_timespec(&tu, rqtp)) | 246 | if (compat_get_timespec(&tu, rqtp)) |
| 247 | return -EFAULT; | 247 | return -EFAULT; |
| 248 | 248 | ||
| 249 | if (!timespec_valid(&tu)) | 249 | if (!timespec_valid(&tu)) |
| @@ -263,7 +263,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, | |||
| 263 | restart->fn = compat_nanosleep_restart; | 263 | restart->fn = compat_nanosleep_restart; |
| 264 | restart->nanosleep.compat_rmtp = rmtp; | 264 | restart->nanosleep.compat_rmtp = rmtp; |
| 265 | 265 | ||
| 266 | if (rmtp && put_compat_timespec(&rmt, rmtp)) | 266 | if (rmtp && compat_put_timespec(&rmt, rmtp)) |
| 267 | return -EFAULT; | 267 | return -EFAULT; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| @@ -328,7 +328,7 @@ static compat_clock_t clock_t_to_compat_clock_t(clock_t x) | |||
| 328 | return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); | 328 | return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); |
| 329 | } | 329 | } |
| 330 | 330 | ||
| 331 | asmlinkage long compat_sys_times(struct compat_tms __user *tbuf) | 331 | COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf) |
| 332 | { | 332 | { |
| 333 | if (tbuf) { | 333 | if (tbuf) { |
| 334 | struct tms tms; | 334 | struct tms tms; |
| @@ -354,7 +354,7 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf) | |||
| 354 | * types that can be passed to put_user()/get_user(). | 354 | * types that can be passed to put_user()/get_user(). |
| 355 | */ | 355 | */ |
| 356 | 356 | ||
| 357 | asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set) | 357 | COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set) |
| 358 | { | 358 | { |
| 359 | old_sigset_t s; | 359 | old_sigset_t s; |
| 360 | long ret; | 360 | long ret; |
| @@ -424,8 +424,8 @@ COMPAT_SYSCALL_DEFINE3(sigprocmask, int, how, | |||
| 424 | 424 | ||
| 425 | #endif | 425 | #endif |
| 426 | 426 | ||
| 427 | asmlinkage long compat_sys_setrlimit(unsigned int resource, | 427 | COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource, |
| 428 | struct compat_rlimit __user *rlim) | 428 | struct compat_rlimit __user *, rlim) |
| 429 | { | 429 | { |
| 430 | struct rlimit r; | 430 | struct rlimit r; |
| 431 | 431 | ||
| @@ -443,15 +443,15 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource, | |||
| 443 | 443 | ||
| 444 | #ifdef COMPAT_RLIM_OLD_INFINITY | 444 | #ifdef COMPAT_RLIM_OLD_INFINITY |
| 445 | 445 | ||
| 446 | asmlinkage long compat_sys_old_getrlimit(unsigned int resource, | 446 | COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, |
| 447 | struct compat_rlimit __user *rlim) | 447 | struct compat_rlimit __user *, rlim) |
| 448 | { | 448 | { |
| 449 | struct rlimit r; | 449 | struct rlimit r; |
| 450 | int ret; | 450 | int ret; |
| 451 | mm_segment_t old_fs = get_fs(); | 451 | mm_segment_t old_fs = get_fs(); |
| 452 | 452 | ||
| 453 | set_fs(KERNEL_DS); | 453 | set_fs(KERNEL_DS); |
| 454 | ret = sys_old_getrlimit(resource, &r); | 454 | ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r); |
| 455 | set_fs(old_fs); | 455 | set_fs(old_fs); |
| 456 | 456 | ||
| 457 | if (!ret) { | 457 | if (!ret) { |
| @@ -470,8 +470,8 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource, | |||
| 470 | 470 | ||
| 471 | #endif | 471 | #endif |
| 472 | 472 | ||
| 473 | asmlinkage long compat_sys_getrlimit(unsigned int resource, | 473 | COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource, |
| 474 | struct compat_rlimit __user *rlim) | 474 | struct compat_rlimit __user *, rlim) |
| 475 | { | 475 | { |
| 476 | struct rlimit r; | 476 | struct rlimit r; |
| 477 | int ret; | 477 | int ret; |
| @@ -596,9 +596,9 @@ static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, | |||
| 596 | return compat_get_bitmap(k, user_mask_ptr, len * 8); | 596 | return compat_get_bitmap(k, user_mask_ptr, len * 8); |
| 597 | } | 597 | } |
| 598 | 598 | ||
| 599 | asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, | 599 | COMPAT_SYSCALL_DEFINE3(sched_setaffinity, compat_pid_t, pid, |
| 600 | unsigned int len, | 600 | unsigned int, len, |
| 601 | compat_ulong_t __user *user_mask_ptr) | 601 | compat_ulong_t __user *, user_mask_ptr) |
| 602 | { | 602 | { |
| 603 | cpumask_var_t new_mask; | 603 | cpumask_var_t new_mask; |
| 604 | int retval; | 604 | int retval; |
| @@ -616,8 +616,8 @@ out: | |||
| 616 | return retval; | 616 | return retval; |
| 617 | } | 617 | } |
| 618 | 618 | ||
| 619 | asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, | 619 | COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len, |
| 620 | compat_ulong_t __user *user_mask_ptr) | 620 | compat_ulong_t __user *, user_mask_ptr) |
| 621 | { | 621 | { |
| 622 | int ret; | 622 | int ret; |
| 623 | cpumask_var_t mask; | 623 | cpumask_var_t mask; |
| @@ -647,8 +647,8 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, | |||
| 647 | int get_compat_itimerspec(struct itimerspec *dst, | 647 | int get_compat_itimerspec(struct itimerspec *dst, |
| 648 | const struct compat_itimerspec __user *src) | 648 | const struct compat_itimerspec __user *src) |
| 649 | { | 649 | { |
| 650 | if (get_compat_timespec(&dst->it_interval, &src->it_interval) || | 650 | if (__compat_get_timespec(&dst->it_interval, &src->it_interval) || |
| 651 | get_compat_timespec(&dst->it_value, &src->it_value)) | 651 | __compat_get_timespec(&dst->it_value, &src->it_value)) |
| 652 | return -EFAULT; | 652 | return -EFAULT; |
| 653 | return 0; | 653 | return 0; |
| 654 | } | 654 | } |
| @@ -656,15 +656,15 @@ int get_compat_itimerspec(struct itimerspec *dst, | |||
| 656 | int put_compat_itimerspec(struct compat_itimerspec __user *dst, | 656 | int put_compat_itimerspec(struct compat_itimerspec __user *dst, |
| 657 | const struct itimerspec *src) | 657 | const struct itimerspec *src) |
| 658 | { | 658 | { |
| 659 | if (put_compat_timespec(&src->it_interval, &dst->it_interval) || | 659 | if (__compat_put_timespec(&src->it_interval, &dst->it_interval) || |
| 660 | put_compat_timespec(&src->it_value, &dst->it_value)) | 660 | __compat_put_timespec(&src->it_value, &dst->it_value)) |
| 661 | return -EFAULT; | 661 | return -EFAULT; |
| 662 | return 0; | 662 | return 0; |
| 663 | } | 663 | } |
| 664 | 664 | ||
| 665 | long compat_sys_timer_create(clockid_t which_clock, | 665 | COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock, |
| 666 | struct compat_sigevent __user *timer_event_spec, | 666 | struct compat_sigevent __user *, timer_event_spec, |
| 667 | timer_t __user *created_timer_id) | 667 | timer_t __user *, created_timer_id) |
| 668 | { | 668 | { |
| 669 | struct sigevent __user *event = NULL; | 669 | struct sigevent __user *event = NULL; |
| 670 | 670 | ||
| @@ -680,9 +680,9 @@ long compat_sys_timer_create(clockid_t which_clock, | |||
| 680 | return sys_timer_create(which_clock, event, created_timer_id); | 680 | return sys_timer_create(which_clock, event, created_timer_id); |
| 681 | } | 681 | } |
| 682 | 682 | ||
| 683 | long compat_sys_timer_settime(timer_t timer_id, int flags, | 683 | COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, |
| 684 | struct compat_itimerspec __user *new, | 684 | struct compat_itimerspec __user *, new, |
| 685 | struct compat_itimerspec __user *old) | 685 | struct compat_itimerspec __user *, old) |
| 686 | { | 686 | { |
| 687 | long err; | 687 | long err; |
| 688 | mm_segment_t oldfs; | 688 | mm_segment_t oldfs; |
| @@ -703,8 +703,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags, | |||
| 703 | return err; | 703 | return err; |
| 704 | } | 704 | } |
| 705 | 705 | ||
| 706 | long compat_sys_timer_gettime(timer_t timer_id, | 706 | COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, |
| 707 | struct compat_itimerspec __user *setting) | 707 | struct compat_itimerspec __user *, setting) |
| 708 | { | 708 | { |
| 709 | long err; | 709 | long err; |
| 710 | mm_segment_t oldfs; | 710 | mm_segment_t oldfs; |
| @@ -720,14 +720,14 @@ long compat_sys_timer_gettime(timer_t timer_id, | |||
| 720 | return err; | 720 | return err; |
| 721 | } | 721 | } |
| 722 | 722 | ||
| 723 | long compat_sys_clock_settime(clockid_t which_clock, | 723 | COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock, |
| 724 | struct compat_timespec __user *tp) | 724 | struct compat_timespec __user *, tp) |
| 725 | { | 725 | { |
| 726 | long err; | 726 | long err; |
| 727 | mm_segment_t oldfs; | 727 | mm_segment_t oldfs; |
| 728 | struct timespec ts; | 728 | struct timespec ts; |
| 729 | 729 | ||
| 730 | if (get_compat_timespec(&ts, tp)) | 730 | if (compat_get_timespec(&ts, tp)) |
| 731 | return -EFAULT; | 731 | return -EFAULT; |
| 732 | oldfs = get_fs(); | 732 | oldfs = get_fs(); |
| 733 | set_fs(KERNEL_DS); | 733 | set_fs(KERNEL_DS); |
| @@ -737,8 +737,8 @@ long compat_sys_clock_settime(clockid_t which_clock, | |||
| 737 | return err; | 737 | return err; |
| 738 | } | 738 | } |
| 739 | 739 | ||
| 740 | long compat_sys_clock_gettime(clockid_t which_clock, | 740 | COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock, |
| 741 | struct compat_timespec __user *tp) | 741 | struct compat_timespec __user *, tp) |
| 742 | { | 742 | { |
| 743 | long err; | 743 | long err; |
| 744 | mm_segment_t oldfs; | 744 | mm_segment_t oldfs; |
| @@ -749,13 +749,13 @@ long compat_sys_clock_gettime(clockid_t which_clock, | |||
| 749 | err = sys_clock_gettime(which_clock, | 749 | err = sys_clock_gettime(which_clock, |
| 750 | (struct timespec __user *) &ts); | 750 | (struct timespec __user *) &ts); |
| 751 | set_fs(oldfs); | 751 | set_fs(oldfs); |
| 752 | if (!err && put_compat_timespec(&ts, tp)) | 752 | if (!err && compat_put_timespec(&ts, tp)) |
| 753 | return -EFAULT; | 753 | return -EFAULT; |
| 754 | return err; | 754 | return err; |
| 755 | } | 755 | } |
| 756 | 756 | ||
| 757 | long compat_sys_clock_adjtime(clockid_t which_clock, | 757 | COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock, |
| 758 | struct compat_timex __user *utp) | 758 | struct compat_timex __user *, utp) |
| 759 | { | 759 | { |
| 760 | struct timex txc; | 760 | struct timex txc; |
| 761 | mm_segment_t oldfs; | 761 | mm_segment_t oldfs; |
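Several of the compat clock and timer wrappers above share one shape: flip the address limit with set_fs(KERNEL_DS) so the native syscall body accepts a pointer to an on-stack kernel struct, restore the old limit, then convert the result back to the 32-bit layout. A condensed sketch of that shape; sys_native_call() is a placeholder, while compat_put_timespec() is the real helper used in these hunks:

	static long compat_wrapper_sketch(struct compat_timespec __user *utp)
	{
		struct timespec ts;
		mm_segment_t oldfs = get_fs();
		long err;

		set_fs(KERNEL_DS);				/* let &ts pass the __user checks */
		err = sys_native_call((struct timespec __user *)&ts);
		set_fs(oldfs);

		if (!err && compat_put_timespec(&ts, utp))	/* back to the 32-bit layout */
			return -EFAULT;
		return err;
	}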
| @@ -777,8 +777,8 @@ long compat_sys_clock_adjtime(clockid_t which_clock, | |||
| 777 | return ret; | 777 | return ret; |
| 778 | } | 778 | } |
| 779 | 779 | ||
| 780 | long compat_sys_clock_getres(clockid_t which_clock, | 780 | COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock, |
| 781 | struct compat_timespec __user *tp) | 781 | struct compat_timespec __user *, tp) |
| 782 | { | 782 | { |
| 783 | long err; | 783 | long err; |
| 784 | mm_segment_t oldfs; | 784 | mm_segment_t oldfs; |
| @@ -789,7 +789,7 @@ long compat_sys_clock_getres(clockid_t which_clock, | |||
| 789 | err = sys_clock_getres(which_clock, | 789 | err = sys_clock_getres(which_clock, |
| 790 | (struct timespec __user *) &ts); | 790 | (struct timespec __user *) &ts); |
| 791 | set_fs(oldfs); | 791 | set_fs(oldfs); |
| 792 | if (!err && tp && put_compat_timespec(&ts, tp)) | 792 | if (!err && tp && compat_put_timespec(&ts, tp)) |
| 793 | return -EFAULT; | 793 | return -EFAULT; |
| 794 | return err; | 794 | return err; |
| 795 | } | 795 | } |
| @@ -799,7 +799,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) | |||
| 799 | long err; | 799 | long err; |
| 800 | mm_segment_t oldfs; | 800 | mm_segment_t oldfs; |
| 801 | struct timespec tu; | 801 | struct timespec tu; |
| 802 | struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; | 802 | struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp; |
| 803 | 803 | ||
| 804 | restart->nanosleep.rmtp = (struct timespec __user *) &tu; | 804 | restart->nanosleep.rmtp = (struct timespec __user *) &tu; |
| 805 | oldfs = get_fs(); | 805 | oldfs = get_fs(); |
| @@ -808,7 +808,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) | |||
| 808 | set_fs(oldfs); | 808 | set_fs(oldfs); |
| 809 | 809 | ||
| 810 | if ((err == -ERESTART_RESTARTBLOCK) && rmtp && | 810 | if ((err == -ERESTART_RESTARTBLOCK) && rmtp && |
| 811 | put_compat_timespec(&tu, rmtp)) | 811 | compat_put_timespec(&tu, rmtp)) |
| 812 | return -EFAULT; | 812 | return -EFAULT; |
| 813 | 813 | ||
| 814 | if (err == -ERESTART_RESTARTBLOCK) { | 814 | if (err == -ERESTART_RESTARTBLOCK) { |
| @@ -818,16 +818,16 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) | |||
| 818 | return err; | 818 | return err; |
| 819 | } | 819 | } |
| 820 | 820 | ||
| 821 | long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, | 821 | COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, |
| 822 | struct compat_timespec __user *rqtp, | 822 | struct compat_timespec __user *, rqtp, |
| 823 | struct compat_timespec __user *rmtp) | 823 | struct compat_timespec __user *, rmtp) |
| 824 | { | 824 | { |
| 825 | long err; | 825 | long err; |
| 826 | mm_segment_t oldfs; | 826 | mm_segment_t oldfs; |
| 827 | struct timespec in, out; | 827 | struct timespec in, out; |
| 828 | struct restart_block *restart; | 828 | struct restart_block *restart; |
| 829 | 829 | ||
| 830 | if (get_compat_timespec(&in, rqtp)) | 830 | if (compat_get_timespec(&in, rqtp)) |
| 831 | return -EFAULT; | 831 | return -EFAULT; |
| 832 | 832 | ||
| 833 | oldfs = get_fs(); | 833 | oldfs = get_fs(); |
| @@ -838,7 +838,7 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, | |||
| 838 | set_fs(oldfs); | 838 | set_fs(oldfs); |
| 839 | 839 | ||
| 840 | if ((err == -ERESTART_RESTARTBLOCK) && rmtp && | 840 | if ((err == -ERESTART_RESTARTBLOCK) && rmtp && |
| 841 | put_compat_timespec(&out, rmtp)) | 841 | compat_put_timespec(&out, rmtp)) |
| 842 | return -EFAULT; | 842 | return -EFAULT; |
| 843 | 843 | ||
| 844 | if (err == -ERESTART_RESTARTBLOCK) { | 844 | if (err == -ERESTART_RESTARTBLOCK) { |
| @@ -1010,7 +1010,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, | |||
| 1010 | 1010 | ||
| 1011 | /* compat_time_t is a 32 bit "long" and needs to get converted. */ | 1011 | /* compat_time_t is a 32 bit "long" and needs to get converted. */ |
| 1012 | 1012 | ||
| 1013 | asmlinkage long compat_sys_time(compat_time_t __user * tloc) | 1013 | COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc) |
| 1014 | { | 1014 | { |
| 1015 | compat_time_t i; | 1015 | compat_time_t i; |
| 1016 | struct timeval tv; | 1016 | struct timeval tv; |
| @@ -1026,7 +1026,7 @@ asmlinkage long compat_sys_time(compat_time_t __user * tloc) | |||
| 1026 | return i; | 1026 | return i; |
| 1027 | } | 1027 | } |
| 1028 | 1028 | ||
| 1029 | asmlinkage long compat_sys_stime(compat_time_t __user *tptr) | 1029 | COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr) |
| 1030 | { | 1030 | { |
| 1031 | struct timespec tv; | 1031 | struct timespec tv; |
| 1032 | int err; | 1032 | int err; |
| @@ -1046,7 +1046,7 @@ asmlinkage long compat_sys_stime(compat_time_t __user *tptr) | |||
| 1046 | 1046 | ||
| 1047 | #endif /* __ARCH_WANT_COMPAT_SYS_TIME */ | 1047 | #endif /* __ARCH_WANT_COMPAT_SYS_TIME */ |
| 1048 | 1048 | ||
| 1049 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp) | 1049 | COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp) |
| 1050 | { | 1050 | { |
| 1051 | struct timex txc; | 1051 | struct timex txc; |
| 1052 | int err, ret; | 1052 | int err, ret; |
| @@ -1065,11 +1065,11 @@ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp) | |||
| 1065 | } | 1065 | } |
| 1066 | 1066 | ||
| 1067 | #ifdef CONFIG_NUMA | 1067 | #ifdef CONFIG_NUMA |
| 1068 | asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages, | 1068 | COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages, |
| 1069 | compat_uptr_t __user *pages32, | 1069 | compat_uptr_t __user *, pages32, |
| 1070 | const int __user *nodes, | 1070 | const int __user *, nodes, |
| 1071 | int __user *status, | 1071 | int __user *, status, |
| 1072 | int flags) | 1072 | int, flags) |
| 1073 | { | 1073 | { |
| 1074 | const void __user * __user *pages; | 1074 | const void __user * __user *pages; |
| 1075 | int i; | 1075 | int i; |
| @@ -1085,10 +1085,10 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages, | |||
| 1085 | return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); | 1085 | return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); |
| 1086 | } | 1086 | } |
| 1087 | 1087 | ||
| 1088 | asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, | 1088 | COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, |
| 1089 | compat_ulong_t maxnode, | 1089 | compat_ulong_t, maxnode, |
| 1090 | const compat_ulong_t __user *old_nodes, | 1090 | const compat_ulong_t __user *, old_nodes, |
| 1091 | const compat_ulong_t __user *new_nodes) | 1091 | const compat_ulong_t __user *, new_nodes) |
| 1092 | { | 1092 | { |
| 1093 | unsigned long __user *old = NULL; | 1093 | unsigned long __user *old = NULL; |
| 1094 | unsigned long __user *new = NULL; | 1094 | unsigned long __user *new = NULL; |
| @@ -1130,7 +1130,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, | |||
| 1130 | set_fs(KERNEL_DS); | 1130 | set_fs(KERNEL_DS); |
| 1131 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); | 1131 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); |
| 1132 | set_fs(old_fs); | 1132 | set_fs(old_fs); |
| 1133 | if (put_compat_timespec(&t, interval)) | 1133 | if (compat_put_timespec(&t, interval)) |
| 1134 | return -EFAULT; | 1134 | return -EFAULT; |
| 1135 | return ret; | 1135 | return ret; |
| 1136 | } | 1136 | } |
diff --git a/kernel/cpu.c b/kernel/cpu.c index deff2e693766..a9e710eef0e2 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
| 20 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
| 21 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
| 22 | #include <linux/lockdep.h> | ||
| 22 | 23 | ||
| 23 | #include "smpboot.h" | 24 | #include "smpboot.h" |
| 24 | 25 | ||
| @@ -27,18 +28,23 @@ | |||
| 27 | static DEFINE_MUTEX(cpu_add_remove_lock); | 28 | static DEFINE_MUTEX(cpu_add_remove_lock); |
| 28 | 29 | ||
| 29 | /* | 30 | /* |
| 30 | * The following two API's must be used when attempting | 31 | * The following two APIs (cpu_maps_update_begin/done) must be used when |
| 31 | * to serialize the updates to cpu_online_mask, cpu_present_mask. | 32 | * attempting to serialize the updates to cpu_online_mask & cpu_present_mask. |
| 33 | * The APIs cpu_notifier_register_begin/done() must be used to protect CPU | ||
| 34 | * hotplug callback (un)registration performed using __register_cpu_notifier() | ||
| 35 | * or __unregister_cpu_notifier(). | ||
| 32 | */ | 36 | */ |
| 33 | void cpu_maps_update_begin(void) | 37 | void cpu_maps_update_begin(void) |
| 34 | { | 38 | { |
| 35 | mutex_lock(&cpu_add_remove_lock); | 39 | mutex_lock(&cpu_add_remove_lock); |
| 36 | } | 40 | } |
| 41 | EXPORT_SYMBOL(cpu_notifier_register_begin); | ||
| 37 | 42 | ||
| 38 | void cpu_maps_update_done(void) | 43 | void cpu_maps_update_done(void) |
| 39 | { | 44 | { |
| 40 | mutex_unlock(&cpu_add_remove_lock); | 45 | mutex_unlock(&cpu_add_remove_lock); |
| 41 | } | 46 | } |
| 47 | EXPORT_SYMBOL(cpu_notifier_register_done); | ||
| 42 | 48 | ||
| 43 | static RAW_NOTIFIER_HEAD(cpu_chain); | 49 | static RAW_NOTIFIER_HEAD(cpu_chain); |
| 44 | 50 | ||
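The new comment above spells out a protocol: code that must set up per-CPU state and then register a hotplug notifier atomically with respect to hotplug brackets the work with cpu_notifier_register_begin()/done() and uses the double-underscored (un)registration helpers inside that section. A sketch of a hypothetical subsystem init following that protocol; all foo_* names are illustrative:

	static struct notifier_block foo_cpu_notifier = {
		.notifier_call = foo_cpu_callback,
	};

	static int __init foo_init(void)
	{
		int cpu;

		cpu_notifier_register_begin();		/* exclude CPU hotplug */

		for_each_online_cpu(cpu)
			foo_init_cpu(cpu);		/* set up already-online CPUs */

		/* note the double-underscored variant inside the begin/done section */
		__register_cpu_notifier(&foo_cpu_notifier);

		cpu_notifier_register_done();
		return 0;
	}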
| @@ -57,17 +63,30 @@ static struct { | |||
| 57 | * an ongoing cpu hotplug operation. | 63 | * an ongoing cpu hotplug operation. |
| 58 | */ | 64 | */ |
| 59 | int refcount; | 65 | int refcount; |
| 66 | |||
| 67 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 68 | struct lockdep_map dep_map; | ||
| 69 | #endif | ||
| 60 | } cpu_hotplug = { | 70 | } cpu_hotplug = { |
| 61 | .active_writer = NULL, | 71 | .active_writer = NULL, |
| 62 | .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), | 72 | .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), |
| 63 | .refcount = 0, | 73 | .refcount = 0, |
| 74 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 75 | .dep_map = {.name = "cpu_hotplug.lock" }, | ||
| 76 | #endif | ||
| 64 | }; | 77 | }; |
| 65 | 78 | ||
| 79 | /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */ | ||
| 80 | #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map) | ||
| 81 | #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) | ||
| 82 | #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) | ||
| 83 | |||
| 66 | void get_online_cpus(void) | 84 | void get_online_cpus(void) |
| 67 | { | 85 | { |
| 68 | might_sleep(); | 86 | might_sleep(); |
| 69 | if (cpu_hotplug.active_writer == current) | 87 | if (cpu_hotplug.active_writer == current) |
| 70 | return; | 88 | return; |
| 89 | cpuhp_lock_acquire_read(); | ||
| 71 | mutex_lock(&cpu_hotplug.lock); | 90 | mutex_lock(&cpu_hotplug.lock); |
| 72 | cpu_hotplug.refcount++; | 91 | cpu_hotplug.refcount++; |
| 73 | mutex_unlock(&cpu_hotplug.lock); | 92 | mutex_unlock(&cpu_hotplug.lock); |
| @@ -87,6 +106,7 @@ void put_online_cpus(void) | |||
| 87 | if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) | 106 | if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) |
| 88 | wake_up_process(cpu_hotplug.active_writer); | 107 | wake_up_process(cpu_hotplug.active_writer); |
| 89 | mutex_unlock(&cpu_hotplug.lock); | 108 | mutex_unlock(&cpu_hotplug.lock); |
| 109 | cpuhp_lock_release(); | ||
| 90 | 110 | ||
| 91 | } | 111 | } |
| 92 | EXPORT_SYMBOL_GPL(put_online_cpus); | 112 | EXPORT_SYMBOL_GPL(put_online_cpus); |
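The dep_map plus cpuhp_lock_acquire_read()/cpuhp_lock_release() additions above teach lockdep about a locking construct that is not a plain mutex (the reader side of get/put_online_cpus()). A stripped-down sketch of the same annotation idiom applied to some other refcounted guard; apart from struct lockdep_map and the lock_map_* helpers, everything here is illustrative and the bare .name initializer simply mirrors the form used in this hunk:

	#ifdef CONFIG_DEBUG_LOCK_ALLOC
	static struct lockdep_map demo_dep_map = { .name = "demo_guard" };
	#define demo_lock_acquire_read()	lock_map_acquire_read(&demo_dep_map)
	#define demo_lock_release()		lock_map_release(&demo_dep_map)
	#else
	#define demo_lock_acquire_read()	do { } while (0)
	#define demo_lock_release()		do { } while (0)
	#endif

	static void demo_guard_enter(void)
	{
		demo_lock_acquire_read();	/* tell lockdep a "read lock" is now held */
		/* ... take the real mutex, bump the refcount, drop the mutex ... */
	}

	static void demo_guard_exit(void)
	{
		/* ... take the real mutex, drop the refcount, drop the mutex ... */
		demo_lock_release();		/* matching release for lockdep */
	}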
| @@ -117,6 +137,7 @@ void cpu_hotplug_begin(void) | |||
| 117 | { | 137 | { |
| 118 | cpu_hotplug.active_writer = current; | 138 | cpu_hotplug.active_writer = current; |
| 119 | 139 | ||
| 140 | cpuhp_lock_acquire(); | ||
| 120 | for (;;) { | 141 | for (;;) { |
| 121 | mutex_lock(&cpu_hotplug.lock); | 142 | mutex_lock(&cpu_hotplug.lock); |
| 122 | if (likely(!cpu_hotplug.refcount)) | 143 | if (likely(!cpu_hotplug.refcount)) |
| @@ -131,6 +152,7 @@ void cpu_hotplug_done(void) | |||
| 131 | { | 152 | { |
| 132 | cpu_hotplug.active_writer = NULL; | 153 | cpu_hotplug.active_writer = NULL; |
| 133 | mutex_unlock(&cpu_hotplug.lock); | 154 | mutex_unlock(&cpu_hotplug.lock); |
| 155 | cpuhp_lock_release(); | ||
| 134 | } | 156 | } |
| 135 | 157 | ||
| 136 | /* | 158 | /* |
| @@ -166,6 +188,11 @@ int __ref register_cpu_notifier(struct notifier_block *nb) | |||
| 166 | return ret; | 188 | return ret; |
| 167 | } | 189 | } |
| 168 | 190 | ||
| 191 | int __ref __register_cpu_notifier(struct notifier_block *nb) | ||
| 192 | { | ||
| 193 | return raw_notifier_chain_register(&cpu_chain, nb); | ||
| 194 | } | ||
| 195 | |||
| 169 | static int __cpu_notify(unsigned long val, void *v, int nr_to_call, | 196 | static int __cpu_notify(unsigned long val, void *v, int nr_to_call, |
| 170 | int *nr_calls) | 197 | int *nr_calls) |
| 171 | { | 198 | { |
| @@ -189,6 +216,7 @@ static void cpu_notify_nofail(unsigned long val, void *v) | |||
| 189 | BUG_ON(cpu_notify(val, v)); | 216 | BUG_ON(cpu_notify(val, v)); |
| 190 | } | 217 | } |
| 191 | EXPORT_SYMBOL(register_cpu_notifier); | 218 | EXPORT_SYMBOL(register_cpu_notifier); |
| 219 | EXPORT_SYMBOL(__register_cpu_notifier); | ||
| 192 | 220 | ||
| 193 | void __ref unregister_cpu_notifier(struct notifier_block *nb) | 221 | void __ref unregister_cpu_notifier(struct notifier_block *nb) |
| 194 | { | 222 | { |
| @@ -198,6 +226,12 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb) | |||
| 198 | } | 226 | } |
| 199 | EXPORT_SYMBOL(unregister_cpu_notifier); | 227 | EXPORT_SYMBOL(unregister_cpu_notifier); |
| 200 | 228 | ||
| 229 | void __ref __unregister_cpu_notifier(struct notifier_block *nb) | ||
| 230 | { | ||
| 231 | raw_notifier_chain_unregister(&cpu_chain, nb); | ||
| 232 | } | ||
| 233 | EXPORT_SYMBOL(__unregister_cpu_notifier); | ||
| 234 | |||
| 201 | /** | 235 | /** |
| 202 | * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU | 236 | * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU |
| 203 | * @cpu: a CPU id | 237 | * @cpu: a CPU id |
diff --git a/kernel/cpu/Makefile b/kernel/cpu/Makefile deleted file mode 100644 index 59ab052ef7a0..000000000000 --- a/kernel/cpu/Makefile +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | obj-y = idle.o | ||
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c deleted file mode 100644 index 277f494c2a9a..000000000000 --- a/kernel/cpu/idle.c +++ /dev/null | |||
| @@ -1,144 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Generic entry point for the idle threads | ||
| 3 | */ | ||
| 4 | #include <linux/sched.h> | ||
| 5 | #include <linux/cpu.h> | ||
| 6 | #include <linux/tick.h> | ||
| 7 | #include <linux/mm.h> | ||
| 8 | #include <linux/stackprotector.h> | ||
| 9 | |||
| 10 | #include <asm/tlb.h> | ||
| 11 | |||
| 12 | #include <trace/events/power.h> | ||
| 13 | |||
| 14 | static int __read_mostly cpu_idle_force_poll; | ||
| 15 | |||
| 16 | void cpu_idle_poll_ctrl(bool enable) | ||
| 17 | { | ||
| 18 | if (enable) { | ||
| 19 | cpu_idle_force_poll++; | ||
| 20 | } else { | ||
| 21 | cpu_idle_force_poll--; | ||
| 22 | WARN_ON_ONCE(cpu_idle_force_poll < 0); | ||
| 23 | } | ||
| 24 | } | ||
| 25 | |||
| 26 | #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP | ||
| 27 | static int __init cpu_idle_poll_setup(char *__unused) | ||
| 28 | { | ||
| 29 | cpu_idle_force_poll = 1; | ||
| 30 | return 1; | ||
| 31 | } | ||
| 32 | __setup("nohlt", cpu_idle_poll_setup); | ||
| 33 | |||
| 34 | static int __init cpu_idle_nopoll_setup(char *__unused) | ||
| 35 | { | ||
| 36 | cpu_idle_force_poll = 0; | ||
| 37 | return 1; | ||
| 38 | } | ||
| 39 | __setup("hlt", cpu_idle_nopoll_setup); | ||
| 40 | #endif | ||
| 41 | |||
| 42 | static inline int cpu_idle_poll(void) | ||
| 43 | { | ||
| 44 | rcu_idle_enter(); | ||
| 45 | trace_cpu_idle_rcuidle(0, smp_processor_id()); | ||
| 46 | local_irq_enable(); | ||
| 47 | while (!tif_need_resched()) | ||
| 48 | cpu_relax(); | ||
| 49 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); | ||
| 50 | rcu_idle_exit(); | ||
| 51 | return 1; | ||
| 52 | } | ||
| 53 | |||
| 54 | /* Weak implementations for optional arch specific functions */ | ||
| 55 | void __weak arch_cpu_idle_prepare(void) { } | ||
| 56 | void __weak arch_cpu_idle_enter(void) { } | ||
| 57 | void __weak arch_cpu_idle_exit(void) { } | ||
| 58 | void __weak arch_cpu_idle_dead(void) { } | ||
| 59 | void __weak arch_cpu_idle(void) | ||
| 60 | { | ||
| 61 | cpu_idle_force_poll = 1; | ||
| 62 | local_irq_enable(); | ||
| 63 | } | ||
| 64 | |||
| 65 | /* | ||
| 66 | * Generic idle loop implementation | ||
| 67 | */ | ||
| 68 | static void cpu_idle_loop(void) | ||
| 69 | { | ||
| 70 | while (1) { | ||
| 71 | tick_nohz_idle_enter(); | ||
| 72 | |||
| 73 | while (!need_resched()) { | ||
| 74 | check_pgt_cache(); | ||
| 75 | rmb(); | ||
| 76 | |||
| 77 | if (cpu_is_offline(smp_processor_id())) | ||
| 78 | arch_cpu_idle_dead(); | ||
| 79 | |||
| 80 | local_irq_disable(); | ||
| 81 | arch_cpu_idle_enter(); | ||
| 82 | |||
| 83 | /* | ||
| 84 | * In poll mode we reenable interrupts and spin. | ||
| 85 | * | ||
| 86 | * Also if we detected in the wakeup from idle | ||
| 87 | * path that the tick broadcast device expired | ||
| 88 | * for us, we don't want to go deep idle as we | ||
| 89 | * know that the IPI is going to arrive right | ||
| 90 | * away | ||
| 91 | */ | ||
| 92 | if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | ||
| 93 | cpu_idle_poll(); | ||
| 94 | } else { | ||
| 95 | if (!current_clr_polling_and_test()) { | ||
| 96 | stop_critical_timings(); | ||
| 97 | rcu_idle_enter(); | ||
| 98 | arch_cpu_idle(); | ||
| 99 | WARN_ON_ONCE(irqs_disabled()); | ||
| 100 | rcu_idle_exit(); | ||
| 101 | start_critical_timings(); | ||
| 102 | } else { | ||
| 103 | local_irq_enable(); | ||
| 104 | } | ||
| 105 | __current_set_polling(); | ||
| 106 | } | ||
| 107 | arch_cpu_idle_exit(); | ||
| 108 | } | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Since we fell out of the loop above, we know | ||
| 112 | * TIF_NEED_RESCHED must be set, propagate it into | ||
| 113 | * PREEMPT_NEED_RESCHED. | ||
| 114 | * | ||
| 115 | * This is required because for polling idle loops we will | ||
| 116 | * not have had an IPI to fold the state for us. | ||
| 117 | */ | ||
| 118 | preempt_set_need_resched(); | ||
| 119 | tick_nohz_idle_exit(); | ||
| 120 | schedule_preempt_disabled(); | ||
| 121 | } | ||
| 122 | } | ||
| 123 | |||
| 124 | void cpu_startup_entry(enum cpuhp_state state) | ||
| 125 | { | ||
| 126 | /* | ||
| 127 | * This #ifdef needs to die, but it's too late in the cycle to | ||
| 128 | * make this generic (arm and sh have never invoked the canary | ||
| 129 | * init for the non boot cpus!). Will be fixed in 3.11 | ||
| 130 | */ | ||
| 131 | #ifdef CONFIG_X86 | ||
| 132 | /* | ||
| 133 | * If we're the non-boot CPU, nothing set the stack canary up | ||
| 134 | * for us. The boot CPU already has it initialized but no harm | ||
| 135 | * in doing it again. This is a good place for updating it, as | ||
| 136 | * we wont ever return from this function (so the invalid | ||
| 137 | * canaries already on the stack wont ever trigger). | ||
| 138 | */ | ||
| 139 | boot_init_stack_canary(); | ||
| 140 | #endif | ||
| 141 | __current_set_polling(); | ||
| 142 | arch_cpu_idle_prepare(); | ||
| 143 | cpu_idle_loop(); | ||
| 144 | } | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4410ac6a55f1..3d54c418bd06 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -119,7 +119,7 @@ static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) | |||
| 119 | /* Retrieve the cpuset for a task */ | 119 | /* Retrieve the cpuset for a task */ |
| 120 | static inline struct cpuset *task_cs(struct task_struct *task) | 120 | static inline struct cpuset *task_cs(struct task_struct *task) |
| 121 | { | 121 | { |
| 122 | return css_cs(task_css(task, cpuset_subsys_id)); | 122 | return css_cs(task_css(task, cpuset_cgrp_id)); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static inline struct cpuset *parent_cs(struct cpuset *cs) | 125 | static inline struct cpuset *parent_cs(struct cpuset *cs) |
| @@ -467,7 +467,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) | |||
| 467 | * be changed to have empty cpus_allowed or mems_allowed. | 467 | * be changed to have empty cpus_allowed or mems_allowed. |
| 468 | */ | 468 | */ |
| 469 | ret = -ENOSPC; | 469 | ret = -ENOSPC; |
| 470 | if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) { | 470 | if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) { |
| 471 | if (!cpumask_empty(cur->cpus_allowed) && | 471 | if (!cpumask_empty(cur->cpus_allowed) && |
| 472 | cpumask_empty(trial->cpus_allowed)) | 472 | cpumask_empty(trial->cpus_allowed)) |
| 473 | goto out; | 473 | goto out; |
| @@ -829,55 +829,36 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs) | |||
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | /** | 831 | /** |
| 832 | * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's | ||
| 833 | * @tsk: task to test | ||
| 834 | * @data: cpuset to @tsk belongs to | ||
| 835 | * | ||
| 836 | * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed | ||
| 837 | * mask needs to be changed. | ||
| 838 | * | ||
| 839 | * We don't need to re-check for the cgroup/cpuset membership, since we're | ||
| 840 | * holding cpuset_mutex at this point. | ||
| 841 | */ | ||
| 842 | static void cpuset_change_cpumask(struct task_struct *tsk, void *data) | ||
| 843 | { | ||
| 844 | struct cpuset *cs = data; | ||
| 845 | struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); | ||
| 846 | |||
| 847 | set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed); | ||
| 848 | } | ||
| 849 | |||
| 850 | /** | ||
| 851 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. | 832 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. |
| 852 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed | 833 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed |
| 853 | * @heap: if NULL, defer allocating heap memory to css_scan_tasks() | ||
| 854 | * | ||
| 855 | * Called with cpuset_mutex held | ||
| 856 | * | 834 | * |
| 857 | * The css_scan_tasks() function will scan all the tasks in a cgroup, | 835 | * Iterate through each task of @cs updating its cpus_allowed to the |
| 858 | * calling callback functions for each. | 836 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 859 | * | 837 | * cpuset membership stays stable. |
| 860 | * No return value. It's guaranteed that css_scan_tasks() always returns 0 | ||
| 861 | * if @heap != NULL. | ||
| 862 | */ | 838 | */ |
| 863 | static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap) | 839 | static void update_tasks_cpumask(struct cpuset *cs) |
| 864 | { | 840 | { |
| 865 | css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap); | 841 | struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); |
| 842 | struct css_task_iter it; | ||
| 843 | struct task_struct *task; | ||
| 844 | |||
| 845 | css_task_iter_start(&cs->css, &it); | ||
| 846 | while ((task = css_task_iter_next(&it))) | ||
| 847 | set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed); | ||
| 848 | css_task_iter_end(&it); | ||
| 866 | } | 849 | } |
| 867 | 850 | ||
| 868 | /* | 851 | /* |
| 869 | * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy. | 852 | * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy. |
| 870 | * @root_cs: the root cpuset of the hierarchy | 853 | * @root_cs: the root cpuset of the hierarchy |
| 871 | * @update_root: update root cpuset or not? | 854 | * @update_root: update root cpuset or not? |
| 872 | * @heap: the heap used by css_scan_tasks() | ||
| 873 | * | 855 | * |
| 874 | * This will update cpumasks of tasks in @root_cs and all other empty cpusets | 856 | * This will update cpumasks of tasks in @root_cs and all other empty cpusets |
| 875 | * which take on cpumask of @root_cs. | 857 | * which take on cpumask of @root_cs. |
| 876 | * | 858 | * |
| 877 | * Called with cpuset_mutex held | 859 | * Called with cpuset_mutex held |
| 878 | */ | 860 | */ |
| 879 | static void update_tasks_cpumask_hier(struct cpuset *root_cs, | 861 | static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root) |
| 880 | bool update_root, struct ptr_heap *heap) | ||
| 881 | { | 862 | { |
| 882 | struct cpuset *cp; | 863 | struct cpuset *cp; |
| 883 | struct cgroup_subsys_state *pos_css; | 864 | struct cgroup_subsys_state *pos_css; |
| @@ -898,7 +879,7 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs, | |||
| 898 | continue; | 879 | continue; |
| 899 | rcu_read_unlock(); | 880 | rcu_read_unlock(); |
| 900 | 881 | ||
| 901 | update_tasks_cpumask(cp, heap); | 882 | update_tasks_cpumask(cp); |
| 902 | 883 | ||
| 903 | rcu_read_lock(); | 884 | rcu_read_lock(); |
| 904 | css_put(&cp->css); | 885 | css_put(&cp->css); |
| @@ -914,7 +895,6 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs, | |||
| 914 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, | 895 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
| 915 | const char *buf) | 896 | const char *buf) |
| 916 | { | 897 | { |
| 917 | struct ptr_heap heap; | ||
| 918 | int retval; | 898 | int retval; |
| 919 | int is_load_balanced; | 899 | int is_load_balanced; |
| 920 | 900 | ||
| @@ -947,19 +927,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, | |||
| 947 | if (retval < 0) | 927 | if (retval < 0) |
| 948 | return retval; | 928 | return retval; |
| 949 | 929 | ||
| 950 | retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); | ||
| 951 | if (retval) | ||
| 952 | return retval; | ||
| 953 | |||
| 954 | is_load_balanced = is_sched_load_balance(trialcs); | 930 | is_load_balanced = is_sched_load_balance(trialcs); |
| 955 | 931 | ||
| 956 | mutex_lock(&callback_mutex); | 932 | mutex_lock(&callback_mutex); |
| 957 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); | 933 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
| 958 | mutex_unlock(&callback_mutex); | 934 | mutex_unlock(&callback_mutex); |
| 959 | 935 | ||
| 960 | update_tasks_cpumask_hier(cs, true, &heap); | 936 | update_tasks_cpumask_hier(cs, true); |
| 961 | |||
| 962 | heap_free(&heap); | ||
| 963 | 937 | ||
| 964 | if (is_load_balanced) | 938 | if (is_load_balanced) |
| 965 | rebuild_sched_domains_locked(); | 939 | rebuild_sched_domains_locked(); |
| @@ -974,12 +948,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, | |||
| 974 | * Temporarily set tasks mems_allowed to target nodes of migration, | 948 | * Temporarily set tasks mems_allowed to target nodes of migration, |
| 975 | * so that the migration code can allocate pages on these nodes. | 949 | * so that the migration code can allocate pages on these nodes. |
| 976 | * | 950 | * |
| 977 | * Call holding cpuset_mutex, so current's cpuset won't change | ||
| 978 | * during this call, as manage_mutex holds off any cpuset_attach() | ||
| 979 | * calls. Therefore we don't need to take task_lock around the | ||
| 980 | * call to guarantee_online_mems(), as we know no one is changing | ||
| 981 | * our task's cpuset. | ||
| 982 | * | ||
| 983 | * While the mm_struct we are migrating is typically from some | 951 | * While the mm_struct we are migrating is typically from some |
| 984 | * other task, the task_struct mems_allowed that we are hacking | 952 | * other task, the task_struct mems_allowed that we are hacking |
| 985 | * is for our current task, which must allocate new pages for that | 953 | * is for our current task, which must allocate new pages for that |
| @@ -996,8 +964,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, | |||
| 996 | 964 | ||
| 997 | do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); | 965 | do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); |
| 998 | 966 | ||
| 967 | rcu_read_lock(); | ||
| 999 | mems_cs = effective_nodemask_cpuset(task_cs(tsk)); | 968 | mems_cs = effective_nodemask_cpuset(task_cs(tsk)); |
| 1000 | guarantee_online_mems(mems_cs, &tsk->mems_allowed); | 969 | guarantee_online_mems(mems_cs, &tsk->mems_allowed); |
| 970 | rcu_read_unlock(); | ||
| 1001 | } | 971 | } |
| 1002 | 972 | ||
| 1003 | /* | 973 | /* |
| @@ -1026,7 +996,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk, | |||
| 1026 | task_lock(tsk); | 996 | task_lock(tsk); |
| 1027 | /* | 997 | /* |
| 1028 | * Determine if a loop is necessary if another thread is doing | 998 | * Determine if a loop is necessary if another thread is doing |
| 1029 | * get_mems_allowed(). If at least one node remains unchanged and | 999 | * read_mems_allowed_begin(). If at least one node remains unchanged and |
| 1030 | * tsk does not have a mempolicy, then an empty nodemask will not be | 1000 | * tsk does not have a mempolicy, then an empty nodemask will not be |
| 1031 | * possible when mems_allowed is larger than a word. | 1001 | * possible when mems_allowed is larger than a word. |
| 1032 | */ | 1002 | */ |
| @@ -1052,53 +1022,22 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk, | |||
| 1052 | task_unlock(tsk); | 1022 | task_unlock(tsk); |
| 1053 | } | 1023 | } |
| 1054 | 1024 | ||
| 1055 | struct cpuset_change_nodemask_arg { | ||
| 1056 | struct cpuset *cs; | ||
| 1057 | nodemask_t *newmems; | ||
| 1058 | }; | ||
| 1059 | |||
| 1060 | /* | ||
| 1061 | * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy | ||
| 1062 | * of it to cpuset's new mems_allowed, and migrate pages to new nodes if | ||
| 1063 | * memory_migrate flag is set. Called with cpuset_mutex held. | ||
| 1064 | */ | ||
| 1065 | static void cpuset_change_nodemask(struct task_struct *p, void *data) | ||
| 1066 | { | ||
| 1067 | struct cpuset_change_nodemask_arg *arg = data; | ||
| 1068 | struct cpuset *cs = arg->cs; | ||
| 1069 | struct mm_struct *mm; | ||
| 1070 | int migrate; | ||
| 1071 | |||
| 1072 | cpuset_change_task_nodemask(p, arg->newmems); | ||
| 1073 | |||
| 1074 | mm = get_task_mm(p); | ||
| 1075 | if (!mm) | ||
| 1076 | return; | ||
| 1077 | |||
| 1078 | migrate = is_memory_migrate(cs); | ||
| 1079 | |||
| 1080 | mpol_rebind_mm(mm, &cs->mems_allowed); | ||
| 1081 | if (migrate) | ||
| 1082 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems); | ||
| 1083 | mmput(mm); | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | static void *cpuset_being_rebound; | 1025 | static void *cpuset_being_rebound; |
| 1087 | 1026 | ||
| 1088 | /** | 1027 | /** |
| 1089 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. | 1028 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. |
| 1090 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed | 1029 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed |
| 1091 | * @heap: if NULL, defer allocating heap memory to css_scan_tasks() | ||
| 1092 | * | 1030 | * |
| 1093 | * Called with cpuset_mutex held. No return value. It's guaranteed that | 1031 | * Iterate through each task of @cs updating its mems_allowed to the |
| 1094 | * css_scan_tasks() always returns 0 if @heap != NULL. | 1032 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 1033 | * cpuset membership stays stable. | ||
| 1095 | */ | 1034 | */ |
| 1096 | static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) | 1035 | static void update_tasks_nodemask(struct cpuset *cs) |
| 1097 | { | 1036 | { |
| 1098 | static nodemask_t newmems; /* protected by cpuset_mutex */ | 1037 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
| 1099 | struct cpuset *mems_cs = effective_nodemask_cpuset(cs); | 1038 | struct cpuset *mems_cs = effective_nodemask_cpuset(cs); |
| 1100 | struct cpuset_change_nodemask_arg arg = { .cs = cs, | 1039 | struct css_task_iter it; |
| 1101 | .newmems = &newmems }; | 1040 | struct task_struct *task; |
| 1102 | 1041 | ||
| 1103 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ | 1042 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
| 1104 | 1043 | ||
| @@ -1114,7 +1053,25 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) | |||
| 1114 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() | 1053 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
| 1115 | * is idempotent. Also migrate pages in each mm to new nodes. | 1054 | * is idempotent. Also migrate pages in each mm to new nodes. |
| 1116 | */ | 1055 | */ |
| 1117 | css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap); | 1056 | css_task_iter_start(&cs->css, &it); |
| 1057 | while ((task = css_task_iter_next(&it))) { | ||
| 1058 | struct mm_struct *mm; | ||
| 1059 | bool migrate; | ||
| 1060 | |||
| 1061 | cpuset_change_task_nodemask(task, &newmems); | ||
| 1062 | |||
| 1063 | mm = get_task_mm(task); | ||
| 1064 | if (!mm) | ||
| 1065 | continue; | ||
| 1066 | |||
| 1067 | migrate = is_memory_migrate(cs); | ||
| 1068 | |||
| 1069 | mpol_rebind_mm(mm, &cs->mems_allowed); | ||
| 1070 | if (migrate) | ||
| 1071 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); | ||
| 1072 | mmput(mm); | ||
| 1073 | } | ||
| 1074 | css_task_iter_end(&it); | ||
| 1118 | 1075 | ||
| 1119 | /* | 1076 | /* |
| 1120 | * All the tasks' nodemasks have been updated, update | 1077 | * All the tasks' nodemasks have been updated, update |
| @@ -1130,15 +1087,13 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) | |||
| 1130 | * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy. | 1087 | * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy. |
| 1131 | * @cs: the root cpuset of the hierarchy | 1088 | * @cs: the root cpuset of the hierarchy |
| 1132 | * @update_root: update the root cpuset or not? | 1089 | * @update_root: update the root cpuset or not? |
| 1133 | * @heap: the heap used by css_scan_tasks() | ||
| 1134 | * | 1090 | * |
| 1135 | * This will update nodemasks of tasks in @root_cs and all other empty cpusets | 1091 | * This will update nodemasks of tasks in @root_cs and all other empty cpusets |
| 1136 | * which take on nodemask of @root_cs. | 1092 | * which take on nodemask of @root_cs. |
| 1137 | * | 1093 | * |
| 1138 | * Called with cpuset_mutex held | 1094 | * Called with cpuset_mutex held |
| 1139 | */ | 1095 | */ |
| 1140 | static void update_tasks_nodemask_hier(struct cpuset *root_cs, | 1096 | static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root) |
| 1141 | bool update_root, struct ptr_heap *heap) | ||
| 1142 | { | 1097 | { |
| 1143 | struct cpuset *cp; | 1098 | struct cpuset *cp; |
| 1144 | struct cgroup_subsys_state *pos_css; | 1099 | struct cgroup_subsys_state *pos_css; |
| @@ -1159,7 +1114,7 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs, | |||
| 1159 | continue; | 1114 | continue; |
| 1160 | rcu_read_unlock(); | 1115 | rcu_read_unlock(); |
| 1161 | 1116 | ||
| 1162 | update_tasks_nodemask(cp, heap); | 1117 | update_tasks_nodemask(cp); |
| 1163 | 1118 | ||
| 1164 | rcu_read_lock(); | 1119 | rcu_read_lock(); |
| 1165 | css_put(&cp->css); | 1120 | css_put(&cp->css); |
| @@ -1184,7 +1139,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, | |||
| 1184 | const char *buf) | 1139 | const char *buf) |
| 1185 | { | 1140 | { |
| 1186 | int retval; | 1141 | int retval; |
| 1187 | struct ptr_heap heap; | ||
| 1188 | 1142 | ||
| 1189 | /* | 1143 | /* |
| 1190 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; | 1144 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; |
| @@ -1223,17 +1177,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, | |||
| 1223 | if (retval < 0) | 1177 | if (retval < 0) |
| 1224 | goto done; | 1178 | goto done; |
| 1225 | 1179 | ||
| 1226 | retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); | ||
| 1227 | if (retval < 0) | ||
| 1228 | goto done; | ||
| 1229 | |||
| 1230 | mutex_lock(&callback_mutex); | 1180 | mutex_lock(&callback_mutex); |
| 1231 | cs->mems_allowed = trialcs->mems_allowed; | 1181 | cs->mems_allowed = trialcs->mems_allowed; |
| 1232 | mutex_unlock(&callback_mutex); | 1182 | mutex_unlock(&callback_mutex); |
| 1233 | 1183 | ||
| 1234 | update_tasks_nodemask_hier(cs, true, &heap); | 1184 | update_tasks_nodemask_hier(cs, true); |
| 1235 | |||
| 1236 | heap_free(&heap); | ||
| 1237 | done: | 1185 | done: |
| 1238 | return retval; | 1186 | return retval; |
| 1239 | } | 1187 | } |
| @@ -1261,38 +1209,22 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) | |||
| 1261 | } | 1209 | } |
| 1262 | 1210 | ||
| 1263 | /** | 1211 | /** |
| 1264 | * cpuset_change_flag - make a task's spread flags the same as its cpuset's | ||
| 1265 | * @tsk: task to be updated | ||
| 1266 | * @data: cpuset to @tsk belongs to | ||
| 1267 | * | ||
| 1268 | * Called by css_scan_tasks() for each task in a cgroup. | ||
| 1269 | * | ||
| 1270 | * We don't need to re-check for the cgroup/cpuset membership, since we're | ||
| 1271 | * holding cpuset_mutex at this point. | ||
| 1272 | */ | ||
| 1273 | static void cpuset_change_flag(struct task_struct *tsk, void *data) | ||
| 1274 | { | ||
| 1275 | struct cpuset *cs = data; | ||
| 1276 | |||
| 1277 | cpuset_update_task_spread_flag(cs, tsk); | ||
| 1278 | } | ||
| 1279 | |||
| 1280 | /** | ||
| 1281 | * update_tasks_flags - update the spread flags of tasks in the cpuset. | 1212 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
| 1282 | * @cs: the cpuset in which each task's spread flags needs to be changed | 1213 | * @cs: the cpuset in which each task's spread flags needs to be changed |
| 1283 | * @heap: if NULL, defer allocating heap memory to css_scan_tasks() | ||
| 1284 | * | ||
| 1285 | * Called with cpuset_mutex held | ||
| 1286 | * | ||
| 1287 | * The css_scan_tasks() function will scan all the tasks in a cgroup, | ||
| 1288 | * calling callback functions for each. | ||
| 1289 | * | 1214 | * |
| 1290 | * No return value. It's guaranteed that css_scan_tasks() always returns 0 | 1215 | * Iterate through each task of @cs updating its spread flags. As this |
| 1291 | * if @heap != NULL. | 1216 | * function is called with cpuset_mutex held, cpuset membership stays |
| 1217 | * stable. | ||
| 1292 | */ | 1218 | */ |
| 1293 | static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap) | 1219 | static void update_tasks_flags(struct cpuset *cs) |
| 1294 | { | 1220 | { |
| 1295 | css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap); | 1221 | struct css_task_iter it; |
| 1222 | struct task_struct *task; | ||
| 1223 | |||
| 1224 | css_task_iter_start(&cs->css, &it); | ||
| 1225 | while ((task = css_task_iter_next(&it))) | ||
| 1226 | cpuset_update_task_spread_flag(cs, task); | ||
| 1227 | css_task_iter_end(&it); | ||
| 1296 | } | 1228 | } |
| 1297 | 1229 | ||
| 1298 | /* | 1230 | /* |
| @@ -1310,7 +1242,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, | |||
| 1310 | struct cpuset *trialcs; | 1242 | struct cpuset *trialcs; |
| 1311 | int balance_flag_changed; | 1243 | int balance_flag_changed; |
| 1312 | int spread_flag_changed; | 1244 | int spread_flag_changed; |
| 1313 | struct ptr_heap heap; | ||
| 1314 | int err; | 1245 | int err; |
| 1315 | 1246 | ||
| 1316 | trialcs = alloc_trial_cpuset(cs); | 1247 | trialcs = alloc_trial_cpuset(cs); |
| @@ -1326,10 +1257,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, | |||
| 1326 | if (err < 0) | 1257 | if (err < 0) |
| 1327 | goto out; | 1258 | goto out; |
| 1328 | 1259 | ||
| 1329 | err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); | ||
| 1330 | if (err < 0) | ||
| 1331 | goto out; | ||
| 1332 | |||
| 1333 | balance_flag_changed = (is_sched_load_balance(cs) != | 1260 | balance_flag_changed = (is_sched_load_balance(cs) != |
| 1334 | is_sched_load_balance(trialcs)); | 1261 | is_sched_load_balance(trialcs)); |
| 1335 | 1262 | ||
| @@ -1344,8 +1271,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, | |||
| 1344 | rebuild_sched_domains_locked(); | 1271 | rebuild_sched_domains_locked(); |
| 1345 | 1272 | ||
| 1346 | if (spread_flag_changed) | 1273 | if (spread_flag_changed) |
| 1347 | update_tasks_flags(cs, &heap); | 1274 | update_tasks_flags(cs); |
| 1348 | heap_free(&heap); | ||
| 1349 | out: | 1275 | out: |
| 1350 | free_trial_cpuset(trialcs); | 1276 | free_trial_cpuset(trialcs); |
| 1351 | return err; | 1277 | return err; |
| @@ -1449,6 +1375,8 @@ static int fmeter_getrate(struct fmeter *fmp) | |||
| 1449 | return val; | 1375 | return val; |
| 1450 | } | 1376 | } |
| 1451 | 1377 | ||
| 1378 | static struct cpuset *cpuset_attach_old_cs; | ||
| 1379 | |||
| 1452 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ | 1380 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
| 1453 | static int cpuset_can_attach(struct cgroup_subsys_state *css, | 1381 | static int cpuset_can_attach(struct cgroup_subsys_state *css, |
| 1454 | struct cgroup_taskset *tset) | 1382 | struct cgroup_taskset *tset) |
| @@ -1457,6 +1385,9 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css, | |||
| 1457 | struct task_struct *task; | 1385 | struct task_struct *task; |
| 1458 | int ret; | 1386 | int ret; |
| 1459 | 1387 | ||
| 1388 | /* used later by cpuset_attach() */ | ||
| 1389 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset)); | ||
| 1390 | |||
| 1460 | mutex_lock(&cpuset_mutex); | 1391 | mutex_lock(&cpuset_mutex); |
| 1461 | 1392 | ||
| 1462 | /* | 1393 | /* |
| @@ -1468,7 +1399,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css, | |||
| 1468 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) | 1399 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
| 1469 | goto out_unlock; | 1400 | goto out_unlock; |
| 1470 | 1401 | ||
| 1471 | cgroup_taskset_for_each(task, css, tset) { | 1402 | cgroup_taskset_for_each(task, tset) { |
| 1472 | /* | 1403 | /* |
| 1473 | * Kthreads which disallow setaffinity shouldn't be moved | 1404 | * Kthreads which disallow setaffinity shouldn't be moved |
| 1474 | * to a new cpuset; we don't want to change their cpu | 1405 | * to a new cpuset; we don't want to change their cpu |
| @@ -1520,10 +1451,8 @@ static void cpuset_attach(struct cgroup_subsys_state *css, | |||
| 1520 | struct mm_struct *mm; | 1451 | struct mm_struct *mm; |
| 1521 | struct task_struct *task; | 1452 | struct task_struct *task; |
| 1522 | struct task_struct *leader = cgroup_taskset_first(tset); | 1453 | struct task_struct *leader = cgroup_taskset_first(tset); |
| 1523 | struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset, | ||
| 1524 | cpuset_subsys_id); | ||
| 1525 | struct cpuset *cs = css_cs(css); | 1454 | struct cpuset *cs = css_cs(css); |
| 1526 | struct cpuset *oldcs = css_cs(oldcss); | 1455 | struct cpuset *oldcs = cpuset_attach_old_cs; |
| 1527 | struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); | 1456 | struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); |
| 1528 | struct cpuset *mems_cs = effective_nodemask_cpuset(cs); | 1457 | struct cpuset *mems_cs = effective_nodemask_cpuset(cs); |
| 1529 | 1458 | ||
| @@ -1537,7 +1466,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, | |||
| 1537 | 1466 | ||
| 1538 | guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to); | 1467 | guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to); |
| 1539 | 1468 | ||
| 1540 | cgroup_taskset_for_each(task, css, tset) { | 1469 | cgroup_taskset_for_each(task, tset) { |
| 1541 | /* | 1470 | /* |
| 1542 | * can_attach beforehand should guarantee that this doesn't | 1471 | * can_attach beforehand should guarantee that this doesn't |
| 1543 | * fail. TODO: have a better way to handle failure here | 1472 | * fail. TODO: have a better way to handle failure here |
| @@ -1677,7 +1606,7 @@ out_unlock: | |||
| 1677 | * Common handling for a write to a "cpus" or "mems" file. | 1606 | * Common handling for a write to a "cpus" or "mems" file. |
| 1678 | */ | 1607 | */ |
| 1679 | static int cpuset_write_resmask(struct cgroup_subsys_state *css, | 1608 | static int cpuset_write_resmask(struct cgroup_subsys_state *css, |
| 1680 | struct cftype *cft, const char *buf) | 1609 | struct cftype *cft, char *buf) |
| 1681 | { | 1610 | { |
| 1682 | struct cpuset *cs = css_cs(css); | 1611 | struct cpuset *cs = css_cs(css); |
| 1683 | struct cpuset *trialcs; | 1612 | struct cpuset *trialcs; |
| @@ -2024,8 +1953,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) | |||
| 2024 | kfree(cs); | 1953 | kfree(cs); |
| 2025 | } | 1954 | } |
| 2026 | 1955 | ||
| 2027 | struct cgroup_subsys cpuset_subsys = { | 1956 | struct cgroup_subsys cpuset_cgrp_subsys = { |
| 2028 | .name = "cpuset", | ||
| 2029 | .css_alloc = cpuset_css_alloc, | 1957 | .css_alloc = cpuset_css_alloc, |
| 2030 | .css_online = cpuset_css_online, | 1958 | .css_online = cpuset_css_online, |
| 2031 | .css_offline = cpuset_css_offline, | 1959 | .css_offline = cpuset_css_offline, |
| @@ -2033,7 +1961,6 @@ struct cgroup_subsys cpuset_subsys = { | |||
| 2033 | .can_attach = cpuset_can_attach, | 1961 | .can_attach = cpuset_can_attach, |
| 2034 | .cancel_attach = cpuset_cancel_attach, | 1962 | .cancel_attach = cpuset_cancel_attach, |
| 2035 | .attach = cpuset_attach, | 1963 | .attach = cpuset_attach, |
| 2036 | .subsys_id = cpuset_subsys_id, | ||
| 2037 | .base_cftypes = files, | 1964 | .base_cftypes = files, |
| 2038 | .early_init = 1, | 1965 | .early_init = 1, |
| 2039 | }; | 1966 | }; |
| @@ -2090,10 +2017,9 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs) | |||
| 2090 | parent = parent_cs(parent); | 2017 | parent = parent_cs(parent); |
| 2091 | 2018 | ||
| 2092 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { | 2019 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
| 2093 | rcu_read_lock(); | 2020 | printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset "); |
| 2094 | printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n", | 2021 | pr_cont_cgroup_name(cs->css.cgroup); |
| 2095 | cgroup_name(cs->css.cgroup)); | 2022 | pr_cont("\n"); |
| 2096 | rcu_read_unlock(); | ||
| 2097 | } | 2023 | } |
| 2098 | } | 2024 | } |
| 2099 | 2025 | ||
| @@ -2141,7 +2067,7 @@ retry: | |||
| 2141 | */ | 2067 | */ |
| 2142 | if ((sane && cpumask_empty(cs->cpus_allowed)) || | 2068 | if ((sane && cpumask_empty(cs->cpus_allowed)) || |
| 2143 | (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed))) | 2069 | (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed))) |
| 2144 | update_tasks_cpumask(cs, NULL); | 2070 | update_tasks_cpumask(cs); |
| 2145 | 2071 | ||
| 2146 | mutex_lock(&callback_mutex); | 2072 | mutex_lock(&callback_mutex); |
| 2147 | nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems); | 2073 | nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems); |
| @@ -2155,7 +2081,7 @@ retry: | |||
| 2155 | */ | 2081 | */ |
| 2156 | if ((sane && nodes_empty(cs->mems_allowed)) || | 2082 | if ((sane && nodes_empty(cs->mems_allowed)) || |
| 2157 | (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed))) | 2083 | (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed))) |
| 2158 | update_tasks_nodemask(cs, NULL); | 2084 | update_tasks_nodemask(cs); |
| 2159 | 2085 | ||
| 2160 | is_empty = cpumask_empty(cs->cpus_allowed) || | 2086 | is_empty = cpumask_empty(cs->cpus_allowed) || |
| 2161 | nodes_empty(cs->mems_allowed); | 2087 | nodes_empty(cs->mems_allowed); |
| @@ -2217,7 +2143,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) | |||
| 2217 | mutex_lock(&callback_mutex); | 2143 | mutex_lock(&callback_mutex); |
| 2218 | top_cpuset.mems_allowed = new_mems; | 2144 | top_cpuset.mems_allowed = new_mems; |
| 2219 | mutex_unlock(&callback_mutex); | 2145 | mutex_unlock(&callback_mutex); |
| 2220 | update_tasks_nodemask(&top_cpuset, NULL); | 2146 | update_tasks_nodemask(&top_cpuset); |
| 2221 | } | 2147 | } |
| 2222 | 2148 | ||
| 2223 | mutex_unlock(&cpuset_mutex); | 2149 | mutex_unlock(&cpuset_mutex); |
| @@ -2309,10 +2235,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) | |||
| 2309 | struct cpuset *cpus_cs; | 2235 | struct cpuset *cpus_cs; |
| 2310 | 2236 | ||
| 2311 | mutex_lock(&callback_mutex); | 2237 | mutex_lock(&callback_mutex); |
| 2312 | task_lock(tsk); | 2238 | rcu_read_lock(); |
| 2313 | cpus_cs = effective_cpumask_cpuset(task_cs(tsk)); | 2239 | cpus_cs = effective_cpumask_cpuset(task_cs(tsk)); |
| 2314 | guarantee_online_cpus(cpus_cs, pmask); | 2240 | guarantee_online_cpus(cpus_cs, pmask); |
| 2315 | task_unlock(tsk); | 2241 | rcu_read_unlock(); |
| 2316 | mutex_unlock(&callback_mutex); | 2242 | mutex_unlock(&callback_mutex); |
| 2317 | } | 2243 | } |
| 2318 | 2244 | ||
| @@ -2365,10 +2291,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | |||
| 2365 | nodemask_t mask; | 2291 | nodemask_t mask; |
| 2366 | 2292 | ||
| 2367 | mutex_lock(&callback_mutex); | 2293 | mutex_lock(&callback_mutex); |
| 2368 | task_lock(tsk); | 2294 | rcu_read_lock(); |
| 2369 | mems_cs = effective_nodemask_cpuset(task_cs(tsk)); | 2295 | mems_cs = effective_nodemask_cpuset(task_cs(tsk)); |
| 2370 | guarantee_online_mems(mems_cs, &mask); | 2296 | guarantee_online_mems(mems_cs, &mask); |
| 2371 | task_unlock(tsk); | 2297 | rcu_read_unlock(); |
| 2372 | mutex_unlock(&callback_mutex); | 2298 | mutex_unlock(&callback_mutex); |
| 2373 | 2299 | ||
| 2374 | return mask; | 2300 | return mask; |
| @@ -2484,11 +2410,11 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | |||
| 2484 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ | 2410 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
| 2485 | mutex_lock(&callback_mutex); | 2411 | mutex_lock(&callback_mutex); |
| 2486 | 2412 | ||
| 2487 | task_lock(current); | 2413 | rcu_read_lock(); |
| 2488 | cs = nearest_hardwall_ancestor(task_cs(current)); | 2414 | cs = nearest_hardwall_ancestor(task_cs(current)); |
| 2489 | task_unlock(current); | ||
| 2490 | |||
| 2491 | allowed = node_isset(node, cs->mems_allowed); | 2415 | allowed = node_isset(node, cs->mems_allowed); |
| 2416 | rcu_read_unlock(); | ||
| 2417 | |||
| 2492 | mutex_unlock(&callback_mutex); | 2418 | mutex_unlock(&callback_mutex); |
| 2493 | return allowed; | 2419 | return allowed; |
| 2494 | } | 2420 | } |
| @@ -2613,27 +2539,27 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | |||
| 2613 | * @task: pointer to task_struct of some task. | 2539 | * @task: pointer to task_struct of some task. |
| 2614 | * | 2540 | * |
| 2615 | * Description: Prints @task's name, cpuset name, and cached copy of its | 2541 | * Description: Prints @task's name, cpuset name, and cached copy of its |
| 2616 | * mems_allowed to the kernel log. Must hold task_lock(task) to allow | 2542 | * mems_allowed to the kernel log. |
| 2617 | * dereferencing task_cs(task). | ||
| 2618 | */ | 2543 | */ |
| 2619 | void cpuset_print_task_mems_allowed(struct task_struct *tsk) | 2544 | void cpuset_print_task_mems_allowed(struct task_struct *tsk) |
| 2620 | { | 2545 | { |
| 2621 | /* Statically allocated to prevent using excess stack. */ | 2546 | /* Statically allocated to prevent using excess stack. */ |
| 2622 | static char cpuset_nodelist[CPUSET_NODELIST_LEN]; | 2547 | static char cpuset_nodelist[CPUSET_NODELIST_LEN]; |
| 2623 | static DEFINE_SPINLOCK(cpuset_buffer_lock); | 2548 | static DEFINE_SPINLOCK(cpuset_buffer_lock); |
| 2549 | struct cgroup *cgrp; | ||
| 2624 | 2550 | ||
| 2625 | struct cgroup *cgrp = task_cs(tsk)->css.cgroup; | ||
| 2626 | |||
| 2627 | rcu_read_lock(); | ||
| 2628 | spin_lock(&cpuset_buffer_lock); | 2551 | spin_lock(&cpuset_buffer_lock); |
| 2552 | rcu_read_lock(); | ||
| 2629 | 2553 | ||
| 2554 | cgrp = task_cs(tsk)->css.cgroup; | ||
| 2630 | nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, | 2555 | nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, |
| 2631 | tsk->mems_allowed); | 2556 | tsk->mems_allowed); |
| 2632 | printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", | 2557 | printk(KERN_INFO "%s cpuset=", tsk->comm); |
| 2633 | tsk->comm, cgroup_name(cgrp), cpuset_nodelist); | 2558 | pr_cont_cgroup_name(cgrp); |
| 2559 | pr_cont(" mems_allowed=%s\n", cpuset_nodelist); | ||
| 2634 | 2560 | ||
| 2635 | spin_unlock(&cpuset_buffer_lock); | ||
| 2636 | rcu_read_unlock(); | 2561 | rcu_read_unlock(); |
| 2562 | spin_unlock(&cpuset_buffer_lock); | ||
| 2637 | } | 2563 | } |
| 2638 | 2564 | ||
| 2639 | /* | 2565 | /* |
| @@ -2664,9 +2590,9 @@ int cpuset_memory_pressure_enabled __read_mostly; | |||
| 2664 | 2590 | ||
| 2665 | void __cpuset_memory_pressure_bump(void) | 2591 | void __cpuset_memory_pressure_bump(void) |
| 2666 | { | 2592 | { |
| 2667 | task_lock(current); | 2593 | rcu_read_lock(); |
| 2668 | fmeter_markevent(&task_cs(current)->fmeter); | 2594 | fmeter_markevent(&task_cs(current)->fmeter); |
| 2669 | task_unlock(current); | 2595 | rcu_read_unlock(); |
| 2670 | } | 2596 | } |
| 2671 | 2597 | ||
| 2672 | #ifdef CONFIG_PROC_PID_CPUSET | 2598 | #ifdef CONFIG_PROC_PID_CPUSET |
| @@ -2683,12 +2609,12 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) | |||
| 2683 | { | 2609 | { |
| 2684 | struct pid *pid; | 2610 | struct pid *pid; |
| 2685 | struct task_struct *tsk; | 2611 | struct task_struct *tsk; |
| 2686 | char *buf; | 2612 | char *buf, *p; |
| 2687 | struct cgroup_subsys_state *css; | 2613 | struct cgroup_subsys_state *css; |
| 2688 | int retval; | 2614 | int retval; |
| 2689 | 2615 | ||
| 2690 | retval = -ENOMEM; | 2616 | retval = -ENOMEM; |
| 2691 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 2617 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
| 2692 | if (!buf) | 2618 | if (!buf) |
| 2693 | goto out; | 2619 | goto out; |
| 2694 | 2620 | ||
| @@ -2698,14 +2624,16 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) | |||
| 2698 | if (!tsk) | 2624 | if (!tsk) |
| 2699 | goto out_free; | 2625 | goto out_free; |
| 2700 | 2626 | ||
| 2627 | retval = -ENAMETOOLONG; | ||
| 2701 | rcu_read_lock(); | 2628 | rcu_read_lock(); |
| 2702 | css = task_css(tsk, cpuset_subsys_id); | 2629 | css = task_css(tsk, cpuset_cgrp_id); |
| 2703 | retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); | 2630 | p = cgroup_path(css->cgroup, buf, PATH_MAX); |
| 2704 | rcu_read_unlock(); | 2631 | rcu_read_unlock(); |
| 2705 | if (retval < 0) | 2632 | if (!p) |
| 2706 | goto out_put_task; | 2633 | goto out_put_task; |
| 2707 | seq_puts(m, buf); | 2634 | seq_puts(m, p); |
| 2708 | seq_putc(m, '\n'); | 2635 | seq_putc(m, '\n'); |
| 2636 | retval = 0; | ||
| 2709 | out_put_task: | 2637 | out_put_task: |
| 2710 | put_task_struct(tsk); | 2638 | put_task_struct(tsk); |
| 2711 | out_free: | 2639 | out_free: |
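The cpuset.c hunks above replace the css_scan_tasks() callbacks with direct css_task_iter loops, take rcu_read_lock() instead of task_lock() around task_cs(), and switch proc_cpuset_show() to cgroup_path() into a PATH_MAX buffer. From user space the net effect is only visible through procfs. Below is a minimal sketch, not part of the patch, that prints the cpuset path emitted by proc_cpuset_show() and the Cpus_allowed_list/Mems_allowed_list values the cpuset code keeps in sync; it assumes nothing beyond standard procfs files.

```c
/*
 * Minimal user-space sketch (not part of the patch): print the cpuset
 * path that proc_cpuset_show() emits plus the allowed masks that the
 * cpuset code maintains. Reads only standard procfs files.
 */
#include <stdio.h>
#include <string.h>

static void dump_file(const char *path)
{
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	char line[512];
	FILE *f;

	printf("cpuset path: ");
	dump_file("/proc/self/cpuset");	/* what proc_cpuset_show() prints */

	/* Cpus_allowed_list / Mems_allowed_list reflect cpus/mems_allowed */
	f = fopen("/proc/self/status", "r");
	if (!f) {
		perror("/proc/self/status");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "Cpus_allowed_list:", 18) ||
		    !strncmp(line, "Mems_allowed_list:", 18))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
```

The output mirrors what `cat /proc/self/cpuset` and `grep allowed_list /proc/self/status` would show.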
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 334b3980ffc1..2956c8da1605 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c | |||
| @@ -49,6 +49,7 @@ | |||
| 49 | #include <linux/pid.h> | 49 | #include <linux/pid.h> |
| 50 | #include <linux/smp.h> | 50 | #include <linux/smp.h> |
| 51 | #include <linux/mm.h> | 51 | #include <linux/mm.h> |
| 52 | #include <linux/vmacache.h> | ||
| 52 | #include <linux/rcupdate.h> | 53 | #include <linux/rcupdate.h> |
| 53 | 54 | ||
| 54 | #include <asm/cacheflush.h> | 55 | #include <asm/cacheflush.h> |
| @@ -224,10 +225,17 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) | |||
| 224 | if (!CACHE_FLUSH_IS_SAFE) | 225 | if (!CACHE_FLUSH_IS_SAFE) |
| 225 | return; | 226 | return; |
| 226 | 227 | ||
| 227 | if (current->mm && current->mm->mmap_cache) { | 228 | if (current->mm) { |
| 228 | flush_cache_range(current->mm->mmap_cache, | 229 | int i; |
| 229 | addr, addr + BREAK_INSTR_SIZE); | 230 | |
| 231 | for (i = 0; i < VMACACHE_SIZE; i++) { | ||
| 232 | if (!current->vmacache[i]) | ||
| 233 | continue; | ||
| 234 | flush_cache_range(current->vmacache[i], | ||
| 235 | addr, addr + BREAK_INSTR_SIZE); | ||
| 236 | } | ||
| 230 | } | 237 | } |
| 238 | |||
| 231 | /* Force flush instruction cache if it was outside the mm */ | 239 | /* Force flush instruction cache if it was outside the mm */ |
| 232 | flush_icache_range(addr, addr + BREAK_INSTR_SIZE); | 240 | flush_icache_range(addr, addr + BREAK_INSTR_SIZE); |
| 233 | } | 241 | } |
| @@ -1035,7 +1043,7 @@ int dbg_io_get_char(void) | |||
| 1035 | * otherwise as a quick means to stop program execution and "break" into | 1043 | * otherwise as a quick means to stop program execution and "break" into |
| 1036 | * the debugger. | 1044 | * the debugger. |
| 1037 | */ | 1045 | */ |
| 1038 | void kgdb_breakpoint(void) | 1046 | noinline void kgdb_breakpoint(void) |
| 1039 | { | 1047 | { |
| 1040 | atomic_inc(&kgdb_setting_breakpoint); | 1048 | atomic_inc(&kgdb_setting_breakpoint); |
| 1041 | wmb(); /* Sync point before breakpoint */ | 1049 | wmb(); /* Sync point before breakpoint */ |
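The kgdb hunk above follows the mm->mmap_cache to per-thread vmacache conversion: instead of one cached VMA pointer there are now VMACACHE_SIZE slots, so the flush has to visit every non-NULL entry. A small user-space model of such a direct-mapped cache is sketched below; the struct and macro names are made up for illustration, and only the slot-hashing and flush-all-slots shape corresponds to the kernel change.

```c
/*
 * User-space model (not kernel code) of the per-thread VMA cache that
 * replaced mm->mmap_cache: a few direct-mapped slots indexed by a hash
 * of the address. Names and sizes here are illustrative only.
 */
#include <stdio.h>

#define MODEL_VMACACHE_SIZE 4U		/* kernel uses VMACACHE_SIZE */
#define MODEL_PAGE_SHIFT    12U

struct model_vma {
	unsigned long start, end;
};

static struct model_vma *cache[MODEL_VMACACHE_SIZE];

static unsigned int slot(unsigned long addr)
{
	return (addr >> MODEL_PAGE_SHIFT) & (MODEL_VMACACHE_SIZE - 1);
}

static struct model_vma *lookup(unsigned long addr)
{
	struct model_vma *vma = cache[slot(addr)];

	if (vma && addr >= vma->start && addr < vma->end)
		return vma;
	return NULL;			/* miss: caller walks the real tree */
}

static void flush_all(void)
{
	/* mirrors the kgdb loop: visit every slot, skip nothing useful */
	for (unsigned int i = 0; i < MODEL_VMACACHE_SIZE; i++)
		cache[i] = NULL;
}

int main(void)
{
	static struct model_vma text = { 0x400000, 0x600000 };

	cache[slot(0x401000)] = &text;
	printf("hit:  %p\n", (void *)lookup(0x401000));
	flush_all();
	printf("miss: %p\n", (void *)lookup(0x401000));
	return 0;
}
```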
diff --git a/kernel/events/core.c b/kernel/events/core.c index fa0b2d4ad83c..f83a71a3e46d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -231,11 +231,29 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, | |||
| 231 | #define NR_ACCUMULATED_SAMPLES 128 | 231 | #define NR_ACCUMULATED_SAMPLES 128 |
| 232 | static DEFINE_PER_CPU(u64, running_sample_length); | 232 | static DEFINE_PER_CPU(u64, running_sample_length); |
| 233 | 233 | ||
| 234 | void perf_sample_event_took(u64 sample_len_ns) | 234 | static void perf_duration_warn(struct irq_work *w) |
| 235 | { | 235 | { |
| 236 | u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); | ||
| 236 | u64 avg_local_sample_len; | 237 | u64 avg_local_sample_len; |
| 237 | u64 local_samples_len; | 238 | u64 local_samples_len; |
| 239 | |||
| 240 | local_samples_len = __get_cpu_var(running_sample_length); | ||
| 241 | avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; | ||
| 242 | |||
| 243 | printk_ratelimited(KERN_WARNING | ||
| 244 | "perf interrupt took too long (%lld > %lld), lowering " | ||
| 245 | "kernel.perf_event_max_sample_rate to %d\n", | ||
| 246 | avg_local_sample_len, allowed_ns >> 1, | ||
| 247 | sysctl_perf_event_sample_rate); | ||
| 248 | } | ||
| 249 | |||
| 250 | static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn); | ||
| 251 | |||
| 252 | void perf_sample_event_took(u64 sample_len_ns) | ||
| 253 | { | ||
| 238 | u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); | 254 | u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); |
| 255 | u64 avg_local_sample_len; | ||
| 256 | u64 local_samples_len; | ||
| 239 | 257 | ||
| 240 | if (allowed_ns == 0) | 258 | if (allowed_ns == 0) |
| 241 | return; | 259 | return; |
| @@ -263,13 +281,14 @@ void perf_sample_event_took(u64 sample_len_ns) | |||
| 263 | sysctl_perf_event_sample_rate = max_samples_per_tick * HZ; | 281 | sysctl_perf_event_sample_rate = max_samples_per_tick * HZ; |
| 264 | perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; | 282 | perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; |
| 265 | 283 | ||
| 266 | printk_ratelimited(KERN_WARNING | ||
| 267 | "perf samples too long (%lld > %lld), lowering " | ||
| 268 | "kernel.perf_event_max_sample_rate to %d\n", | ||
| 269 | avg_local_sample_len, allowed_ns, | ||
| 270 | sysctl_perf_event_sample_rate); | ||
| 271 | |||
| 272 | update_perf_cpu_limits(); | 284 | update_perf_cpu_limits(); |
| 285 | |||
| 286 | if (!irq_work_queue(&perf_duration_work)) { | ||
| 287 | early_printk("perf interrupt took too long (%lld > %lld), lowering " | ||
| 288 | "kernel.perf_event_max_sample_rate to %d\n", | ||
| 289 | avg_local_sample_len, allowed_ns >> 1, | ||
| 290 | sysctl_perf_event_sample_rate); | ||
| 291 | } | ||
| 273 | } | 292 | } |
| 274 | 293 | ||
| 275 | static atomic64_t perf_event_id; | 294 | static atomic64_t perf_event_id; |
| @@ -342,7 +361,7 @@ struct perf_cgroup { | |||
| 342 | static inline struct perf_cgroup * | 361 | static inline struct perf_cgroup * |
| 343 | perf_cgroup_from_task(struct task_struct *task) | 362 | perf_cgroup_from_task(struct task_struct *task) |
| 344 | { | 363 | { |
| 345 | return container_of(task_css(task, perf_subsys_id), | 364 | return container_of(task_css(task, perf_event_cgrp_id), |
| 346 | struct perf_cgroup, css); | 365 | struct perf_cgroup, css); |
| 347 | } | 366 | } |
| 348 | 367 | ||
| @@ -370,11 +389,6 @@ perf_cgroup_match(struct perf_event *event) | |||
| 370 | event->cgrp->css.cgroup); | 389 | event->cgrp->css.cgroup); |
| 371 | } | 390 | } |
| 372 | 391 | ||
| 373 | static inline bool perf_tryget_cgroup(struct perf_event *event) | ||
| 374 | { | ||
| 375 | return css_tryget(&event->cgrp->css); | ||
| 376 | } | ||
| 377 | |||
| 378 | static inline void perf_put_cgroup(struct perf_event *event) | 392 | static inline void perf_put_cgroup(struct perf_event *event) |
| 379 | { | 393 | { |
| 380 | css_put(&event->cgrp->css); | 394 | css_put(&event->cgrp->css); |
| @@ -593,9 +607,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, | |||
| 593 | if (!f.file) | 607 | if (!f.file) |
| 594 | return -EBADF; | 608 | return -EBADF; |
| 595 | 609 | ||
| 596 | rcu_read_lock(); | 610 | css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys); |
| 597 | |||
| 598 | css = css_from_dir(f.file->f_dentry, &perf_subsys); | ||
| 599 | if (IS_ERR(css)) { | 611 | if (IS_ERR(css)) { |
| 600 | ret = PTR_ERR(css); | 612 | ret = PTR_ERR(css); |
| 601 | goto out; | 613 | goto out; |
| @@ -604,13 +616,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, | |||
| 604 | cgrp = container_of(css, struct perf_cgroup, css); | 616 | cgrp = container_of(css, struct perf_cgroup, css); |
| 605 | event->cgrp = cgrp; | 617 | event->cgrp = cgrp; |
| 606 | 618 | ||
| 607 | /* must be done before we fput() the file */ | ||
| 608 | if (!perf_tryget_cgroup(event)) { | ||
| 609 | event->cgrp = NULL; | ||
| 610 | ret = -ENOENT; | ||
| 611 | goto out; | ||
| 612 | } | ||
| 613 | |||
| 614 | /* | 619 | /* |
| 615 | * all events in a group must monitor | 620 | * all events in a group must monitor |
| 616 | * the same cgroup because a task belongs | 621 | * the same cgroup because a task belongs |
| @@ -621,7 +626,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, | |||
| 621 | ret = -EINVAL; | 626 | ret = -EINVAL; |
| 622 | } | 627 | } |
| 623 | out: | 628 | out: |
| 624 | rcu_read_unlock(); | ||
| 625 | fdput(f); | 629 | fdput(f); |
| 626 | return ret; | 630 | return ret; |
| 627 | } | 631 | } |
| @@ -1714,7 +1718,7 @@ group_sched_in(struct perf_event *group_event, | |||
| 1714 | struct perf_event_context *ctx) | 1718 | struct perf_event_context *ctx) |
| 1715 | { | 1719 | { |
| 1716 | struct perf_event *event, *partial_group = NULL; | 1720 | struct perf_event *event, *partial_group = NULL; |
| 1717 | struct pmu *pmu = group_event->pmu; | 1721 | struct pmu *pmu = ctx->pmu; |
| 1718 | u64 now = ctx->time; | 1722 | u64 now = ctx->time; |
| 1719 | bool simulate = false; | 1723 | bool simulate = false; |
| 1720 | 1724 | ||
| @@ -2563,8 +2567,6 @@ static void perf_branch_stack_sched_in(struct task_struct *prev, | |||
| 2563 | if (cpuctx->ctx.nr_branch_stack > 0 | 2567 | if (cpuctx->ctx.nr_branch_stack > 0 |
| 2564 | && pmu->flush_branch_stack) { | 2568 | && pmu->flush_branch_stack) { |
| 2565 | 2569 | ||
| 2566 | pmu = cpuctx->ctx.pmu; | ||
| 2567 | |||
| 2568 | perf_ctx_lock(cpuctx, cpuctx->task_ctx); | 2570 | perf_ctx_lock(cpuctx, cpuctx->task_ctx); |
| 2569 | 2571 | ||
| 2570 | perf_pmu_disable(pmu); | 2572 | perf_pmu_disable(pmu); |
| @@ -6294,7 +6296,7 @@ static int perf_event_idx_default(struct perf_event *event) | |||
| 6294 | * Ensures all contexts with the same task_ctx_nr have the same | 6296 | * Ensures all contexts with the same task_ctx_nr have the same |
| 6295 | * pmu_cpu_context too. | 6297 | * pmu_cpu_context too. |
| 6296 | */ | 6298 | */ |
| 6297 | static void *find_pmu_context(int ctxn) | 6299 | static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) |
| 6298 | { | 6300 | { |
| 6299 | struct pmu *pmu; | 6301 | struct pmu *pmu; |
| 6300 | 6302 | ||
| @@ -8036,7 +8038,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css, | |||
| 8036 | { | 8038 | { |
| 8037 | struct task_struct *task; | 8039 | struct task_struct *task; |
| 8038 | 8040 | ||
| 8039 | cgroup_taskset_for_each(task, css, tset) | 8041 | cgroup_taskset_for_each(task, tset) |
| 8040 | task_function_call(task, __perf_cgroup_move, task); | 8042 | task_function_call(task, __perf_cgroup_move, task); |
| 8041 | } | 8043 | } |
| 8042 | 8044 | ||
| @@ -8055,9 +8057,7 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css, | |||
| 8055 | task_function_call(task, __perf_cgroup_move, task); | 8057 | task_function_call(task, __perf_cgroup_move, task); |
| 8056 | } | 8058 | } |
| 8057 | 8059 | ||
| 8058 | struct cgroup_subsys perf_subsys = { | 8060 | struct cgroup_subsys perf_event_cgrp_subsys = { |
| 8059 | .name = "perf_event", | ||
| 8060 | .subsys_id = perf_subsys_id, | ||
| 8061 | .css_alloc = perf_cgroup_css_alloc, | 8061 | .css_alloc = perf_cgroup_css_alloc, |
| 8062 | .css_free = perf_cgroup_css_free, | 8062 | .css_free = perf_cgroup_css_free, |
| 8063 | .exit = perf_cgroup_exit, | 8063 | .exit = perf_cgroup_exit, |
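The perf change above splits perf_sample_event_took() so the warning is printed from irq_work rather than from the sampling interrupt itself; the decision to lower kernel.perf_event_max_sample_rate is still made inline from a running average of recent interrupt durations. The sketch below reproduces only that averaging-and-throttle shape under simplified assumptions: the halving policy and the constants are placeholders, not the kernel's exact recomputation.

```c
/*
 * Sketch of the throttling arithmetic around the deferred warning in
 * perf_sample_event_took(); constants and policy simplified, this is
 * not the kernel code.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_ACCUMULATED_SAMPLES 128

static uint64_t running_sample_length;	/* per-CPU in the kernel */

/* returns the new max sample rate, or 0 if no change was needed */
static unsigned int sample_took(uint64_t len_ns, uint64_t allowed_ns,
				unsigned int max_sample_rate)
{
	uint64_t avg;

	/* decaying running sum over the last NR_ACCUMULATED_SAMPLES */
	running_sample_length -= running_sample_length / NR_ACCUMULATED_SAMPLES;
	running_sample_length += len_ns;

	avg = running_sample_length / NR_ACCUMULATED_SAMPLES;
	if (avg <= allowed_ns)
		return 0;

	/*
	 * Too expensive: lower the rate (halving is a stand-in for the
	 * kernel's recomputation). The kernel then queues irq_work to
	 * print the warning outside the sampling interrupt.
	 */
	return max_sample_rate / 2;
}

int main(void)
{
	unsigned int rate = 100000, new_rate;

	for (int i = 0; i < 256; i++) {
		new_rate = sample_took(20000, 10000, rate);
		if (new_rate) {
			rate = new_rate;
			printf("lowering max_sample_rate to %u\n", rate);
			break;
		}
	}
	return 0;
}
```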
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 307d87c0991a..04709b66369d 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
| @@ -1804,6 +1804,11 @@ static bool handle_trampoline(struct pt_regs *regs) | |||
| 1804 | return true; | 1804 | return true; |
| 1805 | } | 1805 | } |
| 1806 | 1806 | ||
| 1807 | bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) | ||
| 1808 | { | ||
| 1809 | return false; | ||
| 1810 | } | ||
| 1811 | |||
| 1807 | /* | 1812 | /* |
| 1808 | * Run handler and ask thread to singlestep. | 1813 | * Run handler and ask thread to singlestep. |
| 1809 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. | 1814 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. |
| @@ -1858,7 +1863,11 @@ static void handle_swbp(struct pt_regs *regs) | |||
| 1858 | if (!get_utask()) | 1863 | if (!get_utask()) |
| 1859 | goto out; | 1864 | goto out; |
| 1860 | 1865 | ||
| 1866 | if (arch_uprobe_ignore(&uprobe->arch, regs)) | ||
| 1867 | goto out; | ||
| 1868 | |||
| 1861 | handler_chain(uprobe, regs); | 1869 | handler_chain(uprobe, regs); |
| 1870 | |||
| 1862 | if (can_skip_sstep(uprobe, regs)) | 1871 | if (can_skip_sstep(uprobe, regs)) |
| 1863 | goto out; | 1872 | goto out; |
| 1864 | 1873 | ||
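The uprobes hunk adds arch_uprobe_ignore() as a __weak stub returning false, so handle_swbp() can ask the architecture to skip a probe without requiring every arch to implement the hook. The stand-alone demo below shows the same weak-default pattern with a hypothetical hook name; an architecture would supply a strong definition in its own object file and the linker would prefer it.

```c
/*
 * Stand-alone demo of the __weak default pattern used for
 * arch_uprobe_ignore(): the generic file supplies a weak stub that
 * returns false, and an arch may provide a strong override elsewhere.
 */
#include <stdio.h>
#include <stdbool.h>

/* generic weak default; hook name here is hypothetical */
__attribute__((weak)) bool arch_hook_ignore(int insn)
{
	(void)insn;
	return false;
}

/*
 * An arch that wants the hook would define a non-weak
 * arch_hook_ignore() in its own translation unit; the linker then
 * picks the strong symbol over this default.
 */

int main(void)
{
	if (arch_hook_ignore(0))
		printf("handler skipped by arch hook\n");
	else
		printf("weak default used: run the handler chain\n");
	return 0;
}
```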
diff --git a/kernel/exit.c b/kernel/exit.c index 1e77fc645317..6ed6a1d552b5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -570,7 +570,7 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p, | |||
| 570 | if (same_thread_group(p->real_parent, father)) | 570 | if (same_thread_group(p->real_parent, father)) |
| 571 | return; | 571 | return; |
| 572 | 572 | ||
| 573 | /* We don't want people slaying init. */ | 573 | /* We don't want people slaying init. */ |
| 574 | p->exit_signal = SIGCHLD; | 574 | p->exit_signal = SIGCHLD; |
| 575 | 575 | ||
| 576 | /* If it has exited notify the new parent about this child's death. */ | 576 | /* If it has exited notify the new parent about this child's death. */ |
| @@ -784,9 +784,10 @@ void do_exit(long code) | |||
| 784 | exit_shm(tsk); | 784 | exit_shm(tsk); |
| 785 | exit_files(tsk); | 785 | exit_files(tsk); |
| 786 | exit_fs(tsk); | 786 | exit_fs(tsk); |
| 787 | if (group_dead) | ||
| 788 | disassociate_ctty(1); | ||
| 787 | exit_task_namespaces(tsk); | 789 | exit_task_namespaces(tsk); |
| 788 | exit_task_work(tsk); | 790 | exit_task_work(tsk); |
| 789 | check_stack_usage(); | ||
| 790 | exit_thread(); | 791 | exit_thread(); |
| 791 | 792 | ||
| 792 | /* | 793 | /* |
| @@ -797,21 +798,17 @@ void do_exit(long code) | |||
| 797 | */ | 798 | */ |
| 798 | perf_event_exit_task(tsk); | 799 | perf_event_exit_task(tsk); |
| 799 | 800 | ||
| 800 | cgroup_exit(tsk, 1); | 801 | cgroup_exit(tsk); |
| 801 | |||
| 802 | if (group_dead) | ||
| 803 | disassociate_ctty(1); | ||
| 804 | 802 | ||
| 805 | module_put(task_thread_info(tsk)->exec_domain->module); | 803 | module_put(task_thread_info(tsk)->exec_domain->module); |
| 806 | 804 | ||
| 807 | proc_exit_connector(tsk); | ||
| 808 | |||
| 809 | /* | 805 | /* |
| 810 | * FIXME: do that only when needed, using sched_exit tracepoint | 806 | * FIXME: do that only when needed, using sched_exit tracepoint |
| 811 | */ | 807 | */ |
| 812 | flush_ptrace_hw_breakpoint(tsk); | 808 | flush_ptrace_hw_breakpoint(tsk); |
| 813 | 809 | ||
| 814 | exit_notify(tsk, group_dead); | 810 | exit_notify(tsk, group_dead); |
| 811 | proc_exit_connector(tsk); | ||
| 815 | #ifdef CONFIG_NUMA | 812 | #ifdef CONFIG_NUMA |
| 816 | task_lock(tsk); | 813 | task_lock(tsk); |
| 817 | mpol_put(tsk->mempolicy); | 814 | mpol_put(tsk->mempolicy); |
| @@ -844,6 +841,7 @@ void do_exit(long code) | |||
| 844 | 841 | ||
| 845 | validate_creds_for_do_exit(tsk); | 842 | validate_creds_for_do_exit(tsk); |
| 846 | 843 | ||
| 844 | check_stack_usage(); | ||
| 847 | preempt_disable(); | 845 | preempt_disable(); |
| 848 | if (tsk->nr_dirtied) | 846 | if (tsk->nr_dirtied) |
| 849 | __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); | 847 | __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); |
| @@ -1038,17 +1036,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
| 1038 | return wait_noreap_copyout(wo, p, pid, uid, why, status); | 1036 | return wait_noreap_copyout(wo, p, pid, uid, why, status); |
| 1039 | } | 1037 | } |
| 1040 | 1038 | ||
| 1039 | traced = ptrace_reparented(p); | ||
| 1041 | /* | 1040 | /* |
| 1042 | * Try to move the task's state to DEAD | 1041 | * Move the task's state to DEAD/TRACE, only one thread can do this. |
| 1043 | * only one thread is allowed to do this: | ||
| 1044 | */ | 1042 | */ |
| 1045 | state = xchg(&p->exit_state, EXIT_DEAD); | 1043 | state = traced && thread_group_leader(p) ? EXIT_TRACE : EXIT_DEAD; |
| 1046 | if (state != EXIT_ZOMBIE) { | 1044 | if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) |
| 1047 | BUG_ON(state != EXIT_DEAD); | ||
| 1048 | return 0; | 1045 | return 0; |
| 1049 | } | ||
| 1050 | |||
| 1051 | traced = ptrace_reparented(p); | ||
| 1052 | /* | 1046 | /* |
| 1053 | * It can be ptraced but not reparented, check | 1047 | * It can be ptraced but not reparented, check |
| 1054 | * thread_group_leader() to filter out sub-threads. | 1048 | * thread_group_leader() to filter out sub-threads. |
| @@ -1109,7 +1103,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
| 1109 | 1103 | ||
| 1110 | /* | 1104 | /* |
| 1111 | * Now we are sure this task is interesting, and no other | 1105 | * Now we are sure this task is interesting, and no other |
| 1112 | * thread can reap it because we set its state to EXIT_DEAD. | 1106 | * thread can reap it because we set its state to DEAD/TRACE. |
| 1113 | */ | 1107 | */ |
| 1114 | read_unlock(&tasklist_lock); | 1108 | read_unlock(&tasklist_lock); |
| 1115 | 1109 | ||
| @@ -1146,22 +1140,19 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
| 1146 | if (!retval) | 1140 | if (!retval) |
| 1147 | retval = pid; | 1141 | retval = pid; |
| 1148 | 1142 | ||
| 1149 | if (traced) { | 1143 | if (state == EXIT_TRACE) { |
| 1150 | write_lock_irq(&tasklist_lock); | 1144 | write_lock_irq(&tasklist_lock); |
| 1151 | /* We dropped tasklist, ptracer could die and untrace */ | 1145 | /* We dropped tasklist, ptracer could die and untrace */ |
| 1152 | ptrace_unlink(p); | 1146 | ptrace_unlink(p); |
| 1153 | /* | 1147 | |
| 1154 | * If this is not a sub-thread, notify the parent. | 1148 | /* If parent wants a zombie, don't release it now */ |
| 1155 | * If parent wants a zombie, don't release it now. | 1149 | state = EXIT_ZOMBIE; |
| 1156 | */ | 1150 | if (do_notify_parent(p, p->exit_signal)) |
| 1157 | if (thread_group_leader(p) && | 1151 | state = EXIT_DEAD; |
| 1158 | !do_notify_parent(p, p->exit_signal)) { | 1152 | p->exit_state = state; |
| 1159 | p->exit_state = EXIT_ZOMBIE; | ||
| 1160 | p = NULL; | ||
| 1161 | } | ||
| 1162 | write_unlock_irq(&tasklist_lock); | 1153 | write_unlock_irq(&tasklist_lock); |
| 1163 | } | 1154 | } |
| 1164 | if (p != NULL) | 1155 | if (state == EXIT_DEAD) |
| 1165 | release_task(p); | 1156 | release_task(p); |
| 1166 | 1157 | ||
| 1167 | return retval; | 1158 | return retval; |
| @@ -1338,7 +1329,12 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) | |||
| 1338 | static int wait_consider_task(struct wait_opts *wo, int ptrace, | 1329 | static int wait_consider_task(struct wait_opts *wo, int ptrace, |
| 1339 | struct task_struct *p) | 1330 | struct task_struct *p) |
| 1340 | { | 1331 | { |
| 1341 | int ret = eligible_child(wo, p); | 1332 | int ret; |
| 1333 | |||
| 1334 | if (unlikely(p->exit_state == EXIT_DEAD)) | ||
| 1335 | return 0; | ||
| 1336 | |||
| 1337 | ret = eligible_child(wo, p); | ||
| 1342 | if (!ret) | 1338 | if (!ret) |
| 1343 | return ret; | 1339 | return ret; |
| 1344 | 1340 | ||
| @@ -1356,33 +1352,44 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
| 1356 | return 0; | 1352 | return 0; |
| 1357 | } | 1353 | } |
| 1358 | 1354 | ||
| 1359 | /* dead body doesn't have much to contribute */ | 1355 | if (unlikely(p->exit_state == EXIT_TRACE)) { |
| 1360 | if (unlikely(p->exit_state == EXIT_DEAD)) { | ||
| 1361 | /* | 1356 | /* |
| 1362 | * But do not ignore this task until the tracer does | 1357 | * ptrace == 0 means we are the natural parent. In this case |
| 1363 | * wait_task_zombie()->do_notify_parent(). | 1358 | * we should clear notask_error, debugger will notify us. |
| 1364 | */ | 1359 | */ |
| 1365 | if (likely(!ptrace) && unlikely(ptrace_reparented(p))) | 1360 | if (likely(!ptrace)) |
| 1366 | wo->notask_error = 0; | 1361 | wo->notask_error = 0; |
| 1367 | return 0; | 1362 | return 0; |
| 1368 | } | 1363 | } |
| 1369 | 1364 | ||
| 1370 | /* slay zombie? */ | 1365 | if (likely(!ptrace) && unlikely(p->ptrace)) { |
| 1371 | if (p->exit_state == EXIT_ZOMBIE) { | ||
| 1372 | /* | 1366 | /* |
| 1373 | * A zombie ptracee is only visible to its ptracer. | 1367 | * If it is traced by its real parent's group, just pretend |
| 1374 | * Notification and reaping will be cascaded to the real | 1368 | * the caller is ptrace_do_wait() and reap this child if it |
| 1375 | * parent when the ptracer detaches. | 1369 | * is zombie. |
| 1370 | * | ||
| 1371 | * This also hides group stop state from real parent; otherwise | ||
| 1372 | * a single stop can be reported twice as group and ptrace stop. | ||
| 1373 | * If a ptracer wants to distinguish these two events for its | ||
| 1374 | * own children it should create a separate process which takes | ||
| 1375 | * the role of real parent. | ||
| 1376 | */ | 1376 | */ |
| 1377 | if (likely(!ptrace) && unlikely(p->ptrace)) { | 1377 | if (!ptrace_reparented(p)) |
| 1378 | /* it will become visible, clear notask_error */ | 1378 | ptrace = 1; |
| 1379 | wo->notask_error = 0; | 1379 | } |
| 1380 | return 0; | ||
| 1381 | } | ||
| 1382 | 1380 | ||
| 1381 | /* slay zombie? */ | ||
| 1382 | if (p->exit_state == EXIT_ZOMBIE) { | ||
| 1383 | /* we don't reap group leaders with subthreads */ | 1383 | /* we don't reap group leaders with subthreads */ |
| 1384 | if (!delay_group_leader(p)) | 1384 | if (!delay_group_leader(p)) { |
| 1385 | return wait_task_zombie(wo, p); | 1385 | /* |
| 1386 | * A zombie ptracee is only visible to its ptracer. | ||
| 1387 | * Notification and reaping will be cascaded to the | ||
| 1388 | * real parent when the ptracer detaches. | ||
| 1389 | */ | ||
| 1390 | if (unlikely(ptrace) || likely(!p->ptrace)) | ||
| 1391 | return wait_task_zombie(wo, p); | ||
| 1392 | } | ||
| 1386 | 1393 | ||
| 1387 | /* | 1394 | /* |
| 1388 | * Allow access to stopped/continued state via zombie by | 1395 | * Allow access to stopped/continued state via zombie by |
| @@ -1408,19 +1415,6 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
| 1408 | wo->notask_error = 0; | 1415 | wo->notask_error = 0; |
| 1409 | } else { | 1416 | } else { |
| 1410 | /* | 1417 | /* |
| 1411 | * If @p is ptraced by a task in its real parent's group, | ||
| 1412 | * hide group stop/continued state when looking at @p as | ||
| 1413 | * the real parent; otherwise, a single stop can be | ||
| 1414 | * reported twice as group and ptrace stops. | ||
| 1415 | * | ||
| 1416 | * If a ptracer wants to distinguish the two events for its | ||
| 1417 | * own children, it should create a separate process which | ||
| 1418 | * takes the role of real parent. | ||
| 1419 | */ | ||
| 1420 | if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p)) | ||
| 1421 | return 0; | ||
| 1422 | |||
| 1423 | /* | ||
| 1424 | * @p is alive and it's gonna stop, continue or exit, so | 1418 | * @p is alive and it's gonna stop, continue or exit, so |
| 1425 | * there always is something to wait for. | 1419 | * there always is something to wait for. |
| 1426 | */ | 1420 | */ |
diff --git a/kernel/extable.c b/kernel/extable.c index 763faf037ec1..d8a6446adbcb 100644 --- a/kernel/extable.c +++ b/kernel/extable.c | |||
| @@ -36,7 +36,7 @@ extern struct exception_table_entry __start___ex_table[]; | |||
| 36 | extern struct exception_table_entry __stop___ex_table[]; | 36 | extern struct exception_table_entry __stop___ex_table[]; |
| 37 | 37 | ||
| 38 | /* Cleared by build time tools if the table is already sorted. */ | 38 | /* Cleared by build time tools if the table is already sorted. */ |
| 39 | u32 __initdata main_extable_sort_needed = 1; | 39 | u32 __initdata __visible main_extable_sort_needed = 1; |
| 40 | 40 | ||
| 41 | /* Sort the kernel's built-in exception table */ | 41 | /* Sort the kernel's built-in exception table */ |
| 42 | void __init sort_main_extable(void) | 42 | void __init sort_main_extable(void) |
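The extable.c change only marks main_extable_sort_needed as __visible so that link-time optimization does not assume it knows every access to a flag that the build-time extable sorter also writes. In GCC terms __visible expands to the externally_visible attribute; the toy below shows that usage under the assumption of a GCC-compatible compiler, with a macro name copied from the kernel for readability.

```c
/*
 * Sketch of what __visible amounts to: GCC's externally_visible
 * attribute keeps a symbol from being optimised away or localised
 * under whole-program/LTO builds, because something outside the
 * compiler also touches it. Illustrative only; assumes GCC.
 */
#include <stdio.h>

#define __visible __attribute__((externally_visible))

/* flag that an external tool may clear, so it must stay visible */
__visible unsigned int table_sort_needed = 1;

int main(void)
{
	printf("sort needed: %u\n", table_sort_needed);
	return 0;
}
```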
diff --git a/kernel/fork.c b/kernel/fork.c index a17621c6cd42..54a8d26f612f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -28,6 +28,8 @@ | |||
| 28 | #include <linux/mman.h> | 28 | #include <linux/mman.h> |
| 29 | #include <linux/mmu_notifier.h> | 29 | #include <linux/mmu_notifier.h> |
| 30 | #include <linux/fs.h> | 30 | #include <linux/fs.h> |
| 31 | #include <linux/mm.h> | ||
| 32 | #include <linux/vmacache.h> | ||
| 31 | #include <linux/nsproxy.h> | 33 | #include <linux/nsproxy.h> |
| 32 | #include <linux/capability.h> | 34 | #include <linux/capability.h> |
| 33 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
| @@ -71,6 +73,7 @@ | |||
| 71 | #include <linux/signalfd.h> | 73 | #include <linux/signalfd.h> |
| 72 | #include <linux/uprobes.h> | 74 | #include <linux/uprobes.h> |
| 73 | #include <linux/aio.h> | 75 | #include <linux/aio.h> |
| 76 | #include <linux/compiler.h> | ||
| 74 | 77 | ||
| 75 | #include <asm/pgtable.h> | 78 | #include <asm/pgtable.h> |
| 76 | #include <asm/pgalloc.h> | 79 | #include <asm/pgalloc.h> |
| @@ -237,6 +240,7 @@ void __put_task_struct(struct task_struct *tsk) | |||
| 237 | WARN_ON(atomic_read(&tsk->usage)); | 240 | WARN_ON(atomic_read(&tsk->usage)); |
| 238 | WARN_ON(tsk == current); | 241 | WARN_ON(tsk == current); |
| 239 | 242 | ||
| 243 | task_numa_free(tsk); | ||
| 240 | security_task_free(tsk); | 244 | security_task_free(tsk); |
| 241 | exit_creds(tsk); | 245 | exit_creds(tsk); |
| 242 | delayacct_tsk_free(tsk); | 246 | delayacct_tsk_free(tsk); |
| @@ -283,7 +287,7 @@ void __init fork_init(unsigned long mempages) | |||
| 283 | init_task.signal->rlim[RLIMIT_NPROC]; | 287 | init_task.signal->rlim[RLIMIT_NPROC]; |
| 284 | } | 288 | } |
| 285 | 289 | ||
| 286 | int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst, | 290 | int __weak arch_dup_task_struct(struct task_struct *dst, |
| 287 | struct task_struct *src) | 291 | struct task_struct *src) |
| 288 | { | 292 | { |
| 289 | *dst = *src; | 293 | *dst = *src; |
| @@ -363,7 +367,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
| 363 | 367 | ||
| 364 | mm->locked_vm = 0; | 368 | mm->locked_vm = 0; |
| 365 | mm->mmap = NULL; | 369 | mm->mmap = NULL; |
| 366 | mm->mmap_cache = NULL; | 370 | mm->vmacache_seqnum = 0; |
| 367 | mm->map_count = 0; | 371 | mm->map_count = 0; |
| 368 | cpumask_clear(mm_cpumask(mm)); | 372 | cpumask_clear(mm_cpumask(mm)); |
| 369 | mm->mm_rb = RB_ROOT; | 373 | mm->mm_rb = RB_ROOT; |
| @@ -529,8 +533,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) | |||
| 529 | atomic_set(&mm->mm_count, 1); | 533 | atomic_set(&mm->mm_count, 1); |
| 530 | init_rwsem(&mm->mmap_sem); | 534 | init_rwsem(&mm->mmap_sem); |
| 531 | INIT_LIST_HEAD(&mm->mmlist); | 535 | INIT_LIST_HEAD(&mm->mmlist); |
| 532 | mm->flags = (current->mm) ? | ||
| 533 | (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; | ||
| 534 | mm->core_state = NULL; | 536 | mm->core_state = NULL; |
| 535 | atomic_long_set(&mm->nr_ptes, 0); | 537 | atomic_long_set(&mm->nr_ptes, 0); |
| 536 | memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); | 538 | memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); |
| @@ -539,8 +541,15 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) | |||
| 539 | mm_init_owner(mm, p); | 541 | mm_init_owner(mm, p); |
| 540 | clear_tlb_flush_pending(mm); | 542 | clear_tlb_flush_pending(mm); |
| 541 | 543 | ||
| 542 | if (likely(!mm_alloc_pgd(mm))) { | 544 | if (current->mm) { |
| 545 | mm->flags = current->mm->flags & MMF_INIT_MASK; | ||
| 546 | mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; | ||
| 547 | } else { | ||
| 548 | mm->flags = default_dump_filter; | ||
| 543 | mm->def_flags = 0; | 549 | mm->def_flags = 0; |
| 550 | } | ||
| 551 | |||
| 552 | if (likely(!mm_alloc_pgd(mm))) { | ||
| 544 | mmu_notifier_mm_init(mm); | 553 | mmu_notifier_mm_init(mm); |
| 545 | return mm; | 554 | return mm; |
| 546 | } | 555 | } |
| @@ -876,6 +885,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) | |||
| 876 | if (!oldmm) | 885 | if (!oldmm) |
| 877 | return 0; | 886 | return 0; |
| 878 | 887 | ||
| 888 | /* initialize the new vmacache entries */ | ||
| 889 | vmacache_flush(tsk); | ||
| 890 | |||
| 879 | if (clone_flags & CLONE_VM) { | 891 | if (clone_flags & CLONE_VM) { |
| 880 | atomic_inc(&oldmm->mm_users); | 892 | atomic_inc(&oldmm->mm_users); |
| 881 | mm = oldmm; | 893 | mm = oldmm; |
| @@ -1069,15 +1081,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
| 1069 | return 0; | 1081 | return 0; |
| 1070 | } | 1082 | } |
| 1071 | 1083 | ||
| 1072 | static void copy_flags(unsigned long clone_flags, struct task_struct *p) | ||
| 1073 | { | ||
| 1074 | unsigned long new_flags = p->flags; | ||
| 1075 | |||
| 1076 | new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); | ||
| 1077 | new_flags |= PF_FORKNOEXEC; | ||
| 1078 | p->flags = new_flags; | ||
| 1079 | } | ||
| 1080 | |||
| 1081 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) | 1084 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) |
| 1082 | { | 1085 | { |
| 1083 | current->clear_child_tid = tidptr; | 1086 | current->clear_child_tid = tidptr; |
| @@ -1227,7 +1230,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1227 | goto bad_fork_cleanup_count; | 1230 | goto bad_fork_cleanup_count; |
| 1228 | 1231 | ||
| 1229 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ | 1232 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ |
| 1230 | copy_flags(clone_flags, p); | 1233 | p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); |
| 1234 | p->flags |= PF_FORKNOEXEC; | ||
| 1231 | INIT_LIST_HEAD(&p->children); | 1235 | INIT_LIST_HEAD(&p->children); |
| 1232 | INIT_LIST_HEAD(&p->sibling); | 1236 | INIT_LIST_HEAD(&p->sibling); |
| 1233 | rcu_copy_process(p); | 1237 | rcu_copy_process(p); |
| @@ -1271,9 +1275,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1271 | if (IS_ERR(p->mempolicy)) { | 1275 | if (IS_ERR(p->mempolicy)) { |
| 1272 | retval = PTR_ERR(p->mempolicy); | 1276 | retval = PTR_ERR(p->mempolicy); |
| 1273 | p->mempolicy = NULL; | 1277 | p->mempolicy = NULL; |
| 1274 | goto bad_fork_cleanup_cgroup; | 1278 | goto bad_fork_cleanup_threadgroup_lock; |
| 1275 | } | 1279 | } |
| 1276 | mpol_fix_fork_child_flag(p); | ||
| 1277 | #endif | 1280 | #endif |
| 1278 | #ifdef CONFIG_CPUSETS | 1281 | #ifdef CONFIG_CPUSETS |
| 1279 | p->cpuset_mem_spread_rotor = NUMA_NO_NODE; | 1282 | p->cpuset_mem_spread_rotor = NUMA_NO_NODE; |
| @@ -1524,11 +1527,10 @@ bad_fork_cleanup_policy: | |||
| 1524 | perf_event_free_task(p); | 1527 | perf_event_free_task(p); |
| 1525 | #ifdef CONFIG_NUMA | 1528 | #ifdef CONFIG_NUMA |
| 1526 | mpol_put(p->mempolicy); | 1529 | mpol_put(p->mempolicy); |
| 1527 | bad_fork_cleanup_cgroup: | 1530 | bad_fork_cleanup_threadgroup_lock: |
| 1528 | #endif | 1531 | #endif |
| 1529 | if (clone_flags & CLONE_THREAD) | 1532 | if (clone_flags & CLONE_THREAD) |
| 1530 | threadgroup_change_end(current); | 1533 | threadgroup_change_end(current); |
| 1531 | cgroup_exit(p, 0); | ||
| 1532 | delayacct_tsk_free(p); | 1534 | delayacct_tsk_free(p); |
| 1533 | module_put(task_thread_info(p)->exec_domain->module); | 1535 | module_put(task_thread_info(p)->exec_domain->module); |
| 1534 | bad_fork_cleanup_count: | 1536 | bad_fork_cleanup_count: |
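One user-visible consequence of the mm_init() hunk above is that the MMF_INIT_MASK bits of mm->flags, which include the coredump filter, are copied from the parent's mm at fork (def_flags now gets the same treatment via VM_INIT_DEF_MASK). A small userspace sketch of that inheritance, illustrative only, using the standard /proc/self/coredump_filter interface (the 0x33 value is an arbitrary example):

```c
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

/* read this process's coredump filter bits (kept in mm->flags) */
static unsigned int read_filter(void)
{
	unsigned int val = 0;
	FILE *f = fopen("/proc/self/coredump_filter", "r");

	if (f) {
		if (fscanf(f, "%x", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	FILE *f = fopen("/proc/self/coredump_filter", "w");

	if (f) {
		fprintf(f, "0x33");	/* arbitrary example bits */
		fclose(f);
	}

	if (fork() == 0) {		/* the child's new mm inherits the MMF_DUMP* bits */
		printf("child:  0x%x\n", read_filter());
		_exit(0);
	}
	printf("parent: 0x%x\n", read_filter());
	wait(NULL);
	return 0;
}
```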
diff --git a/kernel/futex.c b/kernel/futex.c index 44a1261cb9ff..5f589279e462 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -70,7 +70,10 @@ | |||
| 70 | #include "locking/rtmutex_common.h" | 70 | #include "locking/rtmutex_common.h" |
| 71 | 71 | ||
| 72 | /* | 72 | /* |
| 73 | * Basic futex operation and ordering guarantees: | 73 | * READ this before attempting to hack on futexes! |
| 74 | * | ||
| 75 | * Basic futex operation and ordering guarantees | ||
| 76 | * ============================================= | ||
| 74 | * | 77 | * |
| 75 | * The waiter reads the futex value in user space and calls | 78 | * The waiter reads the futex value in user space and calls |
| 76 | * futex_wait(). This function computes the hash bucket and acquires | 79 | * futex_wait(). This function computes the hash bucket and acquires |
| @@ -119,7 +122,7 @@ | |||
| 119 | * sys_futex(WAIT, futex, val); | 122 | * sys_futex(WAIT, futex, val); |
| 120 | * futex_wait(futex, val); | 123 | * futex_wait(futex, val); |
| 121 | * | 124 | * |
| 122 | * waiters++; | 125 | * waiters++; (a) |
| 123 | * mb(); (A) <-- paired with -. | 126 | * mb(); (A) <-- paired with -. |
| 124 | * | | 127 | * | |
| 125 | * lock(hash_bucket(futex)); | | 128 | * lock(hash_bucket(futex)); | |
| @@ -135,14 +138,14 @@ | |||
| 135 | * unlock(hash_bucket(futex)); | 138 | * unlock(hash_bucket(futex)); |
| 136 | * schedule(); if (waiters) | 139 | * schedule(); if (waiters) |
| 137 | * lock(hash_bucket(futex)); | 140 | * lock(hash_bucket(futex)); |
| 138 | * wake_waiters(futex); | 141 | * else wake_waiters(futex); |
| 139 | * unlock(hash_bucket(futex)); | 142 | * waiters--; (b) unlock(hash_bucket(futex)); |
| 140 | * | 143 | * |
| 141 | * Where (A) orders the waiters increment and the futex value read -- this | 144 | * Where (A) orders the waiters increment and the futex value read through |
| 142 | * is guaranteed by the head counter in the hb spinlock; and where (B) | 145 | * atomic operations (see hb_waiters_inc) and where (B) orders the write |
| 143 | * orders the write to futex and the waiters read -- this is done by the | 146 | * to futex and the waiters read -- this is done by the barriers in |
| 144 | * barriers in get_futex_key_refs(), through either ihold or atomic_inc, | 147 | * get_futex_key_refs(), through either ihold or atomic_inc, depending on the |
| 145 | * depending on the futex type. | 148 | * futex type. |
| 146 | * | 149 | * |
| 147 | * This yields the following case (where X:=waiters, Y:=futex): | 150 | * This yields the following case (where X:=waiters, Y:=futex): |
| 148 | * | 151 | * |
| @@ -155,9 +158,22 @@ | |||
| 155 | * Which guarantees that x==0 && y==0 is impossible; which translates back into | 158 | * Which guarantees that x==0 && y==0 is impossible; which translates back into |
| 156 | * the guarantee that we cannot both miss the futex variable change and the | 159 | * the guarantee that we cannot both miss the futex variable change and the |
| 157 | * enqueue. | 160 | * enqueue. |
| 161 | * | ||
| 162 | * Note that a new waiter is accounted for in (a) even when it is possible that | ||
| 163 | * the wait call can return error, in which case we backtrack from it in (b). | ||
| 164 | * Refer to the comment in queue_lock(). | ||
| 165 | * | ||
| 166 | * Similarly, in order to account for waiters being requeued on another | ||
| 167 | * address we always increment the waiters for the destination bucket before | ||
| 168 | * acquiring the lock. It then decrements them again after releasing it - | ||
| 169 | * the code that actually moves the futex(es) between hash buckets (requeue_futex) | ||
| 170 | * will do the additional required waiter count housekeeping. This is done for | ||
| 171 | * double_lock_hb() and double_unlock_hb(), respectively. | ||
| 158 | */ | 172 | */ |
| 159 | 173 | ||
| 174 | #ifndef CONFIG_HAVE_FUTEX_CMPXCHG | ||
| 160 | int __read_mostly futex_cmpxchg_enabled; | 175 | int __read_mostly futex_cmpxchg_enabled; |
| 176 | #endif | ||
| 161 | 177 | ||
| 162 | /* | 178 | /* |
| 163 | * Futex flags used to encode options to functions and preserve them across | 179 | * Futex flags used to encode options to functions and preserve them across |
| @@ -234,6 +250,7 @@ static const struct futex_q futex_q_init = { | |||
| 234 | * waiting on a futex. | 250 | * waiting on a futex. |
| 235 | */ | 251 | */ |
| 236 | struct futex_hash_bucket { | 252 | struct futex_hash_bucket { |
| 253 | atomic_t waiters; | ||
| 237 | spinlock_t lock; | 254 | spinlock_t lock; |
| 238 | struct plist_head chain; | 255 | struct plist_head chain; |
| 239 | } ____cacheline_aligned_in_smp; | 256 | } ____cacheline_aligned_in_smp; |
| @@ -253,22 +270,37 @@ static inline void futex_get_mm(union futex_key *key) | |||
| 253 | smp_mb__after_atomic_inc(); | 270 | smp_mb__after_atomic_inc(); |
| 254 | } | 271 | } |
| 255 | 272 | ||
| 256 | static inline bool hb_waiters_pending(struct futex_hash_bucket *hb) | 273 | /* |
| 274 | * Reflects a new waiter being added to the waitqueue. | ||
| 275 | */ | ||
| 276 | static inline void hb_waiters_inc(struct futex_hash_bucket *hb) | ||
| 257 | { | 277 | { |
| 258 | #ifdef CONFIG_SMP | 278 | #ifdef CONFIG_SMP |
| 279 | atomic_inc(&hb->waiters); | ||
| 259 | /* | 280 | /* |
| 260 | * Tasks trying to enter the critical region are most likely | 281 | * Full barrier (A), see the ordering comment above. |
| 261 | * potential waiters that will be added to the plist. Ensure | ||
| 262 | * that wakers won't miss to-be-slept tasks in the window between | ||
| 263 | * the wait call and the actual plist_add. | ||
| 264 | */ | 282 | */ |
| 265 | if (spin_is_locked(&hb->lock)) | 283 | smp_mb__after_atomic_inc(); |
| 266 | return true; | 284 | #endif |
| 267 | smp_rmb(); /* Make sure we check the lock state first */ | 285 | } |
| 268 | 286 | ||
| 269 | return !plist_head_empty(&hb->chain); | 287 | /* |
| 288 | * Reflects a waiter being removed from the waitqueue by wakeup | ||
| 289 | * paths. | ||
| 290 | */ | ||
| 291 | static inline void hb_waiters_dec(struct futex_hash_bucket *hb) | ||
| 292 | { | ||
| 293 | #ifdef CONFIG_SMP | ||
| 294 | atomic_dec(&hb->waiters); | ||
| 295 | #endif | ||
| 296 | } | ||
| 297 | |||
| 298 | static inline int hb_waiters_pending(struct futex_hash_bucket *hb) | ||
| 299 | { | ||
| 300 | #ifdef CONFIG_SMP | ||
| 301 | return atomic_read(&hb->waiters); | ||
| 270 | #else | 302 | #else |
| 271 | return true; | 303 | return 1; |
| 272 | #endif | 304 | #endif |
| 273 | } | 305 | } |
| 274 | 306 | ||
| @@ -954,6 +986,7 @@ static void __unqueue_futex(struct futex_q *q) | |||
| 954 | 986 | ||
| 955 | hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); | 987 | hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); |
| 956 | plist_del(&q->list, &hb->chain); | 988 | plist_del(&q->list, &hb->chain); |
| 989 | hb_waiters_dec(hb); | ||
| 957 | } | 990 | } |
| 958 | 991 | ||
| 959 | /* | 992 | /* |
| @@ -1257,7 +1290,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, | |||
| 1257 | */ | 1290 | */ |
| 1258 | if (likely(&hb1->chain != &hb2->chain)) { | 1291 | if (likely(&hb1->chain != &hb2->chain)) { |
| 1259 | plist_del(&q->list, &hb1->chain); | 1292 | plist_del(&q->list, &hb1->chain); |
| 1293 | hb_waiters_dec(hb1); | ||
| 1260 | plist_add(&q->list, &hb2->chain); | 1294 | plist_add(&q->list, &hb2->chain); |
| 1295 | hb_waiters_inc(hb2); | ||
| 1261 | q->lock_ptr = &hb2->lock; | 1296 | q->lock_ptr = &hb2->lock; |
| 1262 | } | 1297 | } |
| 1263 | get_futex_key_refs(key2); | 1298 | get_futex_key_refs(key2); |
| @@ -1431,6 +1466,7 @@ retry: | |||
| 1431 | hb2 = hash_futex(&key2); | 1466 | hb2 = hash_futex(&key2); |
| 1432 | 1467 | ||
| 1433 | retry_private: | 1468 | retry_private: |
| 1469 | hb_waiters_inc(hb2); | ||
| 1434 | double_lock_hb(hb1, hb2); | 1470 | double_lock_hb(hb1, hb2); |
| 1435 | 1471 | ||
| 1436 | if (likely(cmpval != NULL)) { | 1472 | if (likely(cmpval != NULL)) { |
| @@ -1440,6 +1476,7 @@ retry_private: | |||
| 1440 | 1476 | ||
| 1441 | if (unlikely(ret)) { | 1477 | if (unlikely(ret)) { |
| 1442 | double_unlock_hb(hb1, hb2); | 1478 | double_unlock_hb(hb1, hb2); |
| 1479 | hb_waiters_dec(hb2); | ||
| 1443 | 1480 | ||
| 1444 | ret = get_user(curval, uaddr1); | 1481 | ret = get_user(curval, uaddr1); |
| 1445 | if (ret) | 1482 | if (ret) |
| @@ -1489,6 +1526,7 @@ retry_private: | |||
| 1489 | break; | 1526 | break; |
| 1490 | case -EFAULT: | 1527 | case -EFAULT: |
| 1491 | double_unlock_hb(hb1, hb2); | 1528 | double_unlock_hb(hb1, hb2); |
| 1529 | hb_waiters_dec(hb2); | ||
| 1492 | put_futex_key(&key2); | 1530 | put_futex_key(&key2); |
| 1493 | put_futex_key(&key1); | 1531 | put_futex_key(&key1); |
| 1494 | ret = fault_in_user_writeable(uaddr2); | 1532 | ret = fault_in_user_writeable(uaddr2); |
| @@ -1498,6 +1536,7 @@ retry_private: | |||
| 1498 | case -EAGAIN: | 1536 | case -EAGAIN: |
| 1499 | /* The owner was exiting, try again. */ | 1537 | /* The owner was exiting, try again. */ |
| 1500 | double_unlock_hb(hb1, hb2); | 1538 | double_unlock_hb(hb1, hb2); |
| 1539 | hb_waiters_dec(hb2); | ||
| 1501 | put_futex_key(&key2); | 1540 | put_futex_key(&key2); |
| 1502 | put_futex_key(&key1); | 1541 | put_futex_key(&key1); |
| 1503 | cond_resched(); | 1542 | cond_resched(); |
| @@ -1573,6 +1612,7 @@ retry_private: | |||
| 1573 | 1612 | ||
| 1574 | out_unlock: | 1613 | out_unlock: |
| 1575 | double_unlock_hb(hb1, hb2); | 1614 | double_unlock_hb(hb1, hb2); |
| 1615 | hb_waiters_dec(hb2); | ||
| 1576 | 1616 | ||
| 1577 | /* | 1617 | /* |
| 1578 | * drop_futex_key_refs() must be called outside the spinlocks. During | 1618 | * drop_futex_key_refs() must be called outside the spinlocks. During |
| @@ -1600,6 +1640,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) | |||
| 1600 | struct futex_hash_bucket *hb; | 1640 | struct futex_hash_bucket *hb; |
| 1601 | 1641 | ||
| 1602 | hb = hash_futex(&q->key); | 1642 | hb = hash_futex(&q->key); |
| 1643 | |||
| 1644 | /* | ||
| 1645 | * Increment the counter before taking the lock so that | ||
| 1646 | * a potential waker won't miss a to-be-slept task that is | ||
| 1647 | * waiting for the spinlock. This is safe as all queue_lock() | ||
| 1648 | * users end up calling queue_me(). Similarly, for housekeeping, | ||
| 1649 | * decrement the counter at queue_unlock() when some error has | ||
| 1650 | * occurred and we don't end up adding the task to the list. | ||
| 1651 | */ | ||
| 1652 | hb_waiters_inc(hb); | ||
| 1653 | |||
| 1603 | q->lock_ptr = &hb->lock; | 1654 | q->lock_ptr = &hb->lock; |
| 1604 | 1655 | ||
| 1605 | spin_lock(&hb->lock); /* implies MB (A) */ | 1656 | spin_lock(&hb->lock); /* implies MB (A) */ |
| @@ -1611,6 +1662,7 @@ queue_unlock(struct futex_hash_bucket *hb) | |||
| 1611 | __releases(&hb->lock) | 1662 | __releases(&hb->lock) |
| 1612 | { | 1663 | { |
| 1613 | spin_unlock(&hb->lock); | 1664 | spin_unlock(&hb->lock); |
| 1665 | hb_waiters_dec(hb); | ||
| 1614 | } | 1666 | } |
| 1615 | 1667 | ||
| 1616 | /** | 1668 | /** |
| @@ -2342,6 +2394,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
| 2342 | * Unqueue the futex_q and determine which it was. | 2394 | * Unqueue the futex_q and determine which it was. |
| 2343 | */ | 2395 | */ |
| 2344 | plist_del(&q->list, &hb->chain); | 2396 | plist_del(&q->list, &hb->chain); |
| 2397 | hb_waiters_dec(hb); | ||
| 2345 | 2398 | ||
| 2346 | /* Handle spurious wakeups gracefully */ | 2399 | /* Handle spurious wakeups gracefully */ |
| 2347 | ret = -EWOULDBLOCK; | 2400 | ret = -EWOULDBLOCK; |
| @@ -2843,9 +2896,28 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, | |||
| 2843 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); | 2896 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
| 2844 | } | 2897 | } |
| 2845 | 2898 | ||
| 2846 | static int __init futex_init(void) | 2899 | static void __init futex_detect_cmpxchg(void) |
| 2847 | { | 2900 | { |
| 2901 | #ifndef CONFIG_HAVE_FUTEX_CMPXCHG | ||
| 2848 | u32 curval; | 2902 | u32 curval; |
| 2903 | |||
| 2904 | /* | ||
| 2905 | * This will fail and we want it. Some arch implementations do | ||
| 2906 | * runtime detection of the futex_atomic_cmpxchg_inatomic() | ||
| 2907 | * functionality. We want to know that before we call in any | ||
| 2908 | * of the complex code paths. Also we want to prevent | ||
| 2909 | * registration of robust lists in that case. NULL is | ||
| 2910 | * guaranteed to fault and we get -EFAULT on functional | ||
| 2911 | * implementation, the non-functional ones will return | ||
| 2912 | * -ENOSYS. | ||
| 2913 | */ | ||
| 2914 | if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) | ||
| 2915 | futex_cmpxchg_enabled = 1; | ||
| 2916 | #endif | ||
| 2917 | } | ||
| 2918 | |||
| 2919 | static int __init futex_init(void) | ||
| 2920 | { | ||
| 2849 | unsigned int futex_shift; | 2921 | unsigned int futex_shift; |
| 2850 | unsigned long i; | 2922 | unsigned long i; |
| 2851 | 2923 | ||
| @@ -2861,20 +2933,11 @@ static int __init futex_init(void) | |||
| 2861 | &futex_shift, NULL, | 2933 | &futex_shift, NULL, |
| 2862 | futex_hashsize, futex_hashsize); | 2934 | futex_hashsize, futex_hashsize); |
| 2863 | futex_hashsize = 1UL << futex_shift; | 2935 | futex_hashsize = 1UL << futex_shift; |
| 2864 | /* | 2936 | |
| 2865 | * This will fail and we want it. Some arch implementations do | 2937 | futex_detect_cmpxchg(); |
| 2866 | * runtime detection of the futex_atomic_cmpxchg_inatomic() | ||
| 2867 | * functionality. We want to know that before we call in any | ||
| 2868 | * of the complex code paths. Also we want to prevent | ||
| 2869 | * registration of robust lists in that case. NULL is | ||
| 2870 | * guaranteed to fault and we get -EFAULT on functional | ||
| 2871 | * implementation, the non-functional ones will return | ||
| 2872 | * -ENOSYS. | ||
| 2873 | */ | ||
| 2874 | if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) | ||
| 2875 | futex_cmpxchg_enabled = 1; | ||
| 2876 | 2938 | ||
| 2877 | for (i = 0; i < futex_hashsize; i++) { | 2939 | for (i = 0; i < futex_hashsize; i++) { |
| 2940 | atomic_set(&futex_queues[i].waiters, 0); | ||
| 2878 | plist_head_init(&futex_queues[i].chain); | 2941 | plist_head_init(&futex_queues[i].chain); |
| 2879 | spin_lock_init(&futex_queues[i].lock); | 2942 | spin_lock_init(&futex_queues[i].lock); |
| 2880 | } | 2943 | } |
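The per-bucket waiters counter introduced above follows a small protocol: a would-be waiter increments it with a full barrier before taking the bucket lock (queue_lock), takes the count back if it aborts without queueing (queue_unlock), and a waker only grabs the lock when the count is non-zero (hb_waiters_pending). A simplified C11 model of that protocol, not kernel code:

```c
#include <stdatomic.h>
#include <stdbool.h>

struct bucket {
	atomic_int waiters;
	/* lock and plist deliberately omitted from this model */
};

/* hb_waiters_inc(): publish the waiter before lock/queue; seq_cst plays the role of barrier (A) */
static void waiter_prepare(struct bucket *hb)
{
	atomic_fetch_add_explicit(&hb->waiters, 1, memory_order_seq_cst);
}

/* queue_unlock() error path: the task never queued, so undo the count */
static void waiter_abort(struct bucket *hb)
{
	atomic_fetch_sub_explicit(&hb->waiters, 1, memory_order_seq_cst);
}

/* hb_waiters_pending(): the waker skips the bucket lock when nobody can be queued */
static bool waker_should_lock(struct bucket *hb)
{
	return atomic_load_explicit(&hb->waiters, memory_order_seq_cst) > 0;
}
```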
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index f9f44fd4d34d..55c8c9349cfe 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
| @@ -183,7 +183,7 @@ COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, | |||
| 183 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || | 183 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
| 184 | cmd == FUTEX_WAIT_BITSET || | 184 | cmd == FUTEX_WAIT_BITSET || |
| 185 | cmd == FUTEX_WAIT_REQUEUE_PI)) { | 185 | cmd == FUTEX_WAIT_REQUEUE_PI)) { |
| 186 | if (get_compat_timespec(&ts, utime)) | 186 | if (compat_get_timespec(&ts, utime)) |
| 187 | return -EFAULT; | 187 | return -EFAULT; |
| 188 | if (!timespec_valid(&ts)) | 188 | if (!timespec_valid(&ts)) |
| 189 | return -EINVAL; | 189 | return -EINVAL; |
diff --git a/kernel/groups.c b/kernel/groups.c index 90cf1c38c8ea..451698f86cfa 100644 --- a/kernel/groups.c +++ b/kernel/groups.c | |||
| @@ -157,17 +157,13 @@ int groups_search(const struct group_info *group_info, kgid_t grp) | |||
| 157 | * set_groups - Change a group subscription in a set of credentials | 157 | * set_groups - Change a group subscription in a set of credentials |
| 158 | * @new: The newly prepared set of credentials to alter | 158 | * @new: The newly prepared set of credentials to alter |
| 159 | * @group_info: The group list to install | 159 | * @group_info: The group list to install |
| 160 | * | ||
| 161 | * Validate a group subscription and, if valid, insert it into a set | ||
| 162 | * of credentials. | ||
| 163 | */ | 160 | */ |
| 164 | int set_groups(struct cred *new, struct group_info *group_info) | 161 | void set_groups(struct cred *new, struct group_info *group_info) |
| 165 | { | 162 | { |
| 166 | put_group_info(new->group_info); | 163 | put_group_info(new->group_info); |
| 167 | groups_sort(group_info); | 164 | groups_sort(group_info); |
| 168 | get_group_info(group_info); | 165 | get_group_info(group_info); |
| 169 | new->group_info = group_info; | 166 | new->group_info = group_info; |
| 170 | return 0; | ||
| 171 | } | 167 | } |
| 172 | 168 | ||
| 173 | EXPORT_SYMBOL(set_groups); | 169 | EXPORT_SYMBOL(set_groups); |
| @@ -182,18 +178,12 @@ EXPORT_SYMBOL(set_groups); | |||
| 182 | int set_current_groups(struct group_info *group_info) | 178 | int set_current_groups(struct group_info *group_info) |
| 183 | { | 179 | { |
| 184 | struct cred *new; | 180 | struct cred *new; |
| 185 | int ret; | ||
| 186 | 181 | ||
| 187 | new = prepare_creds(); | 182 | new = prepare_creds(); |
| 188 | if (!new) | 183 | if (!new) |
| 189 | return -ENOMEM; | 184 | return -ENOMEM; |
| 190 | 185 | ||
| 191 | ret = set_groups(new, group_info); | 186 | set_groups(new, group_info); |
| 192 | if (ret < 0) { | ||
| 193 | abort_creds(new); | ||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | return commit_creds(new); | 187 | return commit_creds(new); |
| 198 | } | 188 | } |
| 199 | 189 | ||
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 09094361dce5..d55092ceee29 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -168,19 +168,6 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, | |||
| 168 | } | 168 | } |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | |||
| 172 | /* | ||
| 173 | * Get the preferred target CPU for NOHZ | ||
| 174 | */ | ||
| 175 | static int hrtimer_get_target(int this_cpu, int pinned) | ||
| 176 | { | ||
| 177 | #ifdef CONFIG_NO_HZ_COMMON | ||
| 178 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) | ||
| 179 | return get_nohz_timer_target(); | ||
| 180 | #endif | ||
| 181 | return this_cpu; | ||
| 182 | } | ||
| 183 | |||
| 184 | /* | 171 | /* |
| 185 | * With HIGHRES=y we do not migrate the timer when it is expiring | 172 | * With HIGHRES=y we do not migrate the timer when it is expiring |
| 186 | * before the next event on the target cpu because we cannot reprogram | 173 | * before the next event on the target cpu because we cannot reprogram |
| @@ -214,7 +201,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, | |||
| 214 | struct hrtimer_clock_base *new_base; | 201 | struct hrtimer_clock_base *new_base; |
| 215 | struct hrtimer_cpu_base *new_cpu_base; | 202 | struct hrtimer_cpu_base *new_cpu_base; |
| 216 | int this_cpu = smp_processor_id(); | 203 | int this_cpu = smp_processor_id(); |
| 217 | int cpu = hrtimer_get_target(this_cpu, pinned); | 204 | int cpu = get_nohz_timer_target(pinned); |
| 218 | int basenum = base->index; | 205 | int basenum = base->index; |
| 219 | 206 | ||
| 220 | again: | 207 | again: |
diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 0b9c169d577f..06bb1417b063 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c | |||
| @@ -246,5 +246,4 @@ static int __init hung_task_init(void) | |||
| 246 | 246 | ||
| 247 | return 0; | 247 | return 0; |
| 248 | } | 248 | } |
| 249 | 249 | subsys_initcall(hung_task_init); | |
| 250 | module_init(hung_task_init); | ||
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index dc04c166c54d..6397df2d6945 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -281,6 +281,19 @@ void unmask_irq(struct irq_desc *desc) | |||
| 281 | } | 281 | } |
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | void unmask_threaded_irq(struct irq_desc *desc) | ||
| 285 | { | ||
| 286 | struct irq_chip *chip = desc->irq_data.chip; | ||
| 287 | |||
| 288 | if (chip->flags & IRQCHIP_EOI_THREADED) | ||
| 289 | chip->irq_eoi(&desc->irq_data); | ||
| 290 | |||
| 291 | if (chip->irq_unmask) { | ||
| 292 | chip->irq_unmask(&desc->irq_data); | ||
| 293 | irq_state_clr_masked(desc); | ||
| 294 | } | ||
| 295 | } | ||
| 296 | |||
| 284 | /* | 297 | /* |
| 285 | * handle_nested_irq - Handle a nested irq from a irq thread | 298 | * handle_nested_irq - Handle a nested irq from a irq thread |
| 286 | * @irq: the interrupt number | 299 | * @irq: the interrupt number |
| @@ -435,6 +448,27 @@ static inline void preflow_handler(struct irq_desc *desc) | |||
| 435 | static inline void preflow_handler(struct irq_desc *desc) { } | 448 | static inline void preflow_handler(struct irq_desc *desc) { } |
| 436 | #endif | 449 | #endif |
| 437 | 450 | ||
| 451 | static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) | ||
| 452 | { | ||
| 453 | if (!(desc->istate & IRQS_ONESHOT)) { | ||
| 454 | chip->irq_eoi(&desc->irq_data); | ||
| 455 | return; | ||
| 456 | } | ||
| 457 | /* | ||
| 458 | * We need to unmask in the following cases: | ||
| 459 | * - Oneshot irq which did not wake the thread (caused by a | ||
| 460 | * spurious interrupt or a primary handler handling it | ||
| 461 | * completely). | ||
| 462 | */ | ||
| 463 | if (!irqd_irq_disabled(&desc->irq_data) && | ||
| 464 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { | ||
| 465 | chip->irq_eoi(&desc->irq_data); | ||
| 466 | unmask_irq(desc); | ||
| 467 | } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { | ||
| 468 | chip->irq_eoi(&desc->irq_data); | ||
| 469 | } | ||
| 470 | } | ||
| 471 | |||
| 438 | /** | 472 | /** |
| 439 | * handle_fasteoi_irq - irq handler for transparent controllers | 473 | * handle_fasteoi_irq - irq handler for transparent controllers |
| 440 | * @irq: the interrupt number | 474 | * @irq: the interrupt number |
| @@ -448,6 +482,8 @@ static inline void preflow_handler(struct irq_desc *desc) { } | |||
| 448 | void | 482 | void |
| 449 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | 483 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) |
| 450 | { | 484 | { |
| 485 | struct irq_chip *chip = desc->irq_data.chip; | ||
| 486 | |||
| 451 | raw_spin_lock(&desc->lock); | 487 | raw_spin_lock(&desc->lock); |
| 452 | 488 | ||
| 453 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 489 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
| @@ -473,18 +509,14 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 473 | preflow_handler(desc); | 509 | preflow_handler(desc); |
| 474 | handle_irq_event(desc); | 510 | handle_irq_event(desc); |
| 475 | 511 | ||
| 476 | if (desc->istate & IRQS_ONESHOT) | 512 | cond_unmask_eoi_irq(desc, chip); |
| 477 | cond_unmask_irq(desc); | ||
| 478 | 513 | ||
| 479 | out_eoi: | ||
| 480 | desc->irq_data.chip->irq_eoi(&desc->irq_data); | ||
| 481 | out_unlock: | ||
| 482 | raw_spin_unlock(&desc->lock); | 514 | raw_spin_unlock(&desc->lock); |
| 483 | return; | 515 | return; |
| 484 | out: | 516 | out: |
| 485 | if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED)) | 517 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
| 486 | goto out_eoi; | 518 | chip->irq_eoi(&desc->irq_data); |
| 487 | goto out_unlock; | 519 | raw_spin_unlock(&desc->lock); |
| 488 | } | 520 | } |
| 489 | 521 | ||
| 490 | /** | 522 | /** |
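From the driver side, the cond_unmask_eoi_irq() rework above is invisible: with a oneshot threaded handler the line stays masked, and on a chip flagged IRQCHIP_EOI_THREADED the EOI is now held back as well, until the thread function returns and unmask_threaded_irq() issues both. A sketch of that ordinary driver pattern, with placeholder names and irq number (the request_threaded_irq() call itself is the standard API):

```c
#include <linux/interrupt.h>

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* sleepable work; the unmask (and, if EOI_THREADED, the EOI) waits for us */
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *my_dev)
{
	/* NULL primary handler + IRQF_ONESHOT: the core keeps the irq masked */
	return request_threaded_irq(irq, NULL, my_thread_fn,
				    IRQF_ONESHOT, "my-dev", my_dev);
}
```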
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 131ca176b497..635480270858 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -41,6 +41,7 @@ irqreturn_t no_action(int cpl, void *dev_id) | |||
| 41 | { | 41 | { |
| 42 | return IRQ_NONE; | 42 | return IRQ_NONE; |
| 43 | } | 43 | } |
| 44 | EXPORT_SYMBOL_GPL(no_action); | ||
| 44 | 45 | ||
| 45 | static void warn_no_thread(unsigned int irq, struct irqaction *action) | 46 | static void warn_no_thread(unsigned int irq, struct irqaction *action) |
| 46 | { | 47 | { |
| @@ -51,7 +52,7 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action) | |||
| 51 | "but no thread function available.", irq, action->name); | 52 | "but no thread function available.", irq, action->name); |
| 52 | } | 53 | } |
| 53 | 54 | ||
| 54 | static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action) | 55 | void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action) |
| 55 | { | 56 | { |
| 56 | /* | 57 | /* |
| 57 | * In case the thread crashed and was killed we just pretend that | 58 | * In case the thread crashed and was killed we just pretend that |
| @@ -157,7 +158,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) | |||
| 157 | break; | 158 | break; |
| 158 | } | 159 | } |
| 159 | 160 | ||
| 160 | irq_wake_thread(desc, action); | 161 | __irq_wake_thread(desc, action); |
| 161 | 162 | ||
| 162 | /* Fall through to add to randomness */ | 163 | /* Fall through to add to randomness */ |
| 163 | case IRQ_HANDLED: | 164 | case IRQ_HANDLED: |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 001fa5bab490..ddf1ffeb79f1 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | * of this file for your non core code. | 6 | * of this file for your non core code. |
| 7 | */ | 7 | */ |
| 8 | #include <linux/irqdesc.h> | 8 | #include <linux/irqdesc.h> |
| 9 | #include <linux/kernel_stat.h> | ||
| 9 | 10 | ||
| 10 | #ifdef CONFIG_SPARSE_IRQ | 11 | #ifdef CONFIG_SPARSE_IRQ |
| 11 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) | 12 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) |
| @@ -73,6 +74,7 @@ extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu); | |||
| 73 | extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); | 74 | extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); |
| 74 | extern void mask_irq(struct irq_desc *desc); | 75 | extern void mask_irq(struct irq_desc *desc); |
| 75 | extern void unmask_irq(struct irq_desc *desc); | 76 | extern void unmask_irq(struct irq_desc *desc); |
| 77 | extern void unmask_threaded_irq(struct irq_desc *desc); | ||
| 76 | 78 | ||
| 77 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | 79 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
| 78 | 80 | ||
| @@ -82,6 +84,7 @@ irqreturn_t handle_irq_event(struct irq_desc *desc); | |||
| 82 | /* Resending of interrupts :*/ | 84 | /* Resending of interrupts :*/ |
| 83 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | 85 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); |
| 84 | bool irq_wait_for_poll(struct irq_desc *desc); | 86 | bool irq_wait_for_poll(struct irq_desc *desc); |
| 87 | void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); | ||
| 85 | 88 | ||
| 86 | #ifdef CONFIG_PROC_FS | 89 | #ifdef CONFIG_PROC_FS |
| 87 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); | 90 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); |
| @@ -179,3 +182,9 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) | |||
| 179 | { | 182 | { |
| 180 | return d->state_use_accessors & mask; | 183 | return d->state_use_accessors & mask; |
| 181 | } | 184 | } |
| 185 | |||
| 186 | static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc) | ||
| 187 | { | ||
| 188 | __this_cpu_inc(*desc->kstat_irqs); | ||
| 189 | __this_cpu_inc(kstat.irqs_sum); | ||
| 190 | } | ||
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 8ab8e9390297..a7174617616b 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
| @@ -489,6 +489,11 @@ void dynamic_irq_cleanup(unsigned int irq) | |||
| 489 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 489 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 490 | } | 490 | } |
| 491 | 491 | ||
| 492 | void kstat_incr_irq_this_cpu(unsigned int irq) | ||
| 493 | { | ||
| 494 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); | ||
| 495 | } | ||
| 496 | |||
| 492 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | 497 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) |
| 493 | { | 498 | { |
| 494 | struct irq_desc *desc = irq_to_desc(irq); | 499 | struct irq_desc *desc = irq_to_desc(irq); |
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index cf68bb36fe58..f14033700c25 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/mutex.h> | 10 | #include <linux/mutex.h> |
| 11 | #include <linux/of.h> | 11 | #include <linux/of.h> |
| 12 | #include <linux/of_address.h> | 12 | #include <linux/of_address.h> |
| 13 | #include <linux/of_irq.h> | ||
| 13 | #include <linux/topology.h> | 14 | #include <linux/topology.h> |
| 14 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
| 15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 481a13c43b17..2486a4c1a710 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -32,24 +32,10 @@ static int __init setup_forced_irqthreads(char *arg) | |||
| 32 | early_param("threadirqs", setup_forced_irqthreads); | 32 | early_param("threadirqs", setup_forced_irqthreads); |
| 33 | #endif | 33 | #endif |
| 34 | 34 | ||
| 35 | /** | 35 | static void __synchronize_hardirq(struct irq_desc *desc) |
| 36 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | ||
| 37 | * @irq: interrupt number to wait for | ||
| 38 | * | ||
| 39 | * This function waits for any pending IRQ handlers for this interrupt | ||
| 40 | * to complete before returning. If you use this function while | ||
| 41 | * holding a resource the IRQ handler may need you will deadlock. | ||
| 42 | * | ||
| 43 | * This function may be called - with care - from IRQ context. | ||
| 44 | */ | ||
| 45 | void synchronize_irq(unsigned int irq) | ||
| 46 | { | 36 | { |
| 47 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 48 | bool inprogress; | 37 | bool inprogress; |
| 49 | 38 | ||
| 50 | if (!desc) | ||
| 51 | return; | ||
| 52 | |||
| 53 | do { | 39 | do { |
| 54 | unsigned long flags; | 40 | unsigned long flags; |
| 55 | 41 | ||
| @@ -67,12 +53,56 @@ void synchronize_irq(unsigned int irq) | |||
| 67 | 53 | ||
| 68 | /* Oops, that failed? */ | 54 | /* Oops, that failed? */ |
| 69 | } while (inprogress); | 55 | } while (inprogress); |
| 56 | } | ||
| 70 | 57 | ||
| 71 | /* | 58 | /** |
| 72 | * We made sure that no hardirq handler is running. Now verify | 59 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) |
| 73 | * that no threaded handlers are active. | 60 | * @irq: interrupt number to wait for |
| 74 | */ | 61 | * |
| 75 | wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); | 62 | * This function waits for any pending hard IRQ handlers for this |
| 63 | * interrupt to complete before returning. If you use this | ||
| 64 | * function while holding a resource the IRQ handler may need you | ||
| 65 | * will deadlock. It does not take associated threaded handlers | ||
| 66 | * into account. | ||
| 67 | * | ||
| 68 | * Do not use this for shutdown scenarios where you must be sure | ||
| 69 | * that all parts (hardirq and threaded handler) have completed. | ||
| 70 | * | ||
| 71 | * This function may be called - with care - from IRQ context. | ||
| 72 | */ | ||
| 73 | void synchronize_hardirq(unsigned int irq) | ||
| 74 | { | ||
| 75 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 76 | |||
| 77 | if (desc) | ||
| 78 | __synchronize_hardirq(desc); | ||
| 79 | } | ||
| 80 | EXPORT_SYMBOL(synchronize_hardirq); | ||
| 81 | |||
| 82 | /** | ||
| 83 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | ||
| 84 | * @irq: interrupt number to wait for | ||
| 85 | * | ||
| 86 | * This function waits for any pending IRQ handlers for this interrupt | ||
| 87 | * to complete before returning. If you use this function while | ||
| 88 | * holding a resource the IRQ handler may need you will deadlock. | ||
| 89 | * | ||
| 90 | * This function may be called - with care - from IRQ context. | ||
| 91 | */ | ||
| 92 | void synchronize_irq(unsigned int irq) | ||
| 93 | { | ||
| 94 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 95 | |||
| 96 | if (desc) { | ||
| 97 | __synchronize_hardirq(desc); | ||
| 98 | /* | ||
| 99 | * We made sure that no hardirq handler is | ||
| 100 | * running. Now verify that no threaded handlers are | ||
| 101 | * active. | ||
| 102 | */ | ||
| 103 | wait_event(desc->wait_for_threads, | ||
| 104 | !atomic_read(&desc->threads_active)); | ||
| 105 | } | ||
| 76 | } | 106 | } |
| 77 | EXPORT_SYMBOL(synchronize_irq); | 107 | EXPORT_SYMBOL(synchronize_irq); |
| 78 | 108 | ||
| @@ -718,7 +748,7 @@ again: | |||
| 718 | 748 | ||
| 719 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | 749 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
| 720 | irqd_irq_masked(&desc->irq_data)) | 750 | irqd_irq_masked(&desc->irq_data)) |
| 721 | unmask_irq(desc); | 751 | unmask_threaded_irq(desc); |
| 722 | 752 | ||
| 723 | out_unlock: | 753 | out_unlock: |
| 724 | raw_spin_unlock_irq(&desc->lock); | 754 | raw_spin_unlock_irq(&desc->lock); |
| @@ -727,7 +757,7 @@ out_unlock: | |||
| 727 | 757 | ||
| 728 | #ifdef CONFIG_SMP | 758 | #ifdef CONFIG_SMP |
| 729 | /* | 759 | /* |
| 730 | * Check whether we need to chasnge the affinity of the interrupt thread. | 760 | * Check whether we need to change the affinity of the interrupt thread. |
| 731 | */ | 761 | */ |
| 732 | static void | 762 | static void |
| 733 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 763 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
| @@ -802,8 +832,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc, | |||
| 802 | 832 | ||
| 803 | static void wake_threads_waitq(struct irq_desc *desc) | 833 | static void wake_threads_waitq(struct irq_desc *desc) |
| 804 | { | 834 | { |
| 805 | if (atomic_dec_and_test(&desc->threads_active) && | 835 | if (atomic_dec_and_test(&desc->threads_active)) |
| 806 | waitqueue_active(&desc->wait_for_threads)) | ||
| 807 | wake_up(&desc->wait_for_threads); | 836 | wake_up(&desc->wait_for_threads); |
| 808 | } | 837 | } |
| 809 | 838 | ||
| @@ -881,6 +910,33 @@ static int irq_thread(void *data) | |||
| 881 | return 0; | 910 | return 0; |
| 882 | } | 911 | } |
| 883 | 912 | ||
| 913 | /** | ||
| 914 | * irq_wake_thread - wake the irq thread for the action identified by dev_id | ||
| 915 | * @irq: Interrupt line | ||
| 916 | * @dev_id: Device identity for which the thread should be woken | ||
| 917 | * | ||
| 918 | */ | ||
| 919 | void irq_wake_thread(unsigned int irq, void *dev_id) | ||
| 920 | { | ||
| 921 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 922 | struct irqaction *action; | ||
| 923 | unsigned long flags; | ||
| 924 | |||
| 925 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | ||
| 926 | return; | ||
| 927 | |||
| 928 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 929 | for (action = desc->action; action; action = action->next) { | ||
| 930 | if (action->dev_id == dev_id) { | ||
| 931 | if (action->thread) | ||
| 932 | __irq_wake_thread(desc, action); | ||
| 933 | break; | ||
| 934 | } | ||
| 935 | } | ||
| 936 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 937 | } | ||
| 938 | EXPORT_SYMBOL_GPL(irq_wake_thread); | ||
| 939 | |||
| 884 | static void irq_setup_forced_threading(struct irqaction *new) | 940 | static void irq_setup_forced_threading(struct irqaction *new) |
| 885 | { | 941 | { |
| 886 | if (!force_irqthreads) | 942 | if (!force_irqthreads) |
| @@ -897,6 +953,23 @@ static void irq_setup_forced_threading(struct irqaction *new) | |||
| 897 | } | 953 | } |
| 898 | } | 954 | } |
| 899 | 955 | ||
| 956 | static int irq_request_resources(struct irq_desc *desc) | ||
| 957 | { | ||
| 958 | struct irq_data *d = &desc->irq_data; | ||
| 959 | struct irq_chip *c = d->chip; | ||
| 960 | |||
| 961 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | ||
| 962 | } | ||
| 963 | |||
| 964 | static void irq_release_resources(struct irq_desc *desc) | ||
| 965 | { | ||
| 966 | struct irq_data *d = &desc->irq_data; | ||
| 967 | struct irq_chip *c = d->chip; | ||
| 968 | |||
| 969 | if (c->irq_release_resources) | ||
| 970 | c->irq_release_resources(d); | ||
| 971 | } | ||
| 972 | |||
| 900 | /* | 973 | /* |
| 901 | * Internal function to register an irqaction - typically used to | 974 | * Internal function to register an irqaction - typically used to |
| 902 | * allocate special interrupts that are part of the architecture. | 975 | * allocate special interrupts that are part of the architecture. |
| @@ -1092,6 +1165,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 1092 | } | 1165 | } |
| 1093 | 1166 | ||
| 1094 | if (!shared) { | 1167 | if (!shared) { |
| 1168 | ret = irq_request_resources(desc); | ||
| 1169 | if (ret) { | ||
| 1170 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | ||
| 1171 | new->name, irq, desc->irq_data.chip->name); | ||
| 1172 | goto out_mask; | ||
| 1173 | } | ||
| 1174 | |||
| 1095 | init_waitqueue_head(&desc->wait_for_threads); | 1175 | init_waitqueue_head(&desc->wait_for_threads); |
| 1096 | 1176 | ||
| 1097 | /* Setup the type (level, edge polarity) if configured: */ | 1177 | /* Setup the type (level, edge polarity) if configured: */ |
| @@ -1262,8 +1342,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 1262 | *action_ptr = action->next; | 1342 | *action_ptr = action->next; |
| 1263 | 1343 | ||
| 1264 | /* If this was the last handler, shut down the IRQ line: */ | 1344 | /* If this was the last handler, shut down the IRQ line: */ |
| 1265 | if (!desc->action) | 1345 | if (!desc->action) { |
| 1266 | irq_shutdown(desc); | 1346 | irq_shutdown(desc); |
| 1347 | irq_release_resources(desc); | ||
| 1348 | } | ||
| 1267 | 1349 | ||
| 1268 | #ifdef CONFIG_SMP | 1350 | #ifdef CONFIG_SMP |
| 1269 | /* make sure affinity_hint is cleaned up */ | 1351 | /* make sure affinity_hint is cleaned up */ |
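Two of the additions above are driver-facing helpers: synchronize_hardirq() waits only for the hard handler, explicitly not for the threaded part, while irq_wake_thread() runs the threaded handler registered for a matching dev_id without a hardware interrupt. A short usage sketch with placeholder names:

```c
#include <linux/interrupt.h>

/* run my_dev's registered thread_fn as if the interrupt had fired */
static void my_kick_thread(unsigned int irq, void *my_dev)
{
	irq_wake_thread(irq, my_dev);
}

/* wait for the hard handler only; for teardown use synchronize_irq() instead */
static void my_quiesce_hardirq(unsigned int irq)
{
	synchronize_hardirq(irq);
}
```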
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 36f6ee181b0c..ac1ba2f11032 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
| @@ -324,15 +324,15 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
| 324 | 324 | ||
| 325 | #ifdef CONFIG_SMP | 325 | #ifdef CONFIG_SMP |
| 326 | /* create /proc/irq/<irq>/smp_affinity */ | 326 | /* create /proc/irq/<irq>/smp_affinity */ |
| 327 | proc_create_data("smp_affinity", 0600, desc->dir, | 327 | proc_create_data("smp_affinity", 0644, desc->dir, |
| 328 | &irq_affinity_proc_fops, (void *)(long)irq); | 328 | &irq_affinity_proc_fops, (void *)(long)irq); |
| 329 | 329 | ||
| 330 | /* create /proc/irq/<irq>/affinity_hint */ | 330 | /* create /proc/irq/<irq>/affinity_hint */ |
| 331 | proc_create_data("affinity_hint", 0400, desc->dir, | 331 | proc_create_data("affinity_hint", 0444, desc->dir, |
| 332 | &irq_affinity_hint_proc_fops, (void *)(long)irq); | 332 | &irq_affinity_hint_proc_fops, (void *)(long)irq); |
| 333 | 333 | ||
| 334 | /* create /proc/irq/<irq>/smp_affinity_list */ | 334 | /* create /proc/irq/<irq>/smp_affinity_list */ |
| 335 | proc_create_data("smp_affinity_list", 0600, desc->dir, | 335 | proc_create_data("smp_affinity_list", 0644, desc->dir, |
| 336 | &irq_affinity_list_proc_fops, (void *)(long)irq); | 336 | &irq_affinity_list_proc_fops, (void *)(long)irq); |
| 337 | 337 | ||
| 338 | proc_create_data("node", 0444, desc->dir, | 338 | proc_create_data("node", 0444, desc->dir, |
| @@ -372,7 +372,7 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action) | |||
| 372 | static void register_default_affinity_proc(void) | 372 | static void register_default_affinity_proc(void) |
| 373 | { | 373 | { |
| 374 | #ifdef CONFIG_SMP | 374 | #ifdef CONFIG_SMP |
| 375 | proc_create("irq/default_smp_affinity", 0600, NULL, | 375 | proc_create("irq/default_smp_affinity", 0644, NULL, |
| 376 | &default_affinity_proc_fops); | 376 | &default_affinity_proc_fops); |
| 377 | #endif | 377 | #endif |
| 378 | } | 378 | } |
diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 55fcce6065cf..a82170e2fa78 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c | |||
| @@ -61,11 +61,11 @@ void __weak arch_irq_work_raise(void) | |||
| 61 | * | 61 | * |
| 62 | * Can be re-enqueued while the callback is still in progress. | 62 | * Can be re-enqueued while the callback is still in progress. |
| 63 | */ | 63 | */ |
| 64 | void irq_work_queue(struct irq_work *work) | 64 | bool irq_work_queue(struct irq_work *work) |
| 65 | { | 65 | { |
| 66 | /* Only queue if not already pending */ | 66 | /* Only queue if not already pending */ |
| 67 | if (!irq_work_claim(work)) | 67 | if (!irq_work_claim(work)) |
| 68 | return; | 68 | return false; |
| 69 | 69 | ||
| 70 | /* Queue the entry and raise the IPI if needed. */ | 70 | /* Queue the entry and raise the IPI if needed. */ |
| 71 | preempt_disable(); | 71 | preempt_disable(); |
| @@ -83,6 +83,8 @@ void irq_work_queue(struct irq_work *work) | |||
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | preempt_enable(); | 85 | preempt_enable(); |
| 86 | |||
| 87 | return true; | ||
| 86 | } | 88 | } |
| 87 | EXPORT_SYMBOL_GPL(irq_work_queue); | 89 | EXPORT_SYMBOL_GPL(irq_work_queue); |
| 88 | 90 | ||
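irq_work_queue() now tells its caller whether the item was actually queued: a false return means the work was already claimed and pending, so the callback still runs, but only once for both requests. A minimal sketch with placeholder names:

```c
#include <linux/irq_work.h>

static void my_irq_work_fn(struct irq_work *work)
{
	/* runs later from the irq_work IPI / tick path */
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_irq_work_fn);
}

static bool my_poke(void)
{
	/* false: already pending, no extra IPI raised, the callback fires once */
	return irq_work_queue(&my_work);
}
```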
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 3127ad52cdb2..cb0cf37dac3a 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
| 24 | #include <linux/ctype.h> | 24 | #include <linux/ctype.h> |
| 25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 26 | #include <linux/compiler.h> | ||
| 26 | 27 | ||
| 27 | #include <asm/sections.h> | 28 | #include <asm/sections.h> |
| 28 | 29 | ||
| @@ -36,8 +37,8 @@ | |||
| 36 | * These will be re-linked against their real values | 37 | * These will be re-linked against their real values |
| 37 | * during the second link stage. | 38 | * during the second link stage. |
| 38 | */ | 39 | */ |
| 39 | extern const unsigned long kallsyms_addresses[] __attribute__((weak)); | 40 | extern const unsigned long kallsyms_addresses[] __weak; |
| 40 | extern const u8 kallsyms_names[] __attribute__((weak)); | 41 | extern const u8 kallsyms_names[] __weak; |
| 41 | 42 | ||
| 42 | /* | 43 | /* |
| 43 | * Tell the compiler that the count isn't in the small data section if the arch | 44 | * Tell the compiler that the count isn't in the small data section if the arch |
| @@ -46,10 +47,10 @@ extern const u8 kallsyms_names[] __attribute__((weak)); | |||
| 46 | extern const unsigned long kallsyms_num_syms | 47 | extern const unsigned long kallsyms_num_syms |
| 47 | __attribute__((weak, section(".rodata"))); | 48 | __attribute__((weak, section(".rodata"))); |
| 48 | 49 | ||
| 49 | extern const u8 kallsyms_token_table[] __attribute__((weak)); | 50 | extern const u8 kallsyms_token_table[] __weak; |
| 50 | extern const u16 kallsyms_token_index[] __attribute__((weak)); | 51 | extern const u16 kallsyms_token_index[] __weak; |
| 51 | 52 | ||
| 52 | extern const unsigned long kallsyms_markers[] __attribute__((weak)); | 53 | extern const unsigned long kallsyms_markers[] __weak; |
| 53 | 54 | ||
| 54 | static inline int is_kernel_inittext(unsigned long addr) | 55 | static inline int is_kernel_inittext(unsigned long addr) |
| 55 | { | 56 | { |
diff --git a/kernel/kexec.c b/kernel/kexec.c index 60bafbed06ab..c8380ad203bc 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/vmalloc.h> | 32 | #include <linux/vmalloc.h> |
| 33 | #include <linux/swap.h> | 33 | #include <linux/swap.h> |
| 34 | #include <linux/syscore_ops.h> | 34 | #include <linux/syscore_ops.h> |
| 35 | #include <linux/compiler.h> | ||
| 35 | 36 | ||
| 36 | #include <asm/page.h> | 37 | #include <asm/page.h> |
| 37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
| @@ -1039,10 +1040,10 @@ void __weak crash_unmap_reserved_pages(void) | |||
| 1039 | {} | 1040 | {} |
| 1040 | 1041 | ||
| 1041 | #ifdef CONFIG_COMPAT | 1042 | #ifdef CONFIG_COMPAT |
| 1042 | asmlinkage long compat_sys_kexec_load(unsigned long entry, | 1043 | COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, |
| 1043 | unsigned long nr_segments, | 1044 | compat_ulong_t, nr_segments, |
| 1044 | struct compat_kexec_segment __user *segments, | 1045 | struct compat_kexec_segment __user *, segments, |
| 1045 | unsigned long flags) | 1046 | compat_ulong_t, flags) |
| 1046 | { | 1047 | { |
| 1047 | struct compat_kexec_segment in; | 1048 | struct compat_kexec_segment in; |
| 1048 | struct kexec_segment out, __user *ksegments; | 1049 | struct kexec_segment out, __user *ksegments; |
| @@ -1235,7 +1236,7 @@ static int __init crash_notes_memory_init(void) | |||
| 1235 | } | 1236 | } |
| 1236 | return 0; | 1237 | return 0; |
| 1237 | } | 1238 | } |
| 1238 | module_init(crash_notes_memory_init) | 1239 | subsys_initcall(crash_notes_memory_init); |
| 1239 | 1240 | ||
| 1240 | 1241 | ||
| 1241 | /* | 1242 | /* |
| @@ -1551,10 +1552,10 @@ void vmcoreinfo_append_str(const char *fmt, ...) | |||
| 1551 | * provide an empty default implementation here -- architecture | 1552 | * provide an empty default implementation here -- architecture |
| 1552 | * code may override this | 1553 | * code may override this |
| 1553 | */ | 1554 | */ |
| 1554 | void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void) | 1555 | void __weak arch_crash_save_vmcoreinfo(void) |
| 1555 | {} | 1556 | {} |
| 1556 | 1557 | ||
| 1557 | unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void) | 1558 | unsigned long __weak paddr_vmcoreinfo_note(void) |
| 1558 | { | 1559 | { |
| 1559 | return __pa((unsigned long)(char *)&vmcoreinfo_note); | 1560 | return __pa((unsigned long)(char *)&vmcoreinfo_note); |
| 1560 | } | 1561 | } |
| @@ -1629,7 +1630,7 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
| 1629 | return 0; | 1630 | return 0; |
| 1630 | } | 1631 | } |
| 1631 | 1632 | ||
| 1632 | module_init(crash_save_vmcoreinfo_init) | 1633 | subsys_initcall(crash_save_vmcoreinfo_init); |
| 1633 | 1634 | ||
| 1634 | /* | 1635 | /* |
| 1635 | * Move into place and start executing a preloaded standalone | 1636 | * Move into place and start executing a preloaded standalone |
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index d945a949760f..2495a9b14ac8 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c | |||
| @@ -18,6 +18,9 @@ | |||
| 18 | #include <linux/stat.h> | 18 | #include <linux/stat.h> |
| 19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
| 20 | #include <linux/capability.h> | 20 | #include <linux/capability.h> |
| 21 | #include <linux/compiler.h> | ||
| 22 | |||
| 23 | #include <linux/rcupdate.h> /* rcu_expedited */ | ||
| 21 | 24 | ||
| 22 | #define KERNEL_ATTR_RO(_name) \ | 25 | #define KERNEL_ATTR_RO(_name) \ |
| 23 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) | 26 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
| @@ -160,8 +163,8 @@ KERNEL_ATTR_RW(rcu_expedited); | |||
| 160 | /* | 163 | /* |
| 161 | * Make /sys/kernel/notes give the raw contents of our kernel .notes section. | 164 | * Make /sys/kernel/notes give the raw contents of our kernel .notes section. |
| 162 | */ | 165 | */ |
| 163 | extern const void __start_notes __attribute__((weak)); | 166 | extern const void __start_notes __weak; |
| 164 | extern const void __stop_notes __attribute__((weak)); | 167 | extern const void __stop_notes __weak; |
| 165 | #define notes_size (&__stop_notes - &__start_notes) | 168 | #define notes_size (&__stop_notes - &__start_notes) |
| 166 | 169 | ||
| 167 | static ssize_t notes_read(struct file *filp, struct kobject *kobj, | 170 | static ssize_t notes_read(struct file *filp, struct kobject *kobj, |
diff --git a/kernel/kthread.c b/kernel/kthread.c index b5ae3ee860a9..9a130ec06f7a 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -217,7 +217,7 @@ int tsk_fork_get_node(struct task_struct *tsk) | |||
| 217 | if (tsk == kthreadd_task) | 217 | if (tsk == kthreadd_task) |
| 218 | return tsk->pref_node_fork; | 218 | return tsk->pref_node_fork; |
| 219 | #endif | 219 | #endif |
| 220 | return numa_node_id(); | 220 | return NUMA_NO_NODE; |
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | static void create_kthread(struct kthread_create_info *create) | 223 | static void create_kthread(struct kthread_create_info *create) |
| @@ -369,7 +369,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), | |||
| 369 | { | 369 | { |
| 370 | struct task_struct *p; | 370 | struct task_struct *p; |
| 371 | 371 | ||
| 372 | p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, | 372 | p = kthread_create_on_node(threadfn, data, cpu_to_mem(cpu), namefmt, |
| 373 | cpu); | 373 | cpu); |
| 374 | if (IS_ERR(p)) | 374 | if (IS_ERR(p)) |
| 375 | return p; | 375 | return p; |
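The NUMA_NO_NODE now returned by tsk_fork_get_node() above simply means "no node preference"; it is the same value a caller passes to kthread_create_on_node() when it does not care where the thread's stack and task_struct land. A small sketch with a placeholder thread function and name:

```c
#include <linux/kthread.h>
#include <linux/numa.h>
#include <linux/sched.h>

static int my_thread_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *my_spawn(void *data)
{
	/* NUMA_NO_NODE: let the allocator pick a node; wake_up_process() starts it */
	return kthread_create_on_node(my_thread_fn, data, NUMA_NO_NODE,
				      "my-worker");
}
```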
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index baab8e5e7f66..b8bdcd4785b7 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | 1 | ||
| 2 | obj-y += mutex.o semaphore.o rwsem.o lglock.o | 2 | obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o |
| 3 | 3 | ||
| 4 | ifdef CONFIG_FUNCTION_TRACER | 4 | ifdef CONFIG_FUNCTION_TRACER |
| 5 | CFLAGS_REMOVE_lockdep.o = -pg | 5 | CFLAGS_REMOVE_lockdep.o = -pg |
| @@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y) | |||
| 14 | obj-$(CONFIG_LOCKDEP) += lockdep_proc.o | 14 | obj-$(CONFIG_LOCKDEP) += lockdep_proc.o |
| 15 | endif | 15 | endif |
| 16 | obj-$(CONFIG_SMP) += spinlock.o | 16 | obj-$(CONFIG_SMP) += spinlock.o |
| 17 | obj-$(CONFIG_SMP) += lglock.o | ||
| 17 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o | 18 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o |
| 18 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o | 19 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o |
| 19 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o | 20 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o |
| @@ -23,3 +24,4 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | |||
| 23 | obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | 24 | obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o |
| 24 | obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o | 25 | obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o |
| 25 | obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o | 26 | obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o |
| 27 | obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o | ||
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index eb8a54783fa0..b0e9467922e1 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
| @@ -1936,12 +1936,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
| 1936 | 1936 | ||
| 1937 | for (;;) { | 1937 | for (;;) { |
| 1938 | int distance = curr->lockdep_depth - depth + 1; | 1938 | int distance = curr->lockdep_depth - depth + 1; |
| 1939 | hlock = curr->held_locks + depth-1; | 1939 | hlock = curr->held_locks + depth - 1; |
| 1940 | /* | 1940 | /* |
| 1941 | * Only non-recursive-read entries get new dependencies | 1941 | * Only non-recursive-read entries get new dependencies |
| 1942 | * added: | 1942 | * added: |
| 1943 | */ | 1943 | */ |
| 1944 | if (hlock->read != 2) { | 1944 | if (hlock->read != 2 && hlock->check) { |
| 1945 | if (!check_prev_add(curr, hlock, next, | 1945 | if (!check_prev_add(curr, hlock, next, |
| 1946 | distance, trylock_loop)) | 1946 | distance, trylock_loop)) |
| 1947 | return 0; | 1947 | return 0; |
| @@ -2098,7 +2098,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, | |||
| 2098 | * (If lookup_chain_cache() returns with 1 it acquires | 2098 | * (If lookup_chain_cache() returns with 1 it acquires |
| 2099 | * graph_lock for us) | 2099 | * graph_lock for us) |
| 2100 | */ | 2100 | */ |
| 2101 | if (!hlock->trylock && (hlock->check == 2) && | 2101 | if (!hlock->trylock && hlock->check && |
| 2102 | lookup_chain_cache(curr, hlock, chain_key)) { | 2102 | lookup_chain_cache(curr, hlock, chain_key)) { |
| 2103 | /* | 2103 | /* |
| 2104 | * Check whether last held lock: | 2104 | * Check whether last held lock: |
| @@ -2517,7 +2517,7 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark) | |||
| 2517 | 2517 | ||
| 2518 | BUG_ON(usage_bit >= LOCK_USAGE_STATES); | 2518 | BUG_ON(usage_bit >= LOCK_USAGE_STATES); |
| 2519 | 2519 | ||
| 2520 | if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys) | 2520 | if (!hlock->check) |
| 2521 | continue; | 2521 | continue; |
| 2522 | 2522 | ||
| 2523 | if (!mark_lock(curr, hlock, usage_bit)) | 2523 | if (!mark_lock(curr, hlock, usage_bit)) |
| @@ -2557,7 +2557,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip) | |||
| 2557 | debug_atomic_inc(hardirqs_on_events); | 2557 | debug_atomic_inc(hardirqs_on_events); |
| 2558 | } | 2558 | } |
| 2559 | 2559 | ||
| 2560 | void trace_hardirqs_on_caller(unsigned long ip) | 2560 | __visible void trace_hardirqs_on_caller(unsigned long ip) |
| 2561 | { | 2561 | { |
| 2562 | time_hardirqs_on(CALLER_ADDR0, ip); | 2562 | time_hardirqs_on(CALLER_ADDR0, ip); |
| 2563 | 2563 | ||
| @@ -2610,7 +2610,7 @@ EXPORT_SYMBOL(trace_hardirqs_on); | |||
| 2610 | /* | 2610 | /* |
| 2611 | * Hardirqs were disabled: | 2611 | * Hardirqs were disabled: |
| 2612 | */ | 2612 | */ |
| 2613 | void trace_hardirqs_off_caller(unsigned long ip) | 2613 | __visible void trace_hardirqs_off_caller(unsigned long ip) |
| 2614 | { | 2614 | { |
| 2615 | struct task_struct *curr = current; | 2615 | struct task_struct *curr = current; |
| 2616 | 2616 | ||
| @@ -3055,9 +3055,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 3055 | int class_idx; | 3055 | int class_idx; |
| 3056 | u64 chain_key; | 3056 | u64 chain_key; |
| 3057 | 3057 | ||
| 3058 | if (!prove_locking) | ||
| 3059 | check = 1; | ||
| 3060 | |||
| 3061 | if (unlikely(!debug_locks)) | 3058 | if (unlikely(!debug_locks)) |
| 3062 | return 0; | 3059 | return 0; |
| 3063 | 3060 | ||
| @@ -3069,8 +3066,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 3069 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 3066 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 3070 | return 0; | 3067 | return 0; |
| 3071 | 3068 | ||
| 3072 | if (lock->key == &__lockdep_no_validate__) | 3069 | if (!prove_locking || lock->key == &__lockdep_no_validate__) |
| 3073 | check = 1; | 3070 | check = 0; |
| 3074 | 3071 | ||
| 3075 | if (subclass < NR_LOCKDEP_CACHING_CLASSES) | 3072 | if (subclass < NR_LOCKDEP_CACHING_CLASSES) |
| 3076 | class = lock->class_cache[subclass]; | 3073 | class = lock->class_cache[subclass]; |
| @@ -3138,7 +3135,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 3138 | hlock->holdtime_stamp = lockstat_clock(); | 3135 | hlock->holdtime_stamp = lockstat_clock(); |
| 3139 | #endif | 3136 | #endif |
| 3140 | 3137 | ||
| 3141 | if (check == 2 && !mark_irqflags(curr, hlock)) | 3138 | if (check && !mark_irqflags(curr, hlock)) |
| 3142 | return 0; | 3139 | return 0; |
| 3143 | 3140 | ||
| 3144 | /* mark it as used: */ | 3141 | /* mark it as used: */ |
| @@ -4191,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task) | |||
| 4191 | } | 4188 | } |
| 4192 | EXPORT_SYMBOL_GPL(debug_show_held_locks); | 4189 | EXPORT_SYMBOL_GPL(debug_show_held_locks); |
| 4193 | 4190 | ||
| 4194 | void lockdep_sys_exit(void) | 4191 | asmlinkage void lockdep_sys_exit(void) |
| 4195 | { | 4192 | { |
| 4196 | struct task_struct *curr = current; | 4193 | struct task_struct *curr = current; |
| 4197 | 4194 | ||
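The lockdep.c hunks collapse the old 0/1/2 "check" levels into a boolean held_lock->check, so locks keyed to __lockdep_no_validate__ are simply tracked without dependency chains or IRQ-state validation. As a hedged sketch (the demo_ names are invented, not from this commit), this is how a driver typically opts a lock into that mode:

    /* Sketch only: mark a lock so lockdep tracks it but skips validation.
     * After this patch that corresponds to held_lock->check == 0. */
    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    static DEFINE_MUTEX(demo_big_lock);

    static void demo_setup(void)
    {
            /* Existing kernel API: attaches __lockdep_no_validate__ as the
             * lock's class key, silencing false positives for very coarse
             * or wrapper locks at the cost of losing dependency checking. */
            lockdep_set_novalidate_class(&demo_big_lock);
    }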
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c new file mode 100644 index 000000000000..f26b1a18e34e --- /dev/null +++ b/kernel/locking/locktorture.c | |||
| @@ -0,0 +1,452 @@ | |||
| 1 | /* | ||
| 2 | * Module-based torture test facility for locking | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, you can access it online at | ||
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 17 | * | ||
| 18 | * Copyright (C) IBM Corporation, 2014 | ||
| 19 | * | ||
| 20 | * Author: Paul E. McKenney <paulmck@us.ibm.com> | ||
| 21 | * Based on kernel/rcu/torture.c. | ||
| 22 | */ | ||
| 23 | #include <linux/types.h> | ||
| 24 | #include <linux/kernel.h> | ||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/kthread.h> | ||
| 28 | #include <linux/err.h> | ||
| 29 | #include <linux/spinlock.h> | ||
| 30 | #include <linux/smp.h> | ||
| 31 | #include <linux/interrupt.h> | ||
| 32 | #include <linux/sched.h> | ||
| 33 | #include <linux/atomic.h> | ||
| 34 | #include <linux/bitops.h> | ||
| 35 | #include <linux/completion.h> | ||
| 36 | #include <linux/moduleparam.h> | ||
| 37 | #include <linux/percpu.h> | ||
| 38 | #include <linux/notifier.h> | ||
| 39 | #include <linux/reboot.h> | ||
| 40 | #include <linux/freezer.h> | ||
| 41 | #include <linux/cpu.h> | ||
| 42 | #include <linux/delay.h> | ||
| 43 | #include <linux/stat.h> | ||
| 44 | #include <linux/slab.h> | ||
| 45 | #include <linux/trace_clock.h> | ||
| 46 | #include <asm/byteorder.h> | ||
| 47 | #include <linux/torture.h> | ||
| 48 | |||
| 49 | MODULE_LICENSE("GPL"); | ||
| 50 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); | ||
| 51 | |||
| 52 | torture_param(int, nwriters_stress, -1, | ||
| 53 | "Number of write-locking stress-test threads"); | ||
| 54 | torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); | ||
| 55 | torture_param(int, onoff_interval, 0, | ||
| 56 | "Time between CPU hotplugs (s), 0=disable"); | ||
| 57 | torture_param(int, shuffle_interval, 3, | ||
| 58 | "Number of jiffies between shuffles, 0=disable"); | ||
| 59 | torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); | ||
| 60 | torture_param(int, stat_interval, 60, | ||
| 61 | "Number of seconds between stats printk()s"); | ||
| 62 | torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); | ||
| 63 | torture_param(bool, verbose, true, | ||
| 64 | "Enable verbose debugging printk()s"); | ||
| 65 | |||
| 66 | static char *torture_type = "spin_lock"; | ||
| 67 | module_param(torture_type, charp, 0444); | ||
| 68 | MODULE_PARM_DESC(torture_type, | ||
| 69 | "Type of lock to torture (spin_lock, spin_lock_irq, ...)"); | ||
| 70 | |||
| 71 | static atomic_t n_lock_torture_errors; | ||
| 72 | |||
| 73 | static struct task_struct *stats_task; | ||
| 74 | static struct task_struct **writer_tasks; | ||
| 75 | |||
| 76 | static int nrealwriters_stress; | ||
| 77 | static bool lock_is_write_held; | ||
| 78 | |||
| 79 | struct lock_writer_stress_stats { | ||
| 80 | long n_write_lock_fail; | ||
| 81 | long n_write_lock_acquired; | ||
| 82 | }; | ||
| 83 | static struct lock_writer_stress_stats *lwsa; | ||
| 84 | |||
| 85 | #if defined(MODULE) || defined(CONFIG_LOCK_TORTURE_TEST_RUNNABLE) | ||
| 86 | #define LOCKTORTURE_RUNNABLE_INIT 1 | ||
| 87 | #else | ||
| 88 | #define LOCKTORTURE_RUNNABLE_INIT 0 | ||
| 89 | #endif | ||
| 90 | int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT; | ||
| 91 | module_param(locktorture_runnable, int, 0444); | ||
| 92 | MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at boot"); | ||
| 93 | |||
| 94 | /* Forward reference. */ | ||
| 95 | static void lock_torture_cleanup(void); | ||
| 96 | |||
| 97 | /* | ||
| 98 | * Operations vector for selecting different types of tests. | ||
| 99 | */ | ||
| 100 | struct lock_torture_ops { | ||
| 101 | void (*init)(void); | ||
| 102 | int (*writelock)(void); | ||
| 103 | void (*write_delay)(struct torture_random_state *trsp); | ||
| 104 | void (*writeunlock)(void); | ||
| 105 | unsigned long flags; | ||
| 106 | const char *name; | ||
| 107 | }; | ||
| 108 | |||
| 109 | static struct lock_torture_ops *cur_ops; | ||
| 110 | |||
| 111 | /* | ||
| 112 | * Definitions for lock torture testing. | ||
| 113 | */ | ||
| 114 | |||
| 115 | static int torture_lock_busted_write_lock(void) | ||
| 116 | { | ||
| 117 | return 0; /* BUGGY, do not use in real life!!! */ | ||
| 118 | } | ||
| 119 | |||
| 120 | static void torture_lock_busted_write_delay(struct torture_random_state *trsp) | ||
| 121 | { | ||
| 122 | const unsigned long longdelay_us = 100; | ||
| 123 | |||
| 124 | /* We want a long delay occasionally to force massive contention. */ | ||
| 125 | if (!(torture_random(trsp) % | ||
| 126 | (nrealwriters_stress * 2000 * longdelay_us))) | ||
| 127 | mdelay(longdelay_us); | ||
| 128 | #ifdef CONFIG_PREEMPT | ||
| 129 | if (!(torture_random(trsp) % (nrealwriters_stress * 20000))) | ||
| 130 | preempt_schedule(); /* Allow test to be preempted. */ | ||
| 131 | #endif | ||
| 132 | } | ||
| 133 | |||
| 134 | static void torture_lock_busted_write_unlock(void) | ||
| 135 | { | ||
| 136 | /* BUGGY, do not use in real life!!! */ | ||
| 137 | } | ||
| 138 | |||
| 139 | static struct lock_torture_ops lock_busted_ops = { | ||
| 140 | .writelock = torture_lock_busted_write_lock, | ||
| 141 | .write_delay = torture_lock_busted_write_delay, | ||
| 142 | .writeunlock = torture_lock_busted_write_unlock, | ||
| 143 | .name = "lock_busted" | ||
| 144 | }; | ||
| 145 | |||
| 146 | static DEFINE_SPINLOCK(torture_spinlock); | ||
| 147 | |||
| 148 | static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) | ||
| 149 | { | ||
| 150 | spin_lock(&torture_spinlock); | ||
| 151 | return 0; | ||
| 152 | } | ||
| 153 | |||
| 154 | static void torture_spin_lock_write_delay(struct torture_random_state *trsp) | ||
| 155 | { | ||
| 156 | const unsigned long shortdelay_us = 2; | ||
| 157 | const unsigned long longdelay_us = 100; | ||
| 158 | |||
| 159 | /* We want a short delay mostly to emulate likely code, and | ||
| 160 | * we want a long delay occasionally to force massive contention. | ||
| 161 | */ | ||
| 162 | if (!(torture_random(trsp) % | ||
| 163 | (nrealwriters_stress * 2000 * longdelay_us))) | ||
| 164 | mdelay(longdelay_us); | ||
| 165 | if (!(torture_random(trsp) % | ||
| 166 | (nrealwriters_stress * 2 * shortdelay_us))) | ||
| 167 | udelay(shortdelay_us); | ||
| 168 | #ifdef CONFIG_PREEMPT | ||
| 169 | if (!(torture_random(trsp) % (nrealwriters_stress * 20000))) | ||
| 170 | preempt_schedule(); /* Allow test to be preempted. */ | ||
| 171 | #endif | ||
| 172 | } | ||
| 173 | |||
| 174 | static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) | ||
| 175 | { | ||
| 176 | spin_unlock(&torture_spinlock); | ||
| 177 | } | ||
| 178 | |||
| 179 | static struct lock_torture_ops spin_lock_ops = { | ||
| 180 | .writelock = torture_spin_lock_write_lock, | ||
| 181 | .write_delay = torture_spin_lock_write_delay, | ||
| 182 | .writeunlock = torture_spin_lock_write_unlock, | ||
| 183 | .name = "spin_lock" | ||
| 184 | }; | ||
| 185 | |||
| 186 | static int torture_spin_lock_write_lock_irq(void) | ||
| 187 | __acquires(torture_spinlock_irq) | ||
| 188 | { | ||
| 189 | unsigned long flags; | ||
| 190 | |||
| 191 | spin_lock_irqsave(&torture_spinlock, flags); | ||
| 192 | cur_ops->flags = flags; | ||
| 193 | return 0; | ||
| 194 | } | ||
| 195 | |||
| 196 | static void torture_lock_spin_write_unlock_irq(void) | ||
| 197 | __releases(torture_spinlock) | ||
| 198 | { | ||
| 199 | spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags); | ||
| 200 | } | ||
| 201 | |||
| 202 | static struct lock_torture_ops spin_lock_irq_ops = { | ||
| 203 | .writelock = torture_spin_lock_write_lock_irq, | ||
| 204 | .write_delay = torture_spin_lock_write_delay, | ||
| 205 | .writeunlock = torture_lock_spin_write_unlock_irq, | ||
| 206 | .name = "spin_lock_irq" | ||
| 207 | }; | ||
| 208 | |||
| 209 | /* | ||
| 210 | * Lock torture writer kthread. Repeatedly acquires and releases | ||
| 211 | * the lock, checking for duplicate acquisitions. | ||
| 212 | */ | ||
| 213 | static int lock_torture_writer(void *arg) | ||
| 214 | { | ||
| 215 | struct lock_writer_stress_stats *lwsp = arg; | ||
| 216 | static DEFINE_TORTURE_RANDOM(rand); | ||
| 217 | |||
| 218 | VERBOSE_TOROUT_STRING("lock_torture_writer task started"); | ||
| 219 | set_user_nice(current, 19); | ||
| 220 | |||
| 221 | do { | ||
| 222 | schedule_timeout_uninterruptible(1); | ||
| 223 | cur_ops->writelock(); | ||
| 224 | if (WARN_ON_ONCE(lock_is_write_held)) | ||
| 225 | lwsp->n_write_lock_fail++; | ||
| 226 | lock_is_write_held = 1; | ||
| 227 | lwsp->n_write_lock_acquired++; | ||
| 228 | cur_ops->write_delay(&rand); | ||
| 229 | lock_is_write_held = 0; | ||
| 230 | cur_ops->writeunlock(); | ||
| 231 | stutter_wait("lock_torture_writer"); | ||
| 232 | } while (!torture_must_stop()); | ||
| 233 | torture_kthread_stopping("lock_torture_writer"); | ||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | |||
| 237 | /* | ||
| 238 | * Create a lock-torture-statistics message in the specified buffer. | ||
| 239 | */ | ||
| 240 | static void lock_torture_printk(char *page) | ||
| 241 | { | ||
| 242 | bool fail = 0; | ||
| 243 | int i; | ||
| 244 | long max = 0; | ||
| 245 | long min = lwsa[0].n_write_lock_acquired; | ||
| 246 | long long sum = 0; | ||
| 247 | |||
| 248 | for (i = 0; i < nrealwriters_stress; i++) { | ||
| 249 | if (lwsa[i].n_write_lock_fail) | ||
| 250 | fail = true; | ||
| 251 | sum += lwsa[i].n_write_lock_acquired; | ||
| 252 | if (max < lwsa[i].n_write_lock_fail) | ||
| 253 | max = lwsa[i].n_write_lock_fail; | ||
| 254 | if (min > lwsa[i].n_write_lock_fail) | ||
| 255 | min = lwsa[i].n_write_lock_fail; | ||
| 256 | } | ||
| 257 | page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG); | ||
| 258 | page += sprintf(page, | ||
| 259 | "Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", | ||
| 260 | sum, max, min, max / 2 > min ? "???" : "", | ||
| 261 | fail, fail ? "!!!" : ""); | ||
| 262 | if (fail) | ||
| 263 | atomic_inc(&n_lock_torture_errors); | ||
| 264 | } | ||
| 265 | |||
| 266 | /* | ||
| 267 | * Print torture statistics. Caller must ensure that there is only one | ||
| 268 | * call to this function at a given time!!! This is normally accomplished | ||
| 269 | * by relying on the module system to only have one copy of the module | ||
| 270 | * loaded, and then by giving the lock_torture_stats kthread full control | ||
| 271 | * (or the init/cleanup functions when lock_torture_stats thread is not | ||
| 272 | * running). | ||
| 273 | */ | ||
| 274 | static void lock_torture_stats_print(void) | ||
| 275 | { | ||
| 276 | int size = nrealwriters_stress * 200 + 8192; | ||
| 277 | char *buf; | ||
| 278 | |||
| 279 | buf = kmalloc(size, GFP_KERNEL); | ||
| 280 | if (!buf) { | ||
| 281 | pr_err("lock_torture_stats_print: Out of memory, need: %d", | ||
| 282 | size); | ||
| 283 | return; | ||
| 284 | } | ||
| 285 | lock_torture_printk(buf); | ||
| 286 | pr_alert("%s", buf); | ||
| 287 | kfree(buf); | ||
| 288 | } | ||
| 289 | |||
| 290 | /* | ||
| 291 | * Periodically prints torture statistics, if periodic statistics printing | ||
| 292 | * was specified via the stat_interval module parameter. | ||
| 293 | * | ||
| 294 | * No need to worry about fullstop here, since this one doesn't reference | ||
| 295 | * volatile state or register callbacks. | ||
| 296 | */ | ||
| 297 | static int lock_torture_stats(void *arg) | ||
| 298 | { | ||
| 299 | VERBOSE_TOROUT_STRING("lock_torture_stats task started"); | ||
| 300 | do { | ||
| 301 | schedule_timeout_interruptible(stat_interval * HZ); | ||
| 302 | lock_torture_stats_print(); | ||
| 303 | torture_shutdown_absorb("lock_torture_stats"); | ||
| 304 | } while (!torture_must_stop()); | ||
| 305 | torture_kthread_stopping("lock_torture_stats"); | ||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 309 | static inline void | ||
| 310 | lock_torture_print_module_parms(struct lock_torture_ops *cur_ops, | ||
| 311 | const char *tag) | ||
| 312 | { | ||
| 313 | pr_alert("%s" TORTURE_FLAG | ||
| 314 | "--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n", | ||
| 315 | torture_type, tag, nrealwriters_stress, stat_interval, verbose, | ||
| 316 | shuffle_interval, stutter, shutdown_secs, | ||
| 317 | onoff_interval, onoff_holdoff); | ||
| 318 | } | ||
| 319 | |||
| 320 | static void lock_torture_cleanup(void) | ||
| 321 | { | ||
| 322 | int i; | ||
| 323 | |||
| 324 | if (torture_cleanup()) | ||
| 325 | return; | ||
| 326 | |||
| 327 | if (writer_tasks) { | ||
| 328 | for (i = 0; i < nrealwriters_stress; i++) | ||
| 329 | torture_stop_kthread(lock_torture_writer, | ||
| 330 | writer_tasks[i]); | ||
| 331 | kfree(writer_tasks); | ||
| 332 | writer_tasks = NULL; | ||
| 333 | } | ||
| 334 | |||
| 335 | torture_stop_kthread(lock_torture_stats, stats_task); | ||
| 336 | lock_torture_stats_print(); /* -After- the stats thread is stopped! */ | ||
| 337 | |||
| 338 | if (atomic_read(&n_lock_torture_errors)) | ||
| 339 | lock_torture_print_module_parms(cur_ops, | ||
| 340 | "End of test: FAILURE"); | ||
| 341 | else if (torture_onoff_failures()) | ||
| 342 | lock_torture_print_module_parms(cur_ops, | ||
| 343 | "End of test: LOCK_HOTPLUG"); | ||
| 344 | else | ||
| 345 | lock_torture_print_module_parms(cur_ops, | ||
| 346 | "End of test: SUCCESS"); | ||
| 347 | } | ||
| 348 | |||
| 349 | static int __init lock_torture_init(void) | ||
| 350 | { | ||
| 351 | int i; | ||
| 352 | int firsterr = 0; | ||
| 353 | static struct lock_torture_ops *torture_ops[] = { | ||
| 354 | &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops, | ||
| 355 | }; | ||
| 356 | |||
| 357 | torture_init_begin(torture_type, verbose, &locktorture_runnable); | ||
| 358 | |||
| 359 | /* Process args and tell the world that the torturer is on the job. */ | ||
| 360 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { | ||
| 361 | cur_ops = torture_ops[i]; | ||
| 362 | if (strcmp(torture_type, cur_ops->name) == 0) | ||
| 363 | break; | ||
| 364 | } | ||
| 365 | if (i == ARRAY_SIZE(torture_ops)) { | ||
| 366 | pr_alert("lock-torture: invalid torture type: \"%s\"\n", | ||
| 367 | torture_type); | ||
| 368 | pr_alert("lock-torture types:"); | ||
| 369 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) | ||
| 370 | pr_alert(" %s", torture_ops[i]->name); | ||
| 371 | pr_alert("\n"); | ||
| 372 | torture_init_end(); | ||
| 373 | return -EINVAL; | ||
| 374 | } | ||
| 375 | if (cur_ops->init) | ||
| 376 | cur_ops->init(); /* no "goto unwind" prior to this point!!! */ | ||
| 377 | |||
| 378 | if (nwriters_stress >= 0) | ||
| 379 | nrealwriters_stress = nwriters_stress; | ||
| 380 | else | ||
| 381 | nrealwriters_stress = 2 * num_online_cpus(); | ||
| 382 | lock_torture_print_module_parms(cur_ops, "Start of test"); | ||
| 383 | |||
| 384 | /* Initialize the statistics so that each run gets its own numbers. */ | ||
| 385 | |||
| 386 | lock_is_write_held = 0; | ||
| 387 | lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL); | ||
| 388 | if (lwsa == NULL) { | ||
| 389 | VERBOSE_TOROUT_STRING("lwsa: Out of memory"); | ||
| 390 | firsterr = -ENOMEM; | ||
| 391 | goto unwind; | ||
| 392 | } | ||
| 393 | for (i = 0; i < nrealwriters_stress; i++) { | ||
| 394 | lwsa[i].n_write_lock_fail = 0; | ||
| 395 | lwsa[i].n_write_lock_acquired = 0; | ||
| 396 | } | ||
| 397 | |||
| 398 | /* Start up the kthreads. */ | ||
| 399 | |||
| 400 | if (onoff_interval > 0) { | ||
| 401 | firsterr = torture_onoff_init(onoff_holdoff * HZ, | ||
| 402 | onoff_interval * HZ); | ||
| 403 | if (firsterr) | ||
| 404 | goto unwind; | ||
| 405 | } | ||
| 406 | if (shuffle_interval > 0) { | ||
| 407 | firsterr = torture_shuffle_init(shuffle_interval); | ||
| 408 | if (firsterr) | ||
| 409 | goto unwind; | ||
| 410 | } | ||
| 411 | if (shutdown_secs > 0) { | ||
| 412 | firsterr = torture_shutdown_init(shutdown_secs, | ||
| 413 | lock_torture_cleanup); | ||
| 414 | if (firsterr) | ||
| 415 | goto unwind; | ||
| 416 | } | ||
| 417 | if (stutter > 0) { | ||
| 418 | firsterr = torture_stutter_init(stutter); | ||
| 419 | if (firsterr) | ||
| 420 | goto unwind; | ||
| 421 | } | ||
| 422 | |||
| 423 | writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]), | ||
| 424 | GFP_KERNEL); | ||
| 425 | if (writer_tasks == NULL) { | ||
| 426 | VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); | ||
| 427 | firsterr = -ENOMEM; | ||
| 428 | goto unwind; | ||
| 429 | } | ||
| 430 | for (i = 0; i < nrealwriters_stress; i++) { | ||
| 431 | firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i], | ||
| 432 | writer_tasks[i]); | ||
| 433 | if (firsterr) | ||
| 434 | goto unwind; | ||
| 435 | } | ||
| 436 | if (stat_interval > 0) { | ||
| 437 | firsterr = torture_create_kthread(lock_torture_stats, NULL, | ||
| 438 | stats_task); | ||
| 439 | if (firsterr) | ||
| 440 | goto unwind; | ||
| 441 | } | ||
| 442 | torture_init_end(); | ||
| 443 | return 0; | ||
| 444 | |||
| 445 | unwind: | ||
| 446 | torture_init_end(); | ||
| 447 | lock_torture_cleanup(); | ||
| 448 | return firsterr; | ||
| 449 | } | ||
| 450 | |||
| 451 | module_init(lock_torture_init); | ||
| 452 | module_exit(lock_torture_cleanup); | ||
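locktorture.c selects its lock type at module-load time through the lock_torture_ops function-pointer vector and falls back to -EINVAL when the requested name is unknown. The standalone userspace program below mirrors that plug-in pattern with POSIX locks so it can be compiled and run outside the kernel; it is an analogy only (struct ops and the *_op names are invented), not the kernel interface.

    /* Userspace analogy of the lock_torture_ops vector: pick a lock
     * implementation by name and exercise it from the main thread.
     * Build with: cc -O2 -o opsdemo opsdemo.c -lpthread */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct ops {                          /* mirrors struct lock_torture_ops */
            int  (*writelock)(void);
            void (*writeunlock)(void);
            const char *name;
    };

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static int  mutex_lock_op(void)   { return pthread_mutex_lock(&mtx); }
    static void mutex_unlock_op(void) { pthread_mutex_unlock(&mtx); }

    static pthread_spinlock_t spin;
    static int  spin_lock_op(void)    { return pthread_spin_lock(&spin); }
    static void spin_unlock_op(void)  { pthread_spin_unlock(&spin); }

    static struct ops ops_table[] = {
            { mutex_lock_op, mutex_unlock_op, "mutex" },
            { spin_lock_op,  spin_unlock_op,  "spin_lock" },
    };

    int main(int argc, char **argv)
    {
            const char *type = argc > 1 ? argv[1] : "spin_lock";
            struct ops *cur = NULL;
            unsigned long acquired = 0;
            size_t i;

            pthread_spin_init(&spin, PTHREAD_PROCESS_PRIVATE);
            for (i = 0; i < sizeof(ops_table) / sizeof(ops_table[0]); i++)
                    if (!strcmp(type, ops_table[i].name))
                            cur = &ops_table[i];
            if (!cur) {                   /* same failure mode as -EINVAL above */
                    fprintf(stderr, "unknown lock type %s\n", type);
                    return 1;
            }
            for (i = 0; i < 1000000; i++) {
                    cur->writelock();
                    acquired++;           /* "statistics", kept trivially */
                    cur->writeunlock();
            }
            printf("%s: Writes: Total: %lu\n", cur->name, acquired);
            return 0;
    }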
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c new file mode 100644 index 000000000000..838dc9e00669 --- /dev/null +++ b/kernel/locking/mcs_spinlock.c | |||
| @@ -0,0 +1,178 @@ | |||
| 1 | |||
| 2 | #include <linux/percpu.h> | ||
| 3 | #include <linux/mutex.h> | ||
| 4 | #include <linux/sched.h> | ||
| 5 | #include "mcs_spinlock.h" | ||
| 6 | |||
| 7 | #ifdef CONFIG_SMP | ||
| 8 | |||
| 9 | /* | ||
| 10 | * An MCS like lock especially tailored for optimistic spinning for sleeping | ||
| 11 | * lock implementations (mutex, rwsem, etc). | ||
| 12 | * | ||
| 13 | * Using a single mcs node per CPU is safe because sleeping locks should not be | ||
| 14 | * called from interrupt context and we have preemption disabled while | ||
| 15 | * spinning. | ||
| 16 | */ | ||
| 17 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node); | ||
| 18 | |||
| 19 | /* | ||
| 20 | * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. | ||
| 21 | * Can return NULL in case we were the last queued and we updated @lock instead. | ||
| 22 | */ | ||
| 23 | static inline struct optimistic_spin_queue * | ||
| 24 | osq_wait_next(struct optimistic_spin_queue **lock, | ||
| 25 | struct optimistic_spin_queue *node, | ||
| 26 | struct optimistic_spin_queue *prev) | ||
| 27 | { | ||
| 28 | struct optimistic_spin_queue *next = NULL; | ||
| 29 | |||
| 30 | for (;;) { | ||
| 31 | if (*lock == node && cmpxchg(lock, node, prev) == node) { | ||
| 32 | /* | ||
| 33 | * We were the last queued, we moved @lock back. @prev | ||
| 34 | * will now observe @lock and will complete its | ||
| 35 | * unlock()/unqueue(). | ||
| 36 | */ | ||
| 37 | break; | ||
| 38 | } | ||
| 39 | |||
| 40 | /* | ||
| 41 | * We must xchg() the @node->next value, because if we were to | ||
| 42 | * leave it in, a concurrent unlock()/unqueue() from | ||
| 43 | * @node->next might complete Step-A and think its @prev is | ||
| 44 | * still valid. | ||
| 45 | * | ||
| 46 | * If the concurrent unlock()/unqueue() wins the race, we'll | ||
| 47 | * wait for either @lock to point to us, through its Step-B, or | ||
| 48 | * wait for a new @node->next from its Step-C. | ||
| 49 | */ | ||
| 50 | if (node->next) { | ||
| 51 | next = xchg(&node->next, NULL); | ||
| 52 | if (next) | ||
| 53 | break; | ||
| 54 | } | ||
| 55 | |||
| 56 | arch_mutex_cpu_relax(); | ||
| 57 | } | ||
| 58 | |||
| 59 | return next; | ||
| 60 | } | ||
| 61 | |||
| 62 | bool osq_lock(struct optimistic_spin_queue **lock) | ||
| 63 | { | ||
| 64 | struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); | ||
| 65 | struct optimistic_spin_queue *prev, *next; | ||
| 66 | |||
| 67 | node->locked = 0; | ||
| 68 | node->next = NULL; | ||
| 69 | |||
| 70 | node->prev = prev = xchg(lock, node); | ||
| 71 | if (likely(prev == NULL)) | ||
| 72 | return true; | ||
| 73 | |||
| 74 | ACCESS_ONCE(prev->next) = node; | ||
| 75 | |||
| 76 | /* | ||
| 77 | * Normally @prev is untouchable after the above store; because at that | ||
| 78 | * moment unlock can proceed and wipe the node element from stack. | ||
| 79 | * | ||
| 80 | * However, since our nodes are static per-cpu storage, we're | ||
| 81 | * guaranteed their existence -- this allows us to apply | ||
| 82 | * cmpxchg in an attempt to undo our queueing. | ||
| 83 | */ | ||
| 84 | |||
| 85 | while (!smp_load_acquire(&node->locked)) { | ||
| 86 | /* | ||
| 87 | * If we need to reschedule bail... so we can block. | ||
| 88 | */ | ||
| 89 | if (need_resched()) | ||
| 90 | goto unqueue; | ||
| 91 | |||
| 92 | arch_mutex_cpu_relax(); | ||
| 93 | } | ||
| 94 | return true; | ||
| 95 | |||
| 96 | unqueue: | ||
| 97 | /* | ||
| 98 | * Step - A -- stabilize @prev | ||
| 99 | * | ||
| 100 | * Undo our @prev->next assignment; this will make @prev's | ||
| 101 | * unlock()/unqueue() wait for a next pointer since @lock points to us | ||
| 102 | * (or later). | ||
| 103 | */ | ||
| 104 | |||
| 105 | for (;;) { | ||
| 106 | if (prev->next == node && | ||
| 107 | cmpxchg(&prev->next, node, NULL) == node) | ||
| 108 | break; | ||
| 109 | |||
| 110 | /* | ||
| 111 | * We can only fail the cmpxchg() racing against an unlock(), | ||
| 112 | * in which case we should observe @node->locked becoming | ||
| 113 | * true. | ||
| 114 | */ | ||
| 115 | if (smp_load_acquire(&node->locked)) | ||
| 116 | return true; | ||
| 117 | |||
| 118 | arch_mutex_cpu_relax(); | ||
| 119 | |||
| 120 | /* | ||
| 121 | * Or we race against a concurrent unqueue()'s step-B, in which | ||
| 122 | * case its step-C will write us a new @node->prev pointer. | ||
| 123 | */ | ||
| 124 | prev = ACCESS_ONCE(node->prev); | ||
| 125 | } | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Step - B -- stabilize @next | ||
| 129 | * | ||
| 130 | * Similar to unlock(), wait for @node->next or move @lock from @node | ||
| 131 | * back to @prev. | ||
| 132 | */ | ||
| 133 | |||
| 134 | next = osq_wait_next(lock, node, prev); | ||
| 135 | if (!next) | ||
| 136 | return false; | ||
| 137 | |||
| 138 | /* | ||
| 139 | * Step - C -- unlink | ||
| 140 | * | ||
| 141 | * @prev is stable because it's still waiting for a new @prev->next | ||
| 142 | * pointer, @next is stable because our @node->next pointer is NULL and | ||
| 143 | * it will wait in Step-A. | ||
| 144 | */ | ||
| 145 | |||
| 146 | ACCESS_ONCE(next->prev) = prev; | ||
| 147 | ACCESS_ONCE(prev->next) = next; | ||
| 148 | |||
| 149 | return false; | ||
| 150 | } | ||
| 151 | |||
| 152 | void osq_unlock(struct optimistic_spin_queue **lock) | ||
| 153 | { | ||
| 154 | struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); | ||
| 155 | struct optimistic_spin_queue *next; | ||
| 156 | |||
| 157 | /* | ||
| 158 | * Fast path for the uncontended case. | ||
| 159 | */ | ||
| 160 | if (likely(cmpxchg(lock, node, NULL) == node)) | ||
| 161 | return; | ||
| 162 | |||
| 163 | /* | ||
| 164 | * Second most likely case. | ||
| 165 | */ | ||
| 166 | next = xchg(&node->next, NULL); | ||
| 167 | if (next) { | ||
| 168 | ACCESS_ONCE(next->locked) = 1; | ||
| 169 | return; | ||
| 170 | } | ||
| 171 | |||
| 172 | next = osq_wait_next(lock, node, NULL); | ||
| 173 | if (next) | ||
| 174 | ACCESS_ONCE(next->locked) = 1; | ||
| 175 | } | ||
| 176 | |||
| 177 | #endif | ||
| 178 | |||
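osq_lock() differs from a plain MCS acquire in one way that matters for sleeping locks: it can give up. When need_resched() fires, the waiter unqueues itself through Steps A-C above and osq_lock() returns false, telling the caller to fall back to blocking. A hedged caller-side sketch follows; the demo_lock type and the demo_* helpers are hypothetical, and the real user is the mutex slowpath shown further down this page.

    /* Sketch of the intended calling convention, assuming "lock" embeds an
     * optimistic_spin_queue pointer named osq, as struct mutex now does. */
    static bool demo_optimistic_spin(struct demo_lock *lock)
    {
            if (!osq_lock(&lock->osq))
                    return false;           /* need_resched(): don't spin at all */

            while (demo_owner_is_running(lock)) {
                    if (demo_try_acquire(lock)) {
                            osq_unlock(&lock->osq);
                            return true;    /* got the lock without sleeping */
                    }
                    arch_mutex_cpu_relax();
            }

            osq_unlock(&lock->osq);
            return false;                   /* caller takes the blocking slowpath */
    }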
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h new file mode 100644 index 000000000000..a2dbac4aca6b --- /dev/null +++ b/kernel/locking/mcs_spinlock.h | |||
| @@ -0,0 +1,129 @@ | |||
| 1 | /* | ||
| 2 | * MCS lock defines | ||
| 3 | * | ||
| 4 | * This file contains the main data structure and API definitions of MCS lock. | ||
| 5 | * | ||
| 6 | * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock | ||
| 7 | * with the desirable properties of being fair, and with each cpu trying | ||
| 8 | * to acquire the lock spinning on a local variable. | ||
| 9 | * It avoids expensive cache bouncing that common test-and-set spin-lock | ||
| 10 | * implementations incur. | ||
| 11 | */ | ||
| 12 | #ifndef __LINUX_MCS_SPINLOCK_H | ||
| 13 | #define __LINUX_MCS_SPINLOCK_H | ||
| 14 | |||
| 15 | #include <asm/mcs_spinlock.h> | ||
| 16 | |||
| 17 | struct mcs_spinlock { | ||
| 18 | struct mcs_spinlock *next; | ||
| 19 | int locked; /* 1 if lock acquired */ | ||
| 20 | }; | ||
| 21 | |||
| 22 | #ifndef arch_mcs_spin_lock_contended | ||
| 23 | /* | ||
| 24 | * Using smp_load_acquire() provides a memory barrier that ensures | ||
| 25 | * subsequent operations happen after the lock is acquired. | ||
| 26 | */ | ||
| 27 | #define arch_mcs_spin_lock_contended(l) \ | ||
| 28 | do { \ | ||
| 29 | while (!(smp_load_acquire(l))) \ | ||
| 30 | arch_mutex_cpu_relax(); \ | ||
| 31 | } while (0) | ||
| 32 | #endif | ||
| 33 | |||
| 34 | #ifndef arch_mcs_spin_unlock_contended | ||
| 35 | /* | ||
| 36 | * smp_store_release() provides a memory barrier to ensure all | ||
| 37 | * operations in the critical section have been completed before | ||
| 38 | * unlocking. | ||
| 39 | */ | ||
| 40 | #define arch_mcs_spin_unlock_contended(l) \ | ||
| 41 | smp_store_release((l), 1) | ||
| 42 | #endif | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Note: the smp_load_acquire/smp_store_release pair is not | ||
| 46 | * sufficient to form a full memory barrier across | ||
| 47 | * cpus for many architectures (except x86) for mcs_unlock and mcs_lock. | ||
| 48 | * For applications that need a full barrier across multiple cpus | ||
| 49 | * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be | ||
| 50 | * used after mcs_lock. | ||
| 51 | */ | ||
| 52 | |||
| 53 | /* | ||
| 54 | * In order to acquire the lock, the caller should declare a local node and | ||
| 55 | * pass a reference of the node to this function in addition to the lock. | ||
| 56 | * If the lock has already been acquired, then this will proceed to spin | ||
| 57 | * on this node->locked until the previous lock holder sets the node->locked | ||
| 58 | * in mcs_spin_unlock(). | ||
| 59 | * | ||
| 60 | * We don't inline mcs_spin_lock() so that perf can correctly account for the | ||
| 61 | * time spent in this lock function. | ||
| 62 | */ | ||
| 63 | static inline | ||
| 64 | void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) | ||
| 65 | { | ||
| 66 | struct mcs_spinlock *prev; | ||
| 67 | |||
| 68 | /* Init node */ | ||
| 69 | node->locked = 0; | ||
| 70 | node->next = NULL; | ||
| 71 | |||
| 72 | prev = xchg(lock, node); | ||
| 73 | if (likely(prev == NULL)) { | ||
| 74 | /* | ||
| 75 | * Lock acquired, don't need to set node->locked to 1. Threads | ||
| 76 | * only spin on their own node->locked value for lock acquisition. | ||
| 77 | * However, since this thread can immediately acquire the lock | ||
| 78 | * and does not proceed to spin on its own node->locked, this | ||
| 79 | * value won't be used. If a debug mode is needed to | ||
| 80 | * audit lock status, then set node->locked value here. | ||
| 81 | */ | ||
| 82 | return; | ||
| 83 | } | ||
| 84 | ACCESS_ONCE(prev->next) = node; | ||
| 85 | |||
| 86 | /* Wait until the lock holder passes the lock down. */ | ||
| 87 | arch_mcs_spin_lock_contended(&node->locked); | ||
| 88 | } | ||
| 89 | |||
| 90 | /* | ||
| 91 | * Releases the lock. The caller should pass in the corresponding node that | ||
| 92 | * was used to acquire the lock. | ||
| 93 | */ | ||
| 94 | static inline | ||
| 95 | void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) | ||
| 96 | { | ||
| 97 | struct mcs_spinlock *next = ACCESS_ONCE(node->next); | ||
| 98 | |||
| 99 | if (likely(!next)) { | ||
| 100 | /* | ||
| 101 | * Release the lock by setting it to NULL | ||
| 102 | */ | ||
| 103 | if (likely(cmpxchg(lock, node, NULL) == node)) | ||
| 104 | return; | ||
| 105 | /* Wait until the next pointer is set */ | ||
| 106 | while (!(next = ACCESS_ONCE(node->next))) | ||
| 107 | arch_mutex_cpu_relax(); | ||
| 108 | } | ||
| 109 | |||
| 110 | /* Pass lock to next waiter. */ | ||
| 111 | arch_mcs_spin_unlock_contended(&next->locked); | ||
| 112 | } | ||
| 113 | |||
| 114 | /* | ||
| 115 | * Cancellable version of the MCS lock above. | ||
| 116 | * | ||
| 117 | * Intended for adaptive spinning of sleeping locks: | ||
| 118 | * mutex_lock()/rwsem_down_{read,write}() etc. | ||
| 119 | */ | ||
| 120 | |||
| 121 | struct optimistic_spin_queue { | ||
| 122 | struct optimistic_spin_queue *next, *prev; | ||
| 123 | int locked; /* 1 if lock acquired */ | ||
| 124 | }; | ||
| 125 | |||
| 126 | extern bool osq_lock(struct optimistic_spin_queue **lock); | ||
| 127 | extern void osq_unlock(struct optimistic_spin_queue **lock); | ||
| 128 | |||
| 129 | #endif /* __LINUX_MCS_SPINLOCK_H */ | ||
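The acquire/release pairing that arch_mcs_spin_lock_contended() and arch_mcs_spin_unlock_contended() rely on can be reproduced in userspace with C11 atomics. The program below is a self-contained illustration of the queued hand-off described in this header; it is not the kernel code (per-thread on-stack nodes replace the per-CPU node, and atomic_exchange stands in for xchg()).

    /* Minimal userspace MCS lock: each waiter spins on its own node.
     * Build with: cc -O2 -std=gnu11 -o mcs mcs.c -lpthread */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct mcs_node {
            _Atomic(struct mcs_node *) next;
            atomic_int locked;               /* 1 once the lock is handed to us */
    };

    static _Atomic(struct mcs_node *) lock_tail;   /* NULL == unlocked */
    static long counter;                           /* protected by the lock */

    static void mcs_lock(struct mcs_node *node)
    {
            struct mcs_node *prev;

            atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
            atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

            prev = atomic_exchange(&lock_tail, node);        /* like xchg() */
            if (!prev)
                    return;                                   /* uncontended */
            atomic_store_explicit(&prev->next, node, memory_order_release);
            while (!atomic_load_explicit(&node->locked, memory_order_acquire))
                    ;                                         /* local spin */
    }

    static void mcs_unlock(struct mcs_node *node)
    {
            struct mcs_node *next =
                    atomic_load_explicit(&node->next, memory_order_acquire);

            if (!next) {
                    struct mcs_node *expected = node;
                    if (atomic_compare_exchange_strong(&lock_tail, &expected, NULL))
                            return;                           /* nobody queued */
                    while (!(next = atomic_load_explicit(&node->next,
                                                         memory_order_acquire)))
                            ;                                 /* successor racing in */
            }
            atomic_store_explicit(&next->locked, 1, memory_order_release);
    }

    static void *worker(void *arg)
    {
            struct mcs_node node;                             /* on-stack node */
            for (int i = 0; i < 100000; i++) {
                    mcs_lock(&node);
                    counter++;
                    mcs_unlock(&node);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t[4];
            for (int i = 0; i < 4; i++)
                    pthread_create(&t[i], NULL, worker, NULL);
            for (int i = 0; i < 4; i++)
                    pthread_join(t[i], NULL);
            printf("counter = %ld (expect 400000)\n", counter);
            return 0;
    }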
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index faf6f5b53e77..e1191c996c59 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c | |||
| @@ -83,6 +83,12 @@ void debug_mutex_unlock(struct mutex *lock) | |||
| 83 | 83 | ||
| 84 | DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); | 84 | DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); |
| 85 | mutex_clear_owner(lock); | 85 | mutex_clear_owner(lock); |
| 86 | |||
| 87 | /* | ||
| 88 | * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug | ||
| 89 | * mutexes so that we can do it here after we've verified state. | ||
| 90 | */ | ||
| 91 | atomic_set(&lock->count, 1); | ||
| 86 | } | 92 | } |
| 87 | 93 | ||
| 88 | void debug_mutex_init(struct mutex *lock, const char *name, | 94 | void debug_mutex_init(struct mutex *lock, const char *name, |
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 4dd6e4c219de..bc73d33c6760 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
| 26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
| 27 | #include <linux/debug_locks.h> | 27 | #include <linux/debug_locks.h> |
| 28 | #include "mcs_spinlock.h" | ||
| 28 | 29 | ||
| 29 | /* | 30 | /* |
| 30 | * In the DEBUG case we are using the "NULL fastpath" for mutexes, | 31 | * In the DEBUG case we are using the "NULL fastpath" for mutexes, |
| @@ -33,6 +34,13 @@ | |||
| 33 | #ifdef CONFIG_DEBUG_MUTEXES | 34 | #ifdef CONFIG_DEBUG_MUTEXES |
| 34 | # include "mutex-debug.h" | 35 | # include "mutex-debug.h" |
| 35 | # include <asm-generic/mutex-null.h> | 36 | # include <asm-generic/mutex-null.h> |
| 37 | /* | ||
| 38 | * Must be 0 for the debug case so we do not do the unlock outside of the | ||
| 39 | * wait_lock region. debug_mutex_unlock() will do the actual unlock in this | ||
| 40 | * case. | ||
| 41 | */ | ||
| 42 | # undef __mutex_slowpath_needs_to_unlock | ||
| 43 | # define __mutex_slowpath_needs_to_unlock() 0 | ||
| 36 | #else | 44 | #else |
| 37 | # include "mutex.h" | 45 | # include "mutex.h" |
| 38 | # include <asm/mutex.h> | 46 | # include <asm/mutex.h> |
| @@ -52,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) | |||
| 52 | INIT_LIST_HEAD(&lock->wait_list); | 60 | INIT_LIST_HEAD(&lock->wait_list); |
| 53 | mutex_clear_owner(lock); | 61 | mutex_clear_owner(lock); |
| 54 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 62 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
| 55 | lock->spin_mlock = NULL; | 63 | lock->osq = NULL; |
| 56 | #endif | 64 | #endif |
| 57 | 65 | ||
| 58 | debug_mutex_init(lock, name, key); | 66 | debug_mutex_init(lock, name, key); |
| @@ -67,8 +75,7 @@ EXPORT_SYMBOL(__mutex_init); | |||
| 67 | * We also put the fastpath first in the kernel image, to make sure the | 75 | * We also put the fastpath first in the kernel image, to make sure the |
| 68 | * branch is predicted by the CPU as default-untaken. | 76 | * branch is predicted by the CPU as default-untaken. |
| 69 | */ | 77 | */ |
| 70 | static __used noinline void __sched | 78 | __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count); |
| 71 | __mutex_lock_slowpath(atomic_t *lock_count); | ||
| 72 | 79 | ||
| 73 | /** | 80 | /** |
| 74 | * mutex_lock - acquire the mutex | 81 | * mutex_lock - acquire the mutex |
| @@ -111,54 +118,7 @@ EXPORT_SYMBOL(mutex_lock); | |||
| 111 | * more or less simultaneously, the spinners need to acquire a MCS lock | 118 | * more or less simultaneously, the spinners need to acquire a MCS lock |
| 112 | * first before spinning on the owner field. | 119 | * first before spinning on the owner field. |
| 113 | * | 120 | * |
| 114 | * We don't inline mspin_lock() so that perf can correctly account for the | ||
| 115 | * time spent in this lock function. | ||
| 116 | */ | 121 | */ |
| 117 | struct mspin_node { | ||
| 118 | struct mspin_node *next ; | ||
| 119 | int locked; /* 1 if lock acquired */ | ||
| 120 | }; | ||
| 121 | #define MLOCK(mutex) ((struct mspin_node **)&((mutex)->spin_mlock)) | ||
| 122 | |||
| 123 | static noinline | ||
| 124 | void mspin_lock(struct mspin_node **lock, struct mspin_node *node) | ||
| 125 | { | ||
| 126 | struct mspin_node *prev; | ||
| 127 | |||
| 128 | /* Init node */ | ||
| 129 | node->locked = 0; | ||
| 130 | node->next = NULL; | ||
| 131 | |||
| 132 | prev = xchg(lock, node); | ||
| 133 | if (likely(prev == NULL)) { | ||
| 134 | /* Lock acquired */ | ||
| 135 | node->locked = 1; | ||
| 136 | return; | ||
| 137 | } | ||
| 138 | ACCESS_ONCE(prev->next) = node; | ||
| 139 | smp_wmb(); | ||
| 140 | /* Wait until the lock holder passes the lock down */ | ||
| 141 | while (!ACCESS_ONCE(node->locked)) | ||
| 142 | arch_mutex_cpu_relax(); | ||
| 143 | } | ||
| 144 | |||
| 145 | static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node) | ||
| 146 | { | ||
| 147 | struct mspin_node *next = ACCESS_ONCE(node->next); | ||
| 148 | |||
| 149 | if (likely(!next)) { | ||
| 150 | /* | ||
| 151 | * Release the lock by setting it to NULL | ||
| 152 | */ | ||
| 153 | if (cmpxchg(lock, node, NULL) == node) | ||
| 154 | return; | ||
| 155 | /* Wait until the next pointer is set */ | ||
| 156 | while (!(next = ACCESS_ONCE(node->next))) | ||
| 157 | arch_mutex_cpu_relax(); | ||
| 158 | } | ||
| 159 | ACCESS_ONCE(next->locked) = 1; | ||
| 160 | smp_wmb(); | ||
| 161 | } | ||
| 162 | 122 | ||
| 163 | /* | 123 | /* |
| 164 | * Mutex spinning code migrated from kernel/sched/core.c | 124 | * Mutex spinning code migrated from kernel/sched/core.c |
| @@ -212,6 +172,9 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock) | |||
| 212 | struct task_struct *owner; | 172 | struct task_struct *owner; |
| 213 | int retval = 1; | 173 | int retval = 1; |
| 214 | 174 | ||
| 175 | if (need_resched()) | ||
| 176 | return 0; | ||
| 177 | |||
| 215 | rcu_read_lock(); | 178 | rcu_read_lock(); |
| 216 | owner = ACCESS_ONCE(lock->owner); | 179 | owner = ACCESS_ONCE(lock->owner); |
| 217 | if (owner) | 180 | if (owner) |
| @@ -225,7 +188,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock) | |||
| 225 | } | 188 | } |
| 226 | #endif | 189 | #endif |
| 227 | 190 | ||
| 228 | static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); | 191 | __visible __used noinline |
| 192 | void __sched __mutex_unlock_slowpath(atomic_t *lock_count); | ||
| 229 | 193 | ||
| 230 | /** | 194 | /** |
| 231 | * mutex_unlock - release the mutex | 195 | * mutex_unlock - release the mutex |
| @@ -446,9 +410,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 446 | if (!mutex_can_spin_on_owner(lock)) | 410 | if (!mutex_can_spin_on_owner(lock)) |
| 447 | goto slowpath; | 411 | goto slowpath; |
| 448 | 412 | ||
| 413 | if (!osq_lock(&lock->osq)) | ||
| 414 | goto slowpath; | ||
| 415 | |||
| 449 | for (;;) { | 416 | for (;;) { |
| 450 | struct task_struct *owner; | 417 | struct task_struct *owner; |
| 451 | struct mspin_node node; | ||
| 452 | 418 | ||
| 453 | if (use_ww_ctx && ww_ctx->acquired > 0) { | 419 | if (use_ww_ctx && ww_ctx->acquired > 0) { |
| 454 | struct ww_mutex *ww; | 420 | struct ww_mutex *ww; |
| @@ -463,19 +429,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 463 | * performed the optimistic spinning cannot be done. | 429 | * performed the optimistic spinning cannot be done. |
| 464 | */ | 430 | */ |
| 465 | if (ACCESS_ONCE(ww->ctx)) | 431 | if (ACCESS_ONCE(ww->ctx)) |
| 466 | goto slowpath; | 432 | break; |
| 467 | } | 433 | } |
| 468 | 434 | ||
| 469 | /* | 435 | /* |
| 470 | * If there's an owner, wait for it to either | 436 | * If there's an owner, wait for it to either |
| 471 | * release the lock or go to sleep. | 437 | * release the lock or go to sleep. |
| 472 | */ | 438 | */ |
| 473 | mspin_lock(MLOCK(lock), &node); | ||
| 474 | owner = ACCESS_ONCE(lock->owner); | 439 | owner = ACCESS_ONCE(lock->owner); |
| 475 | if (owner && !mutex_spin_on_owner(lock, owner)) { | 440 | if (owner && !mutex_spin_on_owner(lock, owner)) |
| 476 | mspin_unlock(MLOCK(lock), &node); | 441 | break; |
| 477 | goto slowpath; | ||
| 478 | } | ||
| 479 | 442 | ||
| 480 | if ((atomic_read(&lock->count) == 1) && | 443 | if ((atomic_read(&lock->count) == 1) && |
| 481 | (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { | 444 | (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { |
| @@ -488,11 +451,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 488 | } | 451 | } |
| 489 | 452 | ||
| 490 | mutex_set_owner(lock); | 453 | mutex_set_owner(lock); |
| 491 | mspin_unlock(MLOCK(lock), &node); | 454 | osq_unlock(&lock->osq); |
| 492 | preempt_enable(); | 455 | preempt_enable(); |
| 493 | return 0; | 456 | return 0; |
| 494 | } | 457 | } |
| 495 | mspin_unlock(MLOCK(lock), &node); | ||
| 496 | 458 | ||
| 497 | /* | 459 | /* |
| 498 | * When there's no owner, we might have preempted between the | 460 | * When there's no owner, we might have preempted between the |
| @@ -501,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 501 | * the owner complete. | 463 | * the owner complete. |
| 502 | */ | 464 | */ |
| 503 | if (!owner && (need_resched() || rt_task(task))) | 465 | if (!owner && (need_resched() || rt_task(task))) |
| 504 | goto slowpath; | 466 | break; |
| 505 | 467 | ||
| 506 | /* | 468 | /* |
| 507 | * The cpu_relax() call is a compiler barrier which forces | 469 | * The cpu_relax() call is a compiler barrier which forces |
| @@ -511,7 +473,15 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 511 | */ | 473 | */ |
| 512 | arch_mutex_cpu_relax(); | 474 | arch_mutex_cpu_relax(); |
| 513 | } | 475 | } |
| 476 | osq_unlock(&lock->osq); | ||
| 514 | slowpath: | 477 | slowpath: |
| 478 | /* | ||
| 479 | * If we fell out of the spin path because of need_resched(), | ||
| 480 | * reschedule now, before we try-lock the mutex. This avoids getting | ||
| 481 | * scheduled out right after we obtained the mutex. | ||
| 482 | */ | ||
| 483 | if (need_resched()) | ||
| 484 | schedule_preempt_disabled(); | ||
| 515 | #endif | 485 | #endif |
| 516 | spin_lock_mutex(&lock->wait_lock, flags); | 486 | spin_lock_mutex(&lock->wait_lock, flags); |
| 517 | 487 | ||
| @@ -717,10 +687,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) | |||
| 717 | struct mutex *lock = container_of(lock_count, struct mutex, count); | 687 | struct mutex *lock = container_of(lock_count, struct mutex, count); |
| 718 | unsigned long flags; | 688 | unsigned long flags; |
| 719 | 689 | ||
| 720 | spin_lock_mutex(&lock->wait_lock, flags); | ||
| 721 | mutex_release(&lock->dep_map, nested, _RET_IP_); | ||
| 722 | debug_mutex_unlock(lock); | ||
| 723 | |||
| 724 | /* | 690 | /* |
| 725 | * some architectures leave the lock unlocked in the fastpath failure | 691 | * some architectures leave the lock unlocked in the fastpath failure |
| 726 | * case, others need to leave it locked. In the latter case we have to | 692 | * case, others need to leave it locked. In the latter case we have to |
| @@ -729,6 +695,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) | |||
| 729 | if (__mutex_slowpath_needs_to_unlock()) | 695 | if (__mutex_slowpath_needs_to_unlock()) |
| 730 | atomic_set(&lock->count, 1); | 696 | atomic_set(&lock->count, 1); |
| 731 | 697 | ||
| 698 | spin_lock_mutex(&lock->wait_lock, flags); | ||
| 699 | mutex_release(&lock->dep_map, nested, _RET_IP_); | ||
| 700 | debug_mutex_unlock(lock); | ||
| 701 | |||
| 732 | if (!list_empty(&lock->wait_list)) { | 702 | if (!list_empty(&lock->wait_list)) { |
| 733 | /* get the first entry from the wait-list: */ | 703 | /* get the first entry from the wait-list: */ |
| 734 | struct mutex_waiter *waiter = | 704 | struct mutex_waiter *waiter = |
| @@ -746,7 +716,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) | |||
| 746 | /* | 716 | /* |
| 747 | * Release the lock, slowpath: | 717 | * Release the lock, slowpath: |
| 748 | */ | 718 | */ |
| 749 | static __used noinline void | 719 | __visible void |
| 750 | __mutex_unlock_slowpath(atomic_t *lock_count) | 720 | __mutex_unlock_slowpath(atomic_t *lock_count) |
| 751 | { | 721 | { |
| 752 | __mutex_unlock_common_slowpath(lock_count, 1); | 722 | __mutex_unlock_common_slowpath(lock_count, 1); |
| @@ -803,7 +773,7 @@ int __sched mutex_lock_killable(struct mutex *lock) | |||
| 803 | } | 773 | } |
| 804 | EXPORT_SYMBOL(mutex_lock_killable); | 774 | EXPORT_SYMBOL(mutex_lock_killable); |
| 805 | 775 | ||
| 806 | static __used noinline void __sched | 776 | __visible void __sched |
| 807 | __mutex_lock_slowpath(atomic_t *lock_count) | 777 | __mutex_lock_slowpath(atomic_t *lock_count) |
| 808 | { | 778 | { |
| 809 | struct mutex *lock = container_of(lock_count, struct mutex, count); | 779 | struct mutex *lock = container_of(lock_count, struct mutex, count); |
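Taken together, the mutex.c hunks replace the open-coded mspin_lock()/mspin_unlock() pair with the shared OSQ, route every spin-abort through a single break to one osq_unlock() before the slowpath label, and reschedule on need_resched() before trying the lock so the task is not preempted right after acquiring it. Below is a condensed, hedged view of the resulting control flow; it is a sketch of __mutex_lock_common(), not the verbatim function (the ww-mutex and rt_task() details are omitted, and demo_lock_waitqueue is a hypothetical stand-in for the wait-list path).

    static int demo_lock_common(struct mutex *lock)
    {
            if (!mutex_can_spin_on_owner(lock) || !osq_lock(&lock->osq))
                    goto slowpath;

            for (;;) {
                    struct task_struct *owner = ACCESS_ONCE(lock->owner);

                    if (owner && !mutex_spin_on_owner(lock, owner))
                            break;                  /* owner went to sleep */
                    if (atomic_read(&lock->count) == 1 &&
                        atomic_cmpxchg(&lock->count, 1, 0) == 1) {
                            mutex_set_owner(lock);
                            osq_unlock(&lock->osq);
                            return 0;               /* won without sleeping */
                    }
                    if (!owner && need_resched())
                            break;                  /* stop burning CPU */
                    arch_mutex_cpu_relax();
            }
            osq_unlock(&lock->osq);                 /* single exit from the OSQ */
    slowpath:
            if (need_resched())
                    schedule_preempt_disabled();    /* avoid being preempted
                                                     * right after acquiring */
            return demo_lock_waitqueue(lock);       /* hypothetical wait-list path */
    }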
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 2e960a2bab81..aa4dff04b594 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
| @@ -213,6 +213,18 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task) | |||
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | /* | 215 | /* |
| 216 | * Called by sched_setscheduler() to check whether the priority change | ||
| 217 | * is overruled by a possible priority boosting. | ||
| 218 | */ | ||
| 219 | int rt_mutex_check_prio(struct task_struct *task, int newprio) | ||
| 220 | { | ||
| 221 | if (!task_has_pi_waiters(task)) | ||
| 222 | return 0; | ||
| 223 | |||
| 224 | return task_top_pi_waiter(task)->task->prio <= newprio; | ||
| 225 | } | ||
| 226 | |||
| 227 | /* | ||
| 216 | * Adjust the priority of a task, after its pi_waiters got modified. | 228 | * Adjust the priority of a task, after its pi_waiters got modified. |
| 217 | * | 229 | * |
| 218 | * This can be both boosting and unboosting. task->pi_lock must be held. | 230 | * This can be both boosting and unboosting. task->pi_lock must be held. |
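rt_mutex_check_prio() lets sched_setscheduler() ask whether a requested priority change would be overruled by priority inheritance: with the kernel's "numerically lower prio means higher priority" convention, the new priority only takes effect if it is strictly better than the top PI waiter's. A small worked example in plain C with invented numbers (no kernel types involved):

    #include <stdbool.h>
    #include <stdio.h>

    /* Kernel convention: numerically lower prio == higher priority. */
    static bool prio_change_overruled(int top_waiter_prio, int newprio)
    {
            /* Mirrors: task_top_pi_waiter(task)->task->prio <= newprio */
            return top_waiter_prio <= newprio;
    }

    int main(void)
    {
            /* Top PI waiter at prio 10: dropping the lock holder to 50 is
             * overruled (the boost wins); raising it to 5 is applied. */
            printf("drop to 50: %s\n",
                   prio_change_overruled(10, 50) ? "overruled" : "applied");
            printf("raise to 5: %s\n",
                   prio_change_overruled(10, 5) ? "overruled" : "applied");
            return 0;
    }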
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 19c5fa95e0b4..1d66e08e897d 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c | |||
| @@ -143,6 +143,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type) | |||
| 143 | /* | 143 | /* |
| 144 | * wait for the read lock to be granted | 144 | * wait for the read lock to be granted |
| 145 | */ | 145 | */ |
| 146 | __visible | ||
| 146 | struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) | 147 | struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) |
| 147 | { | 148 | { |
| 148 | long count, adjustment = -RWSEM_ACTIVE_READ_BIAS; | 149 | long count, adjustment = -RWSEM_ACTIVE_READ_BIAS; |
| @@ -190,6 +191,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) | |||
| 190 | /* | 191 | /* |
| 191 | * wait until we successfully acquire the write lock | 192 | * wait until we successfully acquire the write lock |
| 192 | */ | 193 | */ |
| 194 | __visible | ||
| 193 | struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) | 195 | struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) |
| 194 | { | 196 | { |
| 195 | long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS; | 197 | long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS; |
| @@ -252,6 +254,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) | |||
| 252 | * handle waking up a waiter on the semaphore | 254 | * handle waking up a waiter on the semaphore |
| 253 | * - up_read/up_write has decremented the active part of count if we come here | 255 | * - up_read/up_write has decremented the active part of count if we come here |
| 254 | */ | 256 | */ |
| 257 | __visible | ||
| 255 | struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | 258 | struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) |
| 256 | { | 259 | { |
| 257 | unsigned long flags; | 260 | unsigned long flags; |
| @@ -272,6 +275,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | |||
| 272 | * - caller incremented waiting part of count and discovered it still negative | 275 | * - caller incremented waiting part of count and discovered it still negative |
| 273 | * - just wake up any readers at the front of the queue | 276 | * - just wake up any readers at the front of the queue |
| 274 | */ | 277 | */ |
| 278 | __visible | ||
| 275 | struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) | 279 | struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) |
| 276 | { | 280 | { |
| 277 | unsigned long flags; | 281 | unsigned long flags; |
diff --git a/kernel/module.c b/kernel/module.c index d24fcf29cb64..11869408f79b 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -640,7 +640,7 @@ static int module_unload_init(struct module *mod) | |||
| 640 | INIT_LIST_HEAD(&mod->target_list); | 640 | INIT_LIST_HEAD(&mod->target_list); |
| 641 | 641 | ||
| 642 | /* Hold reference count during initialization. */ | 642 | /* Hold reference count during initialization. */ |
| 643 | __this_cpu_write(mod->refptr->incs, 1); | 643 | raw_cpu_write(mod->refptr->incs, 1); |
| 644 | 644 | ||
| 645 | return 0; | 645 | return 0; |
| 646 | } | 646 | } |
| @@ -1013,9 +1013,11 @@ static size_t module_flags_taint(struct module *mod, char *buf) | |||
| 1013 | buf[l++] = 'F'; | 1013 | buf[l++] = 'F'; |
| 1014 | if (mod->taints & (1 << TAINT_CRAP)) | 1014 | if (mod->taints & (1 << TAINT_CRAP)) |
| 1015 | buf[l++] = 'C'; | 1015 | buf[l++] = 'C'; |
| 1016 | if (mod->taints & (1 << TAINT_UNSIGNED_MODULE)) | ||
| 1017 | buf[l++] = 'E'; | ||
| 1016 | /* | 1018 | /* |
| 1017 | * TAINT_FORCED_RMMOD: could be added. | 1019 | * TAINT_FORCED_RMMOD: could be added. |
| 1018 | * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't | 1020 | * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't |
| 1019 | * apply to modules. | 1021 | * apply to modules. |
| 1020 | */ | 1022 | */ |
| 1021 | return l; | 1023 | return l; |
| @@ -1948,6 +1950,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) | |||
| 1948 | 1950 | ||
| 1949 | switch (sym[i].st_shndx) { | 1951 | switch (sym[i].st_shndx) { |
| 1950 | case SHN_COMMON: | 1952 | case SHN_COMMON: |
| 1953 | /* Ignore common symbols */ | ||
| 1954 | if (!strncmp(name, "__gnu_lto", 9)) | ||
| 1955 | break; | ||
| 1956 | |||
| 1951 | /* We compiled with -fno-common. These are not | 1957 | /* We compiled with -fno-common. These are not |
| 1952 | supposed to happen. */ | 1958 | supposed to happen. */ |
| 1953 | pr_debug("Common symbol: %s\n", name); | 1959 | pr_debug("Common symbol: %s\n", name); |
| @@ -3214,7 +3220,7 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3214 | pr_notice_once("%s: module verification failed: signature " | 3220 | pr_notice_once("%s: module verification failed: signature " |
| 3215 | "and/or required key missing - tainting " | 3221 | "and/or required key missing - tainting " |
| 3216 | "kernel\n", mod->name); | 3222 | "kernel\n", mod->name); |
| 3217 | add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK); | 3223 | add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); |
| 3218 | } | 3224 | } |
| 3219 | #endif | 3225 | #endif |
| 3220 | 3226 | ||
| @@ -3809,12 +3815,12 @@ void print_modules(void) | |||
| 3809 | list_for_each_entry_rcu(mod, &modules, list) { | 3815 | list_for_each_entry_rcu(mod, &modules, list) { |
| 3810 | if (mod->state == MODULE_STATE_UNFORMED) | 3816 | if (mod->state == MODULE_STATE_UNFORMED) |
| 3811 | continue; | 3817 | continue; |
| 3812 | printk(" %s%s", mod->name, module_flags(mod, buf)); | 3818 | pr_cont(" %s%s", mod->name, module_flags(mod, buf)); |
| 3813 | } | 3819 | } |
| 3814 | preempt_enable(); | 3820 | preempt_enable(); |
| 3815 | if (last_unloaded_module[0]) | 3821 | if (last_unloaded_module[0]) |
| 3816 | printk(" [last unloaded: %s]", last_unloaded_module); | 3822 | pr_cont(" [last unloaded: %s]", last_unloaded_module); |
| 3817 | printk("\n"); | 3823 | pr_cont("\n"); |
| 3818 | } | 3824 | } |
| 3819 | 3825 | ||
| 3820 | #ifdef CONFIG_MODVERSIONS | 3826 | #ifdef CONFIG_MODVERSIONS |
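The new TAINT_UNSIGNED_MODULE flag surfaces as the letter 'E' both in per-module flags and in the global taint mask (see the tnts[] addition in panic.c below). The userspace helper below checks for that bit in /proc/sys/kernel/tainted; treating 'E' as bit 13 is an assumption of this sketch, inferred from the enum order implied by tnts[], not something stated in the diff.

    /* Report whether an unsigned module has tainted the running kernel. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long mask = 0;
            FILE *f = fopen("/proc/sys/kernel/tainted", "r");

            if (!f) {
                    perror("open /proc/sys/kernel/tainted");
                    return 1;
            }
            if (fscanf(f, "%lu", &mask) != 1) {
                    fclose(f);
                    fprintf(stderr, "parse error\n");
                    return 1;
            }
            fclose(f);
            printf("taint mask: %#lx%s\n", mask,
                   (mask >> 13) & 1 ? " (E: unsigned module loaded)" : "");
            return 0;
    }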
diff --git a/kernel/notifier.c b/kernel/notifier.c index 2d5cc4ccff7f..db4c8b08a50c 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c | |||
| @@ -309,7 +309,7 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, | |||
| 309 | * racy then it does not matter what the result of the test | 309 | * racy then it does not matter what the result of the test |
| 310 | * is, we re-check the list after having taken the lock anyway: | 310 | * is, we re-check the list after having taken the lock anyway: |
| 311 | */ | 311 | */ |
| 312 | if (rcu_dereference_raw(nh->head)) { | 312 | if (rcu_access_pointer(nh->head)) { |
| 313 | down_read(&nh->rwsem); | 313 | down_read(&nh->rwsem); |
| 314 | ret = notifier_call_chain(&nh->head, val, v, nr_to_call, | 314 | ret = notifier_call_chain(&nh->head, val, v, nr_to_call, |
| 315 | nr_calls); | 315 | nr_calls); |
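The notifier.c change is an annotation fix: when code only wants to know whether an RCU-protected pointer is non-NULL and never dereferences the result, rcu_access_pointer() is the right primitive; rcu_dereference_raw() implies a dereference and hides that intent from sparse and lockdep. A hedged sketch of the two idioms (struct demo_item and demo_head are invented for illustration):

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct demo_item {
            int val;
            struct demo_item __rcu *next;
    };

    static struct demo_item __rcu *demo_head;

    static bool demo_has_items(void)
    {
            /* Only testing for NULL: no rcu_read_lock() needed, and the
             * pointer value is never dereferenced. */
            return rcu_access_pointer(demo_head) != NULL;
    }

    static int demo_first_val(void)
    {
            int val = -1;

            rcu_read_lock();
            {
                    /* Actually following the pointer: use rcu_dereference(). */
                    struct demo_item *item = rcu_dereference(demo_head);

                    if (item)
                            val = item->val;
            }
            rcu_read_unlock();
            return val;
    }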
diff --git a/kernel/panic.c b/kernel/panic.c index 6d6300375090..d02fa9fef46a 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
| @@ -100,7 +100,7 @@ void panic(const char *fmt, ...) | |||
| 100 | va_start(args, fmt); | 100 | va_start(args, fmt); |
| 101 | vsnprintf(buf, sizeof(buf), fmt, args); | 101 | vsnprintf(buf, sizeof(buf), fmt, args); |
| 102 | va_end(args); | 102 | va_end(args); |
| 103 | printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); | 103 | pr_emerg("Kernel panic - not syncing: %s\n", buf); |
| 104 | #ifdef CONFIG_DEBUG_BUGVERBOSE | 104 | #ifdef CONFIG_DEBUG_BUGVERBOSE |
| 105 | /* | 105 | /* |
| 106 | * Avoid nested stack-dumping if a panic occurs during oops processing | 106 | * Avoid nested stack-dumping if a panic occurs during oops processing |
| @@ -141,7 +141,7 @@ void panic(const char *fmt, ...) | |||
| 141 | * Delay timeout seconds before rebooting the machine. | 141 | * Delay timeout seconds before rebooting the machine. |
| 142 | * We can't use the "normal" timers since we just panicked. | 142 | * We can't use the "normal" timers since we just panicked. |
| 143 | */ | 143 | */ |
| 144 | printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout); | 144 | pr_emerg("Rebooting in %d seconds..", panic_timeout); |
| 145 | 145 | ||
| 146 | for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { | 146 | for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { |
| 147 | touch_nmi_watchdog(); | 147 | touch_nmi_watchdog(); |
| @@ -165,7 +165,7 @@ void panic(const char *fmt, ...) | |||
| 165 | extern int stop_a_enabled; | 165 | extern int stop_a_enabled; |
| 166 | /* Make sure the user can actually press Stop-A (L1-A) */ | 166 | /* Make sure the user can actually press Stop-A (L1-A) */ |
| 167 | stop_a_enabled = 1; | 167 | stop_a_enabled = 1; |
| 168 | printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n"); | 168 | pr_emerg("Press Stop-A (L1-A) to return to the boot prom\n"); |
| 169 | } | 169 | } |
| 170 | #endif | 170 | #endif |
| 171 | #if defined(CONFIG_S390) | 171 | #if defined(CONFIG_S390) |
| @@ -176,6 +176,7 @@ void panic(const char *fmt, ...) | |||
| 176 | disabled_wait(caller); | 176 | disabled_wait(caller); |
| 177 | } | 177 | } |
| 178 | #endif | 178 | #endif |
| 179 | pr_emerg("---[ end Kernel panic - not syncing: %s\n", buf); | ||
| 179 | local_irq_enable(); | 180 | local_irq_enable(); |
| 180 | for (i = 0; ; i += PANIC_TIMER_STEP) { | 181 | for (i = 0; ; i += PANIC_TIMER_STEP) { |
| 181 | touch_softlockup_watchdog(); | 182 | touch_softlockup_watchdog(); |
| @@ -199,7 +200,7 @@ struct tnt { | |||
| 199 | static const struct tnt tnts[] = { | 200 | static const struct tnt tnts[] = { |
| 200 | { TAINT_PROPRIETARY_MODULE, 'P', 'G' }, | 201 | { TAINT_PROPRIETARY_MODULE, 'P', 'G' }, |
| 201 | { TAINT_FORCED_MODULE, 'F', ' ' }, | 202 | { TAINT_FORCED_MODULE, 'F', ' ' }, |
| 202 | { TAINT_UNSAFE_SMP, 'S', ' ' }, | 203 | { TAINT_CPU_OUT_OF_SPEC, 'S', ' ' }, |
| 203 | { TAINT_FORCED_RMMOD, 'R', ' ' }, | 204 | { TAINT_FORCED_RMMOD, 'R', ' ' }, |
| 204 | { TAINT_MACHINE_CHECK, 'M', ' ' }, | 205 | { TAINT_MACHINE_CHECK, 'M', ' ' }, |
| 205 | { TAINT_BAD_PAGE, 'B', ' ' }, | 206 | { TAINT_BAD_PAGE, 'B', ' ' }, |
| @@ -210,6 +211,7 @@ static const struct tnt tnts[] = { | |||
| 210 | { TAINT_CRAP, 'C', ' ' }, | 211 | { TAINT_CRAP, 'C', ' ' }, |
| 211 | { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, | 212 | { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, |
| 212 | { TAINT_OOT_MODULE, 'O', ' ' }, | 213 | { TAINT_OOT_MODULE, 'O', ' ' }, |
| 214 | { TAINT_UNSIGNED_MODULE, 'E', ' ' }, | ||
| 213 | }; | 215 | }; |
| 214 | 216 | ||
| 215 | /** | 217 | /** |
| @@ -228,6 +230,7 @@ static const struct tnt tnts[] = { | |||
| 228 | * 'C' - modules from drivers/staging are loaded. | 230 | * 'C' - modules from drivers/staging are loaded. |
| 229 | * 'I' - Working around severe firmware bug. | 231 | * 'I' - Working around severe firmware bug. |
| 230 | * 'O' - Out-of-tree module has been loaded. | 232 | * 'O' - Out-of-tree module has been loaded. |
| 233 | * 'E' - Unsigned module has been loaded. | ||
| 231 | * | 234 | * |
| 232 | * The string is overwritten by the next call to print_tainted(). | 235 | * The string is overwritten by the next call to print_tainted(). |
| 233 | */ | 236 | */ |
| @@ -274,8 +277,7 @@ unsigned long get_taint(void) | |||
| 274 | void add_taint(unsigned flag, enum lockdep_ok lockdep_ok) | 277 | void add_taint(unsigned flag, enum lockdep_ok lockdep_ok) |
| 275 | { | 278 | { |
| 276 | if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off()) | 279 | if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off()) |
| 277 | printk(KERN_WARNING | 280 | pr_warn("Disabling lock debugging due to kernel taint\n"); |
| 278 | "Disabling lock debugging due to kernel taint\n"); | ||
| 279 | 281 | ||
| 280 | set_bit(flag, &tainted_mask); | 282 | set_bit(flag, &tainted_mask); |
| 281 | } | 283 | } |
| @@ -380,8 +382,7 @@ late_initcall(init_oops_id); | |||
| 380 | void print_oops_end_marker(void) | 382 | void print_oops_end_marker(void) |
| 381 | { | 383 | { |
| 382 | init_oops_id(); | 384 | init_oops_id(); |
| 383 | printk(KERN_WARNING "---[ end trace %016llx ]---\n", | 385 | pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id); |
| 384 | (unsigned long long)oops_id); | ||
| 385 | } | 386 | } |
| 386 | 387 | ||
| 387 | /* | 388 | /* |
| @@ -459,7 +460,7 @@ EXPORT_SYMBOL(warn_slowpath_null); | |||
| 459 | * Called when gcc's -fstack-protector feature is used, and | 460 | * Called when gcc's -fstack-protector feature is used, and |
| 460 | * gcc detects corruption of the on-stack canary value | 461 | * gcc detects corruption of the on-stack canary value |
| 461 | */ | 462 | */ |
| 462 | void __stack_chk_fail(void) | 463 | __visible void __stack_chk_fail(void) |
| 463 | { | 464 | { |
| 464 | panic("stack-protector: Kernel stack is corrupted in: %p\n", | 465 | panic("stack-protector: Kernel stack is corrupted in: %p\n", |
| 465 | __builtin_return_address(0)); | 466 | __builtin_return_address(0)); |
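With the new TAINT_UNSIGNED_MODULE entry above, an unsigned module now taints the kernel with 'E' instead of borrowing the forced-module flag 'F', so the two cases can be told apart in oops output. The flag is queried through the existing taint helpers; a short sketch, assuming nothing beyond that API:

    #include <linux/kernel.h>

    static void report_module_taint(void)
    {
            if (test_taint(TAINT_UNSIGNED_MODULE))
                    pr_info("kernel tainted by an unsigned module ('E')\n");
            if (test_taint(TAINT_FORCED_MODULE))
                    pr_info("kernel tainted by a force-loaded module ('F')\n");
    }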
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 06c62de9c711..db95d8eb761b 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
| @@ -318,7 +318,9 @@ static void *pidns_get(struct task_struct *task) | |||
| 318 | struct pid_namespace *ns; | 318 | struct pid_namespace *ns; |
| 319 | 319 | ||
| 320 | rcu_read_lock(); | 320 | rcu_read_lock(); |
| 321 | ns = get_pid_ns(task_active_pid_ns(task)); | 321 | ns = task_active_pid_ns(task); |
| 322 | if (ns) | ||
| 323 | get_pid_ns(ns); | ||
| 322 | rcu_read_unlock(); | 324 | rcu_read_unlock(); |
| 323 | 325 | ||
| 324 | return ns; | 326 | return ns; |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 37170d4dd9a6..f4f2073711d3 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
| @@ -973,16 +973,20 @@ static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
| 973 | static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, | 973 | static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, |
| 974 | const char *buf, size_t n) | 974 | const char *buf, size_t n) |
| 975 | { | 975 | { |
| 976 | unsigned int maj, min; | ||
| 977 | dev_t res; | 976 | dev_t res; |
| 978 | int ret = -EINVAL; | 977 | int len = n; |
| 978 | char *name; | ||
| 979 | 979 | ||
| 980 | if (sscanf(buf, "%u:%u", &maj, &min) != 2) | 980 | if (len && buf[len-1] == '\n') |
| 981 | goto out; | 981 | len--; |
| 982 | name = kstrndup(buf, len, GFP_KERNEL); | ||
| 983 | if (!name) | ||
| 984 | return -ENOMEM; | ||
| 982 | 985 | ||
| 983 | res = MKDEV(maj,min); | 986 | res = name_to_dev_t(name); |
| 984 | if (maj != MAJOR(res) || min != MINOR(res)) | 987 | kfree(name); |
| 985 | goto out; | 988 | if (!res) |
| 989 | return -EINVAL; | ||
| 986 | 990 | ||
| 987 | lock_system_sleep(); | 991 | lock_system_sleep(); |
| 988 | swsusp_resume_device = res; | 992 | swsusp_resume_device = res; |
| @@ -990,9 +994,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
| 990 | printk(KERN_INFO "PM: Starting manual resume from disk\n"); | 994 | printk(KERN_INFO "PM: Starting manual resume from disk\n"); |
| 991 | noresume = 0; | 995 | noresume = 0; |
| 992 | software_resume(); | 996 | software_resume(); |
| 993 | ret = n; | 997 | return n; |
| 994 | out: | ||
| 995 | return ret; | ||
| 996 | } | 998 | } |
| 997 | 999 | ||
| 998 | power_attr(resume); | 1000 | power_attr(resume); |
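resume_store() above now trims a trailing newline and hands the string to name_to_dev_t() instead of sscanf()'ing a major:minor pair, so /sys/power/resume accepts device names as well as numbers. A hedged sketch of the lookup it relies on (the device path is only an example):

    #include <linux/kernel.h>
    #include <linux/mount.h>                /* name_to_dev_t() */

    static int pick_resume_device(void)
    {
            char name[] = "/dev/sda2";      /* a "8:2"-style string still works */
            dev_t res = name_to_dev_t(name);

            if (!res)
                    return -EINVAL;         /* unknown or not yet probed */
            return 0;
    }

From userspace the visible change is that "echo /dev/sda2 > /sys/power/resume" now works where previously only the numeric form did.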
diff --git a/kernel/power/main.c b/kernel/power/main.c index 1d1bf630e6e9..6271bc4073ef 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
| @@ -282,8 +282,8 @@ struct kobject *power_kobj; | |||
| 282 | * state - control system power state. | 282 | * state - control system power state. |
| 283 | * | 283 | * |
| 284 | * show() returns what states are supported, which is hard-coded to | 284 | * show() returns what states are supported, which is hard-coded to |
| 285 | * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and | 285 | * 'freeze' (Low-Power Idle), 'standby' (Power-On Suspend), |
| 286 | * 'disk' (Suspend-to-Disk). | 286 | * 'mem' (Suspend-to-RAM), and 'disk' (Suspend-to-Disk). |
| 287 | * | 287 | * |
| 288 | * store() accepts one of those strings, translates it into the | 288 | * store() accepts one of those strings, translates it into the |
| 289 | * proper enumerated value, and initiates a suspend transition. | 289 | * proper enumerated value, and initiates a suspend transition. |
diff --git a/kernel/power/power.h b/kernel/power/power.h index 7d4b7ffb3c1d..15f37ea08719 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #include <linux/suspend_ioctls.h> | 2 | #include <linux/suspend_ioctls.h> |
| 3 | #include <linux/utsname.h> | 3 | #include <linux/utsname.h> |
| 4 | #include <linux/freezer.h> | 4 | #include <linux/freezer.h> |
| 5 | #include <linux/compiler.h> | ||
| 5 | 6 | ||
| 6 | struct swsusp_info { | 7 | struct swsusp_info { |
| 7 | struct new_utsname uts; | 8 | struct new_utsname uts; |
| @@ -11,7 +12,7 @@ struct swsusp_info { | |||
| 11 | unsigned long image_pages; | 12 | unsigned long image_pages; |
| 12 | unsigned long pages; | 13 | unsigned long pages; |
| 13 | unsigned long size; | 14 | unsigned long size; |
| 14 | } __attribute__((aligned(PAGE_SIZE))); | 15 | } __aligned(PAGE_SIZE); |
| 15 | 16 | ||
| 16 | #ifdef CONFIG_HIBERNATION | 17 | #ifdef CONFIG_HIBERNATION |
| 17 | /* kernel/power/snapshot.c */ | 18 | /* kernel/power/snapshot.c */ |
| @@ -49,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info) | |||
| 49 | */ | 50 | */ |
| 50 | #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) | 51 | #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) |
| 51 | 52 | ||
| 53 | asmlinkage int swsusp_save(void); | ||
| 54 | |||
| 52 | /* kernel/power/hibernate.c */ | 55 | /* kernel/power/hibernate.c */ |
| 53 | extern bool freezer_test_done; | 56 | extern bool freezer_test_done; |
| 54 | 57 | ||
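The <linux/compiler.h> include added here (and in snapshot.c and suspend.c further down) supplies the shorthand attribute macros those hunks switch to: __aligned(), __packed and __weak are thin wrappers around the corresponding GCC attributes, roughly (paraphrasing, not quoting, the compiler headers):

    #define __packed        __attribute__((packed))
    #define __aligned(x)    __attribute__((aligned(x)))
    #define __weak          __attribute__((weak))

The generated code is identical; the macros are simply the preferred kernel spelling.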
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 8dff9b48075a..884b77058864 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c | |||
| @@ -66,6 +66,7 @@ static struct pm_qos_constraints cpu_dma_constraints = { | |||
| 66 | .list = PLIST_HEAD_INIT(cpu_dma_constraints.list), | 66 | .list = PLIST_HEAD_INIT(cpu_dma_constraints.list), |
| 67 | .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | 67 | .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, |
| 68 | .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | 68 | .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, |
| 69 | .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | ||
| 69 | .type = PM_QOS_MIN, | 70 | .type = PM_QOS_MIN, |
| 70 | .notifiers = &cpu_dma_lat_notifier, | 71 | .notifiers = &cpu_dma_lat_notifier, |
| 71 | }; | 72 | }; |
| @@ -79,6 +80,7 @@ static struct pm_qos_constraints network_lat_constraints = { | |||
| 79 | .list = PLIST_HEAD_INIT(network_lat_constraints.list), | 80 | .list = PLIST_HEAD_INIT(network_lat_constraints.list), |
| 80 | .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | 81 | .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, |
| 81 | .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | 82 | .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, |
| 83 | .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | ||
| 82 | .type = PM_QOS_MIN, | 84 | .type = PM_QOS_MIN, |
| 83 | .notifiers = &network_lat_notifier, | 85 | .notifiers = &network_lat_notifier, |
| 84 | }; | 86 | }; |
| @@ -93,6 +95,7 @@ static struct pm_qos_constraints network_tput_constraints = { | |||
| 93 | .list = PLIST_HEAD_INIT(network_tput_constraints.list), | 95 | .list = PLIST_HEAD_INIT(network_tput_constraints.list), |
| 94 | .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | 96 | .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, |
| 95 | .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | 97 | .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, |
| 98 | .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | ||
| 96 | .type = PM_QOS_MAX, | 99 | .type = PM_QOS_MAX, |
| 97 | .notifiers = &network_throughput_notifier, | 100 | .notifiers = &network_throughput_notifier, |
| 98 | }; | 101 | }; |
| @@ -128,7 +131,7 @@ static const struct file_operations pm_qos_power_fops = { | |||
| 128 | static inline int pm_qos_get_value(struct pm_qos_constraints *c) | 131 | static inline int pm_qos_get_value(struct pm_qos_constraints *c) |
| 129 | { | 132 | { |
| 130 | if (plist_head_empty(&c->list)) | 133 | if (plist_head_empty(&c->list)) |
| 131 | return c->default_value; | 134 | return c->no_constraint_value; |
| 132 | 135 | ||
| 133 | switch (c->type) { | 136 | switch (c->type) { |
| 134 | case PM_QOS_MIN: | 137 | case PM_QOS_MIN: |
| @@ -170,6 +173,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | |||
| 170 | { | 173 | { |
| 171 | unsigned long flags; | 174 | unsigned long flags; |
| 172 | int prev_value, curr_value, new_value; | 175 | int prev_value, curr_value, new_value; |
| 176 | int ret; | ||
| 173 | 177 | ||
| 174 | spin_lock_irqsave(&pm_qos_lock, flags); | 178 | spin_lock_irqsave(&pm_qos_lock, flags); |
| 175 | prev_value = pm_qos_get_value(c); | 179 | prev_value = pm_qos_get_value(c); |
| @@ -205,13 +209,15 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | |||
| 205 | 209 | ||
| 206 | trace_pm_qos_update_target(action, prev_value, curr_value); | 210 | trace_pm_qos_update_target(action, prev_value, curr_value); |
| 207 | if (prev_value != curr_value) { | 211 | if (prev_value != curr_value) { |
| 208 | blocking_notifier_call_chain(c->notifiers, | 212 | ret = 1; |
| 209 | (unsigned long)curr_value, | 213 | if (c->notifiers) |
| 210 | NULL); | 214 | blocking_notifier_call_chain(c->notifiers, |
| 211 | return 1; | 215 | (unsigned long)curr_value, |
| 216 | NULL); | ||
| 212 | } else { | 217 | } else { |
| 213 | return 0; | 218 | ret = 0; |
| 214 | } | 219 | } |
| 220 | return ret; | ||
| 215 | } | 221 | } |
| 216 | 222 | ||
| 217 | /** | 223 | /** |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index d9f61a145802..18fb7a2fb14b 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/highmem.h> | 27 | #include <linux/highmem.h> |
| 28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
| 29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
| 30 | #include <linux/compiler.h> | ||
| 30 | 31 | ||
| 31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
| 32 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
| @@ -155,7 +156,7 @@ static inline void free_image_page(void *addr, int clear_nosave_free) | |||
| 155 | struct linked_page { | 156 | struct linked_page { |
| 156 | struct linked_page *next; | 157 | struct linked_page *next; |
| 157 | char data[LINKED_PAGE_DATA_SIZE]; | 158 | char data[LINKED_PAGE_DATA_SIZE]; |
| 158 | } __attribute__((packed)); | 159 | } __packed; |
| 159 | 160 | ||
| 160 | static inline void | 161 | static inline void |
| 161 | free_list_of_pages(struct linked_page *list, int clear_page_nosave) | 162 | free_list_of_pages(struct linked_page *list, int clear_page_nosave) |
| @@ -1268,7 +1269,7 @@ static void free_unnecessary_pages(void) | |||
| 1268 | * [number of saveable pages] - [number of pages that can be freed in theory] | 1269 | * [number of saveable pages] - [number of pages that can be freed in theory] |
| 1269 | * | 1270 | * |
| 1270 | * where the second term is the sum of (1) reclaimable slab pages, (2) active | 1271 | * where the second term is the sum of (1) reclaimable slab pages, (2) active |
| 1271 | * and (3) inactive anonymouns pages, (4) active and (5) inactive file pages, | 1272 | * and (3) inactive anonymous pages, (4) active and (5) inactive file pages, |
| 1272 | * minus mapped file pages. | 1273 | * minus mapped file pages. |
| 1273 | */ | 1274 | */ |
| 1274 | static unsigned long minimum_image_size(unsigned long saveable) | 1275 | static unsigned long minimum_image_size(unsigned long saveable) |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 62ee437b5c7e..c3ad9cafe930 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/syscore_ops.h> | 26 | #include <linux/syscore_ops.h> |
| 27 | #include <linux/ftrace.h> | 27 | #include <linux/ftrace.h> |
| 28 | #include <trace/events/power.h> | 28 | #include <trace/events/power.h> |
| 29 | #include <linux/compiler.h> | ||
| 29 | 30 | ||
| 30 | #include "power.h" | 31 | #include "power.h" |
| 31 | 32 | ||
| @@ -39,7 +40,7 @@ static const struct platform_suspend_ops *suspend_ops; | |||
| 39 | 40 | ||
| 40 | static bool need_suspend_ops(suspend_state_t state) | 41 | static bool need_suspend_ops(suspend_state_t state) |
| 41 | { | 42 | { |
| 42 | return !!(state > PM_SUSPEND_FREEZE); | 43 | return state > PM_SUSPEND_FREEZE; |
| 43 | } | 44 | } |
| 44 | 45 | ||
| 45 | static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); | 46 | static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); |
| @@ -156,13 +157,13 @@ static int suspend_prepare(suspend_state_t state) | |||
| 156 | } | 157 | } |
| 157 | 158 | ||
| 158 | /* default implementation */ | 159 | /* default implementation */ |
| 159 | void __attribute__ ((weak)) arch_suspend_disable_irqs(void) | 160 | void __weak arch_suspend_disable_irqs(void) |
| 160 | { | 161 | { |
| 161 | local_irq_disable(); | 162 | local_irq_disable(); |
| 162 | } | 163 | } |
| 163 | 164 | ||
| 164 | /* default implementation */ | 165 | /* default implementation */ |
| 165 | void __attribute__ ((weak)) arch_suspend_enable_irqs(void) | 166 | void __weak arch_suspend_enable_irqs(void) |
| 166 | { | 167 | { |
| 167 | local_irq_enable(); | 168 | local_irq_enable(); |
| 168 | } | 169 | } |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 7c33ed200410..8c9a4819f798 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
| @@ -101,7 +101,7 @@ struct swsusp_header { | |||
| 101 | unsigned int flags; /* Flags to pass to the "boot" kernel */ | 101 | unsigned int flags; /* Flags to pass to the "boot" kernel */ |
| 102 | char orig_sig[10]; | 102 | char orig_sig[10]; |
| 103 | char sig[10]; | 103 | char sig[10]; |
| 104 | } __attribute__((packed)); | 104 | } __packed; |
| 105 | 105 | ||
| 106 | static struct swsusp_header *swsusp_header; | 106 | static struct swsusp_header *swsusp_header; |
| 107 | 107 | ||
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 8f50de394d22..019069c84ff6 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | #include <linux/rbtree.h> | 18 | #include <linux/rbtree.h> |
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | 20 | ||
| 21 | #include "power.h" | ||
| 22 | |||
| 21 | static DEFINE_MUTEX(wakelocks_lock); | 23 | static DEFINE_MUTEX(wakelocks_lock); |
| 22 | 24 | ||
| 23 | struct wakelock { | 25 | struct wakelock { |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 4dae9cbe9259..a45b50962295 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
| @@ -319,7 +319,7 @@ static void log_store(int facility, int level, | |||
| 319 | else | 319 | else |
| 320 | free = log_first_idx - log_next_idx; | 320 | free = log_first_idx - log_next_idx; |
| 321 | 321 | ||
| 322 | if (free > size + sizeof(struct printk_log)) | 322 | if (free >= size + sizeof(struct printk_log)) |
| 323 | break; | 323 | break; |
| 324 | 324 | ||
| 325 | /* drop old messages until we have enough continuous space */ | 325 | /* drop old messages until we have enough continuous space */ |
| @@ -327,7 +327,7 @@ static void log_store(int facility, int level, | |||
| 327 | log_first_seq++; | 327 | log_first_seq++; |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) { | 330 | if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) { |
| 331 | /* | 331 | /* |
| 332 | * This message + an additional empty header does not fit | 332 | * This message + an additional empty header does not fit |
| 333 | * at the end of the buffer. Add an empty header with len == 0 | 333 | * at the end of the buffer. Add an empty header with len == 0 |
| @@ -351,7 +351,7 @@ static void log_store(int facility, int level, | |||
| 351 | else | 351 | else |
| 352 | msg->ts_nsec = local_clock(); | 352 | msg->ts_nsec = local_clock(); |
| 353 | memset(log_dict(msg) + dict_len, 0, pad_len); | 353 | memset(log_dict(msg) + dict_len, 0, pad_len); |
| 354 | msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len; | 354 | msg->len = size; |
| 355 | 355 | ||
| 356 | /* insert message */ | 356 | /* insert message */ |
| 357 | log_next_idx += msg->len; | 357 | log_next_idx += msg->len; |
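One reading of the two comparison changes in log_store(), offered as interpretation rather than commit text: the free-space check can be ">=" because exactly size + sizeof(struct printk_log) bytes leaves room for the record plus a possible empty wrap header, while the wrap check becomes ">" because a record ending exactly at log_buf_len still fits and needs no wrap marker. The msg->len assignment just reuses the total computed earlier:

    /* earlier in log_store(), roughly:
     *     size = sizeof(struct printk_log) + text_len + dict_len + pad_len;
     * so msg->len = size avoids re-adding the same terms by hand.
     */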
| @@ -1560,9 +1560,12 @@ asmlinkage int vprintk_emit(int facility, int level, | |||
| 1560 | level = kern_level - '0'; | 1560 | level = kern_level - '0'; |
| 1561 | case 'd': /* KERN_DEFAULT */ | 1561 | case 'd': /* KERN_DEFAULT */ |
| 1562 | lflags |= LOG_PREFIX; | 1562 | lflags |= LOG_PREFIX; |
| 1563 | case 'c': /* KERN_CONT */ | ||
| 1564 | break; | ||
| 1565 | } | 1563 | } |
| 1564 | /* | ||
| 1565 | * No need to check length here because vscnprintf | ||
| 1566 | * put '\0' at the end of the string. Only valid and | ||
| 1567 | * newly printed level is detected. | ||
| 1568 | */ | ||
| 1566 | text_len -= end_of_header - text; | 1569 | text_len -= end_of_header - text; |
| 1567 | text = (char *)end_of_header; | 1570 | text = (char *)end_of_header; |
| 1568 | } | 1571 | } |
| @@ -1880,6 +1883,7 @@ void suspend_console(void) | |||
| 1880 | console_lock(); | 1883 | console_lock(); |
| 1881 | console_suspended = 1; | 1884 | console_suspended = 1; |
| 1882 | up(&console_sem); | 1885 | up(&console_sem); |
| 1886 | mutex_release(&console_lock_dep_map, 1, _RET_IP_); | ||
| 1883 | } | 1887 | } |
| 1884 | 1888 | ||
| 1885 | void resume_console(void) | 1889 | void resume_console(void) |
| @@ -1887,6 +1891,7 @@ void resume_console(void) | |||
| 1887 | if (!console_suspend_enabled) | 1891 | if (!console_suspend_enabled) |
| 1888 | return; | 1892 | return; |
| 1889 | down(&console_sem); | 1893 | down(&console_sem); |
| 1894 | mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_); | ||
| 1890 | console_suspended = 0; | 1895 | console_suspended = 0; |
| 1891 | console_unlock(); | 1896 | console_unlock(); |
| 1892 | } | 1897 | } |
diff --git a/kernel/profile.c b/kernel/profile.c index 6631e1ef55ab..cb980f0c731b 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -549,14 +549,14 @@ static int create_hash_tables(void) | |||
| 549 | struct page *page; | 549 | struct page *page; |
| 550 | 550 | ||
| 551 | page = alloc_pages_exact_node(node, | 551 | page = alloc_pages_exact_node(node, |
| 552 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 552 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
| 553 | 0); | 553 | 0); |
| 554 | if (!page) | 554 | if (!page) |
| 555 | goto out_cleanup; | 555 | goto out_cleanup; |
| 556 | per_cpu(cpu_profile_hits, cpu)[1] | 556 | per_cpu(cpu_profile_hits, cpu)[1] |
| 557 | = (struct profile_hit *)page_address(page); | 557 | = (struct profile_hit *)page_address(page); |
| 558 | page = alloc_pages_exact_node(node, | 558 | page = alloc_pages_exact_node(node, |
| 559 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 559 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
| 560 | 0); | 560 | 0); |
| 561 | if (!page) | 561 | if (!page) |
| 562 | goto out_cleanup; | 562 | goto out_cleanup; |
| @@ -591,18 +591,28 @@ out_cleanup: | |||
| 591 | int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ | 591 | int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ |
| 592 | { | 592 | { |
| 593 | struct proc_dir_entry *entry; | 593 | struct proc_dir_entry *entry; |
| 594 | int err = 0; | ||
| 594 | 595 | ||
| 595 | if (!prof_on) | 596 | if (!prof_on) |
| 596 | return 0; | 597 | return 0; |
| 597 | if (create_hash_tables()) | 598 | |
| 598 | return -ENOMEM; | 599 | cpu_notifier_register_begin(); |
| 600 | |||
| 601 | if (create_hash_tables()) { | ||
| 602 | err = -ENOMEM; | ||
| 603 | goto out; | ||
| 604 | } | ||
| 605 | |||
| 599 | entry = proc_create("profile", S_IWUSR | S_IRUGO, | 606 | entry = proc_create("profile", S_IWUSR | S_IRUGO, |
| 600 | NULL, &proc_profile_operations); | 607 | NULL, &proc_profile_operations); |
| 601 | if (!entry) | 608 | if (!entry) |
| 602 | return 0; | 609 | goto out; |
| 603 | proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); | 610 | proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); |
| 604 | hotcpu_notifier(profile_cpu_callback, 0); | 611 | __hotcpu_notifier(profile_cpu_callback, 0); |
| 605 | return 0; | 612 | |
| 613 | out: | ||
| 614 | cpu_notifier_register_done(); | ||
| 615 | return err; | ||
| 606 | } | 616 | } |
| 607 | module_init(create_proc_profile); | 617 | subsys_initcall(create_proc_profile); |
| 608 | #endif /* CONFIG_PROC_FS */ | 618 | #endif /* CONFIG_PROC_FS */ |
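The profile.c change above adopts the CPU-notifier registration protocol introduced elsewhere in this merge: setup code brackets itself with cpu_notifier_register_begin()/cpu_notifier_register_done() and registers with the double-underscore __hotcpu_notifier() variant inside that section, so the initial per-CPU setup and the notifier registration cannot race with a concurrent hotplug. A condensed sketch of the pattern, with the callback name as a placeholder:

    #include <linux/cpu.h>

    static int __init my_subsys_init(void)
    {
            cpu_notifier_register_begin();

            /* initialise state for CPUs that are already online here */

            __hotcpu_notifier(my_cpu_callback, 0);  /* placeholder callback */

            cpu_notifier_register_done();
            return 0;
    }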
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1f4bcb3cc21c..adf98622cb32 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
| @@ -1180,8 +1180,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, | |||
| 1180 | return ret; | 1180 | return ret; |
| 1181 | } | 1181 | } |
| 1182 | 1182 | ||
| 1183 | asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | 1183 | COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid, |
| 1184 | compat_long_t addr, compat_long_t data) | 1184 | compat_long_t, addr, compat_long_t, data) |
| 1185 | { | 1185 | { |
| 1186 | struct task_struct *child; | 1186 | struct task_struct *child; |
| 1187 | long ret; | 1187 | long ret; |
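The ptrace hunk converts an open-coded asmlinkage compat prototype to COMPAT_SYSCALL_DEFINE4(), which generates the compat_sys_*() entry point the same way SYSCALL_DEFINEn() does for native syscalls and takes care of argument width and sign extension for 32-bit callers. The general shape, with a purely hypothetical syscall name:

    #include <linux/compat.h>

    COMPAT_SYSCALL_DEFINE2(frobnicate, compat_ulong_t, flags, compat_long_t, val)
    {
            /* body sees flags and val correctly zero/sign-extended */
            return 0;
    }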
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile index 01e9ec37a3e3..807ccfbf69b3 100644 --- a/kernel/rcu/Makefile +++ b/kernel/rcu/Makefile | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | obj-y += update.o srcu.o | 1 | obj-y += update.o srcu.o |
| 2 | obj-$(CONFIG_RCU_TORTURE_TEST) += torture.o | 2 | obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o |
| 3 | obj-$(CONFIG_TREE_RCU) += tree.o | 3 | obj-$(CONFIG_TREE_RCU) += tree.o |
| 4 | obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o | 4 | obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o |
| 5 | obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o | 5 | obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o |
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 79c3877e9c5b..bfda2726ca45 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, you can access it online at |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * | 17 | * |
| 18 | * Copyright IBM Corporation, 2011 | 18 | * Copyright IBM Corporation, 2011 |
| 19 | * | 19 | * |
| @@ -23,6 +23,7 @@ | |||
| 23 | #ifndef __LINUX_RCU_H | 23 | #ifndef __LINUX_RCU_H |
| 24 | #define __LINUX_RCU_H | 24 | #define __LINUX_RCU_H |
| 25 | 25 | ||
| 26 | #include <trace/events/rcu.h> | ||
| 26 | #ifdef CONFIG_RCU_TRACE | 27 | #ifdef CONFIG_RCU_TRACE |
| 27 | #define RCU_TRACE(stmt) stmt | 28 | #define RCU_TRACE(stmt) stmt |
| 28 | #else /* #ifdef CONFIG_RCU_TRACE */ | 29 | #else /* #ifdef CONFIG_RCU_TRACE */ |
| @@ -116,8 +117,6 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) | |||
| 116 | } | 117 | } |
| 117 | } | 118 | } |
| 118 | 119 | ||
| 119 | extern int rcu_expedited; | ||
| 120 | |||
| 121 | #ifdef CONFIG_RCU_STALL_COMMON | 120 | #ifdef CONFIG_RCU_STALL_COMMON |
| 122 | 121 | ||
| 123 | extern int rcu_cpu_stall_suppress; | 122 | extern int rcu_cpu_stall_suppress; |
diff --git a/kernel/rcu/torture.c b/kernel/rcu/rcutorture.c index 732f8ae3086a..bd30bc61bc05 100644 --- a/kernel/rcu/torture.c +++ b/kernel/rcu/rcutorture.c | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, you can access it online at |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * | 17 | * |
| 18 | * Copyright (C) IBM Corporation, 2005, 2006 | 18 | * Copyright (C) IBM Corporation, 2005, 2006 |
| 19 | * | 19 | * |
| @@ -48,110 +48,58 @@ | |||
| 48 | #include <linux/slab.h> | 48 | #include <linux/slab.h> |
| 49 | #include <linux/trace_clock.h> | 49 | #include <linux/trace_clock.h> |
| 50 | #include <asm/byteorder.h> | 50 | #include <asm/byteorder.h> |
| 51 | #include <linux/torture.h> | ||
| 51 | 52 | ||
| 52 | MODULE_LICENSE("GPL"); | 53 | MODULE_LICENSE("GPL"); |
| 53 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); | 54 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); |
| 54 | 55 | ||
| 55 | MODULE_ALIAS("rcutorture"); | 56 | |
| 56 | #ifdef MODULE_PARAM_PREFIX | 57 | torture_param(int, fqs_duration, 0, |
| 57 | #undef MODULE_PARAM_PREFIX | 58 | "Duration of fqs bursts (us), 0 to disable"); |
| 58 | #endif | 59 | torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); |
| 59 | #define MODULE_PARAM_PREFIX "rcutorture." | 60 | torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); |
| 60 | 61 | torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); | |
| 61 | static int fqs_duration; | 62 | torture_param(bool, gp_normal, false, |
| 62 | module_param(fqs_duration, int, 0444); | 63 | "Use normal (non-expedited) GP wait primitives"); |
| 63 | MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable"); | 64 | torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); |
| 64 | static int fqs_holdoff; | 65 | torture_param(int, n_barrier_cbs, 0, |
| 65 | module_param(fqs_holdoff, int, 0444); | 66 | "# of callbacks/kthreads for barrier testing"); |
| 66 | MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)"); | 67 | torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); |
| 67 | static int fqs_stutter = 3; | 68 | torture_param(int, nreaders, -1, "Number of RCU reader threads"); |
| 68 | module_param(fqs_stutter, int, 0444); | 69 | torture_param(int, object_debug, 0, |
| 69 | MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); | 70 | "Enable debug-object double call_rcu() testing"); |
| 70 | static bool gp_exp; | 71 | torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); |
| 71 | module_param(gp_exp, bool, 0444); | 72 | torture_param(int, onoff_interval, 0, |
| 72 | MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives"); | 73 | "Time between CPU hotplugs (s), 0=disable"); |
| 73 | static bool gp_normal; | 74 | torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); |
| 74 | module_param(gp_normal, bool, 0444); | 75 | torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); |
| 75 | MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives"); | 76 | torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); |
| 76 | static int irqreader = 1; | 77 | torture_param(int, stall_cpu_holdoff, 10, |
| 77 | module_param(irqreader, int, 0444); | 78 | "Time to wait before starting stall (s)."); |
| 78 | MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers"); | 79 | torture_param(int, stat_interval, 60, |
| 79 | static int n_barrier_cbs; | 80 | "Number of seconds between stats printk()s"); |
| 80 | module_param(n_barrier_cbs, int, 0444); | 81 | torture_param(int, stutter, 5, "Number of seconds to run/halt test"); |
| 81 | MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing"); | 82 | torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); |
| 82 | static int nfakewriters = 4; | 83 | torture_param(int, test_boost_duration, 4, |
| 83 | module_param(nfakewriters, int, 0444); | 84 | "Duration of each boost test, seconds."); |
| 84 | MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); | 85 | torture_param(int, test_boost_interval, 7, |
| 85 | static int nreaders = -1; | 86 | "Interval between boost tests, seconds."); |
| 86 | module_param(nreaders, int, 0444); | 87 | torture_param(bool, test_no_idle_hz, true, |
| 87 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); | 88 | "Test support for tickless idle CPUs"); |
| 88 | static int object_debug; | 89 | torture_param(bool, verbose, true, |
| 89 | module_param(object_debug, int, 0444); | 90 | "Enable verbose debugging printk()s"); |
| 90 | MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing"); | 91 | |
| 91 | static int onoff_holdoff; | ||
| 92 | module_param(onoff_holdoff, int, 0444); | ||
| 93 | MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)"); | ||
| 94 | static int onoff_interval; | ||
| 95 | module_param(onoff_interval, int, 0444); | ||
| 96 | MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable"); | ||
| 97 | static int shuffle_interval = 3; | ||
| 98 | module_param(shuffle_interval, int, 0444); | ||
| 99 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); | ||
| 100 | static int shutdown_secs; | ||
| 101 | module_param(shutdown_secs, int, 0444); | ||
| 102 | MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable."); | ||
| 103 | static int stall_cpu; | ||
| 104 | module_param(stall_cpu, int, 0444); | ||
| 105 | MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable."); | ||
| 106 | static int stall_cpu_holdoff = 10; | ||
| 107 | module_param(stall_cpu_holdoff, int, 0444); | ||
| 108 | MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s)."); | ||
| 109 | static int stat_interval = 60; | ||
| 110 | module_param(stat_interval, int, 0644); | ||
| 111 | MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); | ||
| 112 | static int stutter = 5; | ||
| 113 | module_param(stutter, int, 0444); | ||
| 114 | MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test"); | ||
| 115 | static int test_boost = 1; | ||
| 116 | module_param(test_boost, int, 0444); | ||
| 117 | MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); | ||
| 118 | static int test_boost_duration = 4; | ||
| 119 | module_param(test_boost_duration, int, 0444); | ||
| 120 | MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds."); | ||
| 121 | static int test_boost_interval = 7; | ||
| 122 | module_param(test_boost_interval, int, 0444); | ||
| 123 | MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds."); | ||
| 124 | static bool test_no_idle_hz = true; | ||
| 125 | module_param(test_no_idle_hz, bool, 0444); | ||
| 126 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); | ||
| 127 | static char *torture_type = "rcu"; | 92 | static char *torture_type = "rcu"; |
| 128 | module_param(torture_type, charp, 0444); | 93 | module_param(torture_type, charp, 0444); |
| 129 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)"); | 94 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)"); |
| 130 | static bool verbose; | ||
| 131 | module_param(verbose, bool, 0444); | ||
| 132 | MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); | ||
| 133 | |||
| 134 | #define TORTURE_FLAG "-torture:" | ||
| 135 | #define PRINTK_STRING(s) \ | ||
| 136 | do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0) | ||
| 137 | #define VERBOSE_PRINTK_STRING(s) \ | ||
| 138 | do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0) | ||
| 139 | #define VERBOSE_PRINTK_ERRSTRING(s) \ | ||
| 140 | do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0) | ||
| 141 | 95 | ||
| 142 | static int nrealreaders; | 96 | static int nrealreaders; |
| 143 | static struct task_struct *writer_task; | 97 | static struct task_struct *writer_task; |
| 144 | static struct task_struct **fakewriter_tasks; | 98 | static struct task_struct **fakewriter_tasks; |
| 145 | static struct task_struct **reader_tasks; | 99 | static struct task_struct **reader_tasks; |
| 146 | static struct task_struct *stats_task; | 100 | static struct task_struct *stats_task; |
| 147 | static struct task_struct *shuffler_task; | ||
| 148 | static struct task_struct *stutter_task; | ||
| 149 | static struct task_struct *fqs_task; | 101 | static struct task_struct *fqs_task; |
| 150 | static struct task_struct *boost_tasks[NR_CPUS]; | 102 | static struct task_struct *boost_tasks[NR_CPUS]; |
| 151 | static struct task_struct *shutdown_task; | ||
| 152 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 153 | static struct task_struct *onoff_task; | ||
| 154 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 155 | static struct task_struct *stall_task; | 103 | static struct task_struct *stall_task; |
| 156 | static struct task_struct **barrier_cbs_tasks; | 104 | static struct task_struct **barrier_cbs_tasks; |
| 157 | static struct task_struct *barrier_task; | 105 | static struct task_struct *barrier_task; |
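Most of this hunk replaces the static-variable/module_param()/MODULE_PARM_DESC() triples with torture_param() from the new <linux/torture.h>, which folds the three statements into one macro and fixes the permissions at 0444 (note that stat_interval was previously 0644). Roughly, paraphrasing rather than quoting the header:

    #define torture_param(type, name, init, msg) \
            static type name = init; \
            module_param(name, type, 0444); \
            MODULE_PARM_DESC(name, msg)

    /* so, for example: */
    torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");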
| @@ -170,10 +118,10 @@ static struct rcu_torture __rcu *rcu_torture_current; | |||
| 170 | static unsigned long rcu_torture_current_version; | 118 | static unsigned long rcu_torture_current_version; |
| 171 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; | 119 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; |
| 172 | static DEFINE_SPINLOCK(rcu_torture_lock); | 120 | static DEFINE_SPINLOCK(rcu_torture_lock); |
| 173 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = | 121 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], |
| 174 | { 0 }; | 122 | rcu_torture_count) = { 0 }; |
| 175 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = | 123 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], |
| 176 | { 0 }; | 124 | rcu_torture_batch) = { 0 }; |
| 177 | static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; | 125 | static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; |
| 178 | static atomic_t n_rcu_torture_alloc; | 126 | static atomic_t n_rcu_torture_alloc; |
| 179 | static atomic_t n_rcu_torture_alloc_fail; | 127 | static atomic_t n_rcu_torture_alloc_fail; |
| @@ -186,22 +134,9 @@ static long n_rcu_torture_boost_rterror; | |||
| 186 | static long n_rcu_torture_boost_failure; | 134 | static long n_rcu_torture_boost_failure; |
| 187 | static long n_rcu_torture_boosts; | 135 | static long n_rcu_torture_boosts; |
| 188 | static long n_rcu_torture_timers; | 136 | static long n_rcu_torture_timers; |
| 189 | static long n_offline_attempts; | ||
| 190 | static long n_offline_successes; | ||
| 191 | static unsigned long sum_offline; | ||
| 192 | static int min_offline = -1; | ||
| 193 | static int max_offline; | ||
| 194 | static long n_online_attempts; | ||
| 195 | static long n_online_successes; | ||
| 196 | static unsigned long sum_online; | ||
| 197 | static int min_online = -1; | ||
| 198 | static int max_online; | ||
| 199 | static long n_barrier_attempts; | 137 | static long n_barrier_attempts; |
| 200 | static long n_barrier_successes; | 138 | static long n_barrier_successes; |
| 201 | static struct list_head rcu_torture_removed; | 139 | static struct list_head rcu_torture_removed; |
| 202 | static cpumask_var_t shuffle_tmp_mask; | ||
| 203 | |||
| 204 | static int stutter_pause_test; | ||
| 205 | 140 | ||
| 206 | #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) | 141 | #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) |
| 207 | #define RCUTORTURE_RUNNABLE_INIT 1 | 142 | #define RCUTORTURE_RUNNABLE_INIT 1 |
| @@ -232,7 +167,6 @@ static u64 notrace rcu_trace_clock_local(void) | |||
| 232 | } | 167 | } |
| 233 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | 168 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ |
| 234 | 169 | ||
| 235 | static unsigned long shutdown_time; /* jiffies to system shutdown. */ | ||
| 236 | static unsigned long boost_starttime; /* jiffies of next boost test start. */ | 170 | static unsigned long boost_starttime; /* jiffies of next boost test start. */ |
| 237 | DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ | 171 | DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ |
| 238 | /* and boost task create/destroy. */ | 172 | /* and boost task create/destroy. */ |
| @@ -242,51 +176,6 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ | |||
| 242 | static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ | 176 | static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ |
| 243 | static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); | 177 | static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); |
| 244 | 178 | ||
| 245 | /* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ | ||
| 246 | |||
| 247 | #define FULLSTOP_DONTSTOP 0 /* Normal operation. */ | ||
| 248 | #define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */ | ||
| 249 | #define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */ | ||
| 250 | static int fullstop = FULLSTOP_RMMOD; | ||
| 251 | /* | ||
| 252 | * Protect fullstop transitions and spawning of kthreads. | ||
| 253 | */ | ||
| 254 | static DEFINE_MUTEX(fullstop_mutex); | ||
| 255 | |||
| 256 | /* Forward reference. */ | ||
| 257 | static void rcu_torture_cleanup(void); | ||
| 258 | |||
| 259 | /* | ||
| 260 | * Detect and respond to a system shutdown. | ||
| 261 | */ | ||
| 262 | static int | ||
| 263 | rcutorture_shutdown_notify(struct notifier_block *unused1, | ||
| 264 | unsigned long unused2, void *unused3) | ||
| 265 | { | ||
| 266 | mutex_lock(&fullstop_mutex); | ||
| 267 | if (fullstop == FULLSTOP_DONTSTOP) | ||
| 268 | fullstop = FULLSTOP_SHUTDOWN; | ||
| 269 | else | ||
| 270 | pr_warn(/* but going down anyway, so... */ | ||
| 271 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | ||
| 272 | mutex_unlock(&fullstop_mutex); | ||
| 273 | return NOTIFY_DONE; | ||
| 274 | } | ||
| 275 | |||
| 276 | /* | ||
| 277 | * Absorb kthreads into a kernel function that won't return, so that | ||
| 278 | * they won't ever access module text or data again. | ||
| 279 | */ | ||
| 280 | static void rcutorture_shutdown_absorb(const char *title) | ||
| 281 | { | ||
| 282 | if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { | ||
| 283 | pr_notice( | ||
| 284 | "rcutorture thread %s parking due to system shutdown\n", | ||
| 285 | title); | ||
| 286 | schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); | ||
| 287 | } | ||
| 288 | } | ||
| 289 | |||
| 290 | /* | 179 | /* |
| 291 | * Allocate an element from the rcu_tortures pool. | 180 | * Allocate an element from the rcu_tortures pool. |
| 292 | */ | 181 | */ |
| @@ -320,44 +209,6 @@ rcu_torture_free(struct rcu_torture *p) | |||
| 320 | spin_unlock_bh(&rcu_torture_lock); | 209 | spin_unlock_bh(&rcu_torture_lock); |
| 321 | } | 210 | } |
| 322 | 211 | ||
| 323 | struct rcu_random_state { | ||
| 324 | unsigned long rrs_state; | ||
| 325 | long rrs_count; | ||
| 326 | }; | ||
| 327 | |||
| 328 | #define RCU_RANDOM_MULT 39916801 /* prime */ | ||
| 329 | #define RCU_RANDOM_ADD 479001701 /* prime */ | ||
| 330 | #define RCU_RANDOM_REFRESH 10000 | ||
| 331 | |||
| 332 | #define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 } | ||
| 333 | |||
| 334 | /* | ||
| 335 | * Crude but fast random-number generator. Uses a linear congruential | ||
| 336 | * generator, with occasional help from cpu_clock(). | ||
| 337 | */ | ||
| 338 | static unsigned long | ||
| 339 | rcu_random(struct rcu_random_state *rrsp) | ||
| 340 | { | ||
| 341 | if (--rrsp->rrs_count < 0) { | ||
| 342 | rrsp->rrs_state += (unsigned long)local_clock(); | ||
| 343 | rrsp->rrs_count = RCU_RANDOM_REFRESH; | ||
| 344 | } | ||
| 345 | rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD; | ||
| 346 | return swahw32(rrsp->rrs_state); | ||
| 347 | } | ||
| 348 | |||
| 349 | static void | ||
| 350 | rcu_stutter_wait(const char *title) | ||
| 351 | { | ||
| 352 | while (stutter_pause_test || !rcutorture_runnable) { | ||
| 353 | if (rcutorture_runnable) | ||
| 354 | schedule_timeout_interruptible(1); | ||
| 355 | else | ||
| 356 | schedule_timeout_interruptible(round_jiffies_relative(HZ)); | ||
| 357 | rcutorture_shutdown_absorb(title); | ||
| 358 | } | ||
| 359 | } | ||
| 360 | |||
| 361 | /* | 212 | /* |
| 362 | * Operations vector for selecting different types of tests. | 213 | * Operations vector for selecting different types of tests. |
| 363 | */ | 214 | */ |
| @@ -365,7 +216,7 @@ rcu_stutter_wait(const char *title) | |||
| 365 | struct rcu_torture_ops { | 216 | struct rcu_torture_ops { |
| 366 | void (*init)(void); | 217 | void (*init)(void); |
| 367 | int (*readlock)(void); | 218 | int (*readlock)(void); |
| 368 | void (*read_delay)(struct rcu_random_state *rrsp); | 219 | void (*read_delay)(struct torture_random_state *rrsp); |
| 369 | void (*readunlock)(int idx); | 220 | void (*readunlock)(int idx); |
| 370 | int (*completed)(void); | 221 | int (*completed)(void); |
| 371 | void (*deferred_free)(struct rcu_torture *p); | 222 | void (*deferred_free)(struct rcu_torture *p); |
| @@ -392,7 +243,7 @@ static int rcu_torture_read_lock(void) __acquires(RCU) | |||
| 392 | return 0; | 243 | return 0; |
| 393 | } | 244 | } |
| 394 | 245 | ||
| 395 | static void rcu_read_delay(struct rcu_random_state *rrsp) | 246 | static void rcu_read_delay(struct torture_random_state *rrsp) |
| 396 | { | 247 | { |
| 397 | const unsigned long shortdelay_us = 200; | 248 | const unsigned long shortdelay_us = 200; |
| 398 | const unsigned long longdelay_ms = 50; | 249 | const unsigned long longdelay_ms = 50; |
| @@ -401,12 +252,13 @@ static void rcu_read_delay(struct rcu_random_state *rrsp) | |||
| 401 | * period, and we want a long delay occasionally to trigger | 252 | * period, and we want a long delay occasionally to trigger |
| 402 | * force_quiescent_state. */ | 253 | * force_quiescent_state. */ |
| 403 | 254 | ||
| 404 | if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) | 255 | if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) |
| 405 | mdelay(longdelay_ms); | 256 | mdelay(longdelay_ms); |
| 406 | if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) | 257 | if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) |
| 407 | udelay(shortdelay_us); | 258 | udelay(shortdelay_us); |
| 408 | #ifdef CONFIG_PREEMPT | 259 | #ifdef CONFIG_PREEMPT |
| 409 | if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000))) | 260 | if (!preempt_count() && |
| 261 | !(torture_random(rrsp) % (nrealreaders * 20000))) | ||
| 410 | preempt_schedule(); /* No QS if preempt_disable() in effect */ | 262 | preempt_schedule(); /* No QS if preempt_disable() in effect */ |
| 411 | #endif | 263 | #endif |
| 412 | } | 264 | } |
| @@ -427,7 +279,7 @@ rcu_torture_cb(struct rcu_head *p) | |||
| 427 | int i; | 279 | int i; |
| 428 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); | 280 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); |
| 429 | 281 | ||
| 430 | if (fullstop != FULLSTOP_DONTSTOP) { | 282 | if (torture_must_stop_irq()) { |
| 431 | /* Test is ending, just drop callbacks on the floor. */ | 283 | /* Test is ending, just drop callbacks on the floor. */ |
| 432 | /* The next initialization will pick up the pieces. */ | 284 | /* The next initialization will pick up the pieces. */ |
| 433 | return; | 285 | return; |
| @@ -520,6 +372,48 @@ static struct rcu_torture_ops rcu_bh_ops = { | |||
| 520 | }; | 372 | }; |
| 521 | 373 | ||
| 522 | /* | 374 | /* |
| 375 | * Don't even think about trying any of these in real life!!! | ||
| 376 | * The names include "busted", and they really mean it! | ||
| 377 | * The only purpose of these functions is to provide a buggy RCU | ||
| 378 | * implementation to make sure that rcutorture correctly emits | ||
| 379 | * buggy-RCU error messages. | ||
| 380 | */ | ||
| 381 | static void rcu_busted_torture_deferred_free(struct rcu_torture *p) | ||
| 382 | { | ||
| 383 | /* This is a deliberate bug for testing purposes only! */ | ||
| 384 | rcu_torture_cb(&p->rtort_rcu); | ||
| 385 | } | ||
| 386 | |||
| 387 | static void synchronize_rcu_busted(void) | ||
| 388 | { | ||
| 389 | /* This is a deliberate bug for testing purposes only! */ | ||
| 390 | } | ||
| 391 | |||
| 392 | static void | ||
| 393 | call_rcu_busted(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
| 394 | { | ||
| 395 | /* This is a deliberate bug for testing purposes only! */ | ||
| 396 | func(head); | ||
| 397 | } | ||
| 398 | |||
| 399 | static struct rcu_torture_ops rcu_busted_ops = { | ||
| 400 | .init = rcu_sync_torture_init, | ||
| 401 | .readlock = rcu_torture_read_lock, | ||
| 402 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
| 403 | .readunlock = rcu_torture_read_unlock, | ||
| 404 | .completed = rcu_no_completed, | ||
| 405 | .deferred_free = rcu_busted_torture_deferred_free, | ||
| 406 | .sync = synchronize_rcu_busted, | ||
| 407 | .exp_sync = synchronize_rcu_busted, | ||
| 408 | .call = call_rcu_busted, | ||
| 409 | .cb_barrier = NULL, | ||
| 410 | .fqs = NULL, | ||
| 411 | .stats = NULL, | ||
| 412 | .irq_capable = 1, | ||
| 413 | .name = "rcu_busted" | ||
| 414 | }; | ||
| 415 | |||
| 416 | /* | ||
| 523 | * Definitions for srcu torture testing. | 417 | * Definitions for srcu torture testing. |
| 524 | */ | 418 | */ |
| 525 | 419 | ||
| @@ -530,7 +424,7 @@ static int srcu_torture_read_lock(void) __acquires(&srcu_ctl) | |||
| 530 | return srcu_read_lock(&srcu_ctl); | 424 | return srcu_read_lock(&srcu_ctl); |
| 531 | } | 425 | } |
| 532 | 426 | ||
| 533 | static void srcu_read_delay(struct rcu_random_state *rrsp) | 427 | static void srcu_read_delay(struct torture_random_state *rrsp) |
| 534 | { | 428 | { |
| 535 | long delay; | 429 | long delay; |
| 536 | const long uspertick = 1000000 / HZ; | 430 | const long uspertick = 1000000 / HZ; |
| @@ -538,7 +432,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp) | |||
| 538 | 432 | ||
| 539 | /* We want there to be long-running readers, but not all the time. */ | 433 | /* We want there to be long-running readers, but not all the time. */ |
| 540 | 434 | ||
| 541 | delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); | 435 | delay = torture_random(rrsp) % |
| 436 | (nrealreaders * 2 * longdelay * uspertick); | ||
| 542 | if (!delay) | 437 | if (!delay) |
| 543 | schedule_timeout_interruptible(longdelay); | 438 | schedule_timeout_interruptible(longdelay); |
| 544 | else | 439 | else |
| @@ -677,12 +572,12 @@ static int rcu_torture_boost(void *arg) | |||
| 677 | struct rcu_boost_inflight rbi = { .inflight = 0 }; | 572 | struct rcu_boost_inflight rbi = { .inflight = 0 }; |
| 678 | struct sched_param sp; | 573 | struct sched_param sp; |
| 679 | 574 | ||
| 680 | VERBOSE_PRINTK_STRING("rcu_torture_boost started"); | 575 | VERBOSE_TOROUT_STRING("rcu_torture_boost started"); |
| 681 | 576 | ||
| 682 | /* Set real-time priority. */ | 577 | /* Set real-time priority. */ |
| 683 | sp.sched_priority = 1; | 578 | sp.sched_priority = 1; |
| 684 | if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { | 579 | if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { |
| 685 | VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!"); | 580 | VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!"); |
| 686 | n_rcu_torture_boost_rterror++; | 581 | n_rcu_torture_boost_rterror++; |
| 687 | } | 582 | } |
| 688 | 583 | ||
| @@ -693,9 +588,8 @@ static int rcu_torture_boost(void *arg) | |||
| 693 | oldstarttime = boost_starttime; | 588 | oldstarttime = boost_starttime; |
| 694 | while (ULONG_CMP_LT(jiffies, oldstarttime)) { | 589 | while (ULONG_CMP_LT(jiffies, oldstarttime)) { |
| 695 | schedule_timeout_interruptible(oldstarttime - jiffies); | 590 | schedule_timeout_interruptible(oldstarttime - jiffies); |
| 696 | rcu_stutter_wait("rcu_torture_boost"); | 591 | stutter_wait("rcu_torture_boost"); |
| 697 | if (kthread_should_stop() || | 592 | if (torture_must_stop()) |
| 698 | fullstop != FULLSTOP_DONTSTOP) | ||
| 699 | goto checkwait; | 593 | goto checkwait; |
| 700 | } | 594 | } |
| 701 | 595 | ||
| @@ -710,15 +604,14 @@ static int rcu_torture_boost(void *arg) | |||
| 710 | call_rcu(&rbi.rcu, rcu_torture_boost_cb); | 604 | call_rcu(&rbi.rcu, rcu_torture_boost_cb); |
| 711 | if (jiffies - call_rcu_time > | 605 | if (jiffies - call_rcu_time > |
| 712 | test_boost_duration * HZ - HZ / 2) { | 606 | test_boost_duration * HZ - HZ / 2) { |
| 713 | VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed"); | 607 | VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); |
| 714 | n_rcu_torture_boost_failure++; | 608 | n_rcu_torture_boost_failure++; |
| 715 | } | 609 | } |
| 716 | call_rcu_time = jiffies; | 610 | call_rcu_time = jiffies; |
| 717 | } | 611 | } |
| 718 | cond_resched(); | 612 | cond_resched(); |
| 719 | rcu_stutter_wait("rcu_torture_boost"); | 613 | stutter_wait("rcu_torture_boost"); |
| 720 | if (kthread_should_stop() || | 614 | if (torture_must_stop()) |
| 721 | fullstop != FULLSTOP_DONTSTOP) | ||
| 722 | goto checkwait; | 615 | goto checkwait; |
| 723 | } | 616 | } |
| 724 | 617 | ||
| @@ -742,16 +635,17 @@ static int rcu_torture_boost(void *arg) | |||
| 742 | } | 635 | } |
| 743 | 636 | ||
| 744 | /* Go do the stutter. */ | 637 | /* Go do the stutter. */ |
| 745 | checkwait: rcu_stutter_wait("rcu_torture_boost"); | 638 | checkwait: stutter_wait("rcu_torture_boost"); |
| 746 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 639 | } while (!torture_must_stop()); |
| 747 | 640 | ||
| 748 | /* Clean up and exit. */ | 641 | /* Clean up and exit. */ |
| 749 | VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping"); | 642 | while (!kthread_should_stop() || rbi.inflight) { |
| 750 | rcutorture_shutdown_absorb("rcu_torture_boost"); | 643 | torture_shutdown_absorb("rcu_torture_boost"); |
| 751 | while (!kthread_should_stop() || rbi.inflight) | ||
| 752 | schedule_timeout_uninterruptible(1); | 644 | schedule_timeout_uninterruptible(1); |
| 645 | } | ||
| 753 | smp_mb(); /* order accesses to ->inflight before stack-frame death. */ | 646 | smp_mb(); /* order accesses to ->inflight before stack-frame death. */ |
| 754 | destroy_rcu_head_on_stack(&rbi.rcu); | 647 | destroy_rcu_head_on_stack(&rbi.rcu); |
| 648 | torture_kthread_stopping("rcu_torture_boost"); | ||
| 755 | return 0; | 649 | return 0; |
| 756 | } | 650 | } |
| 757 | 651 | ||
| @@ -766,7 +660,7 @@ rcu_torture_fqs(void *arg) | |||
| 766 | unsigned long fqs_resume_time; | 660 | unsigned long fqs_resume_time; |
| 767 | int fqs_burst_remaining; | 661 | int fqs_burst_remaining; |
| 768 | 662 | ||
| 769 | VERBOSE_PRINTK_STRING("rcu_torture_fqs task started"); | 663 | VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); |
| 770 | do { | 664 | do { |
| 771 | fqs_resume_time = jiffies + fqs_stutter * HZ; | 665 | fqs_resume_time = jiffies + fqs_stutter * HZ; |
| 772 | while (ULONG_CMP_LT(jiffies, fqs_resume_time) && | 666 | while (ULONG_CMP_LT(jiffies, fqs_resume_time) && |
| @@ -780,12 +674,9 @@ rcu_torture_fqs(void *arg) | |||
| 780 | udelay(fqs_holdoff); | 674 | udelay(fqs_holdoff); |
| 781 | fqs_burst_remaining -= fqs_holdoff; | 675 | fqs_burst_remaining -= fqs_holdoff; |
| 782 | } | 676 | } |
| 783 | rcu_stutter_wait("rcu_torture_fqs"); | 677 | stutter_wait("rcu_torture_fqs"); |
| 784 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 678 | } while (!torture_must_stop()); |
| 785 | VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping"); | 679 | torture_kthread_stopping("rcu_torture_fqs"); |
| 786 | rcutorture_shutdown_absorb("rcu_torture_fqs"); | ||
| 787 | while (!kthread_should_stop()) | ||
| 788 | schedule_timeout_uninterruptible(1); | ||
| 789 | return 0; | 680 | return 0; |
| 790 | } | 681 | } |
| 791 | 682 | ||
| @@ -802,10 +693,10 @@ rcu_torture_writer(void *arg) | |||
| 802 | struct rcu_torture *rp; | 693 | struct rcu_torture *rp; |
| 803 | struct rcu_torture *rp1; | 694 | struct rcu_torture *rp1; |
| 804 | struct rcu_torture *old_rp; | 695 | struct rcu_torture *old_rp; |
| 805 | static DEFINE_RCU_RANDOM(rand); | 696 | static DEFINE_TORTURE_RANDOM(rand); |
| 806 | 697 | ||
| 807 | VERBOSE_PRINTK_STRING("rcu_torture_writer task started"); | 698 | VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); |
| 808 | set_user_nice(current, 19); | 699 | set_user_nice(current, MAX_NICE); |
| 809 | 700 | ||
| 810 | do { | 701 | do { |
| 811 | schedule_timeout_uninterruptible(1); | 702 | schedule_timeout_uninterruptible(1); |
| @@ -813,7 +704,7 @@ rcu_torture_writer(void *arg) | |||
| 813 | if (rp == NULL) | 704 | if (rp == NULL) |
| 814 | continue; | 705 | continue; |
| 815 | rp->rtort_pipe_count = 0; | 706 | rp->rtort_pipe_count = 0; |
| 816 | udelay(rcu_random(&rand) & 0x3ff); | 707 | udelay(torture_random(&rand) & 0x3ff); |
| 817 | old_rp = rcu_dereference_check(rcu_torture_current, | 708 | old_rp = rcu_dereference_check(rcu_torture_current, |
| 818 | current == writer_task); | 709 | current == writer_task); |
| 819 | rp->rtort_mbtest = 1; | 710 | rp->rtort_mbtest = 1; |
| @@ -826,7 +717,7 @@ rcu_torture_writer(void *arg) | |||
| 826 | atomic_inc(&rcu_torture_wcount[i]); | 717 | atomic_inc(&rcu_torture_wcount[i]); |
| 827 | old_rp->rtort_pipe_count++; | 718 | old_rp->rtort_pipe_count++; |
| 828 | if (gp_normal == gp_exp) | 719 | if (gp_normal == gp_exp) |
| 829 | exp = !!(rcu_random(&rand) & 0x80); | 720 | exp = !!(torture_random(&rand) & 0x80); |
| 830 | else | 721 | else |
| 831 | exp = gp_exp; | 722 | exp = gp_exp; |
| 832 | if (!exp) { | 723 | if (!exp) { |
| @@ -852,12 +743,9 @@ rcu_torture_writer(void *arg) | |||
| 852 | } | 743 | } |
| 853 | } | 744 | } |
| 854 | rcutorture_record_progress(++rcu_torture_current_version); | 745 | rcutorture_record_progress(++rcu_torture_current_version); |
| 855 | rcu_stutter_wait("rcu_torture_writer"); | 746 | stutter_wait("rcu_torture_writer"); |
| 856 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 747 | } while (!torture_must_stop()); |
| 857 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); | 748 | torture_kthread_stopping("rcu_torture_writer"); |
| 858 | rcutorture_shutdown_absorb("rcu_torture_writer"); | ||
| 859 | while (!kthread_should_stop()) | ||
| 860 | schedule_timeout_uninterruptible(1); | ||
| 861 | return 0; | 749 | return 0; |
| 862 | } | 750 | } |
| 863 | 751 | ||
| @@ -868,19 +756,19 @@ rcu_torture_writer(void *arg) | |||
| 868 | static int | 756 | static int |
| 869 | rcu_torture_fakewriter(void *arg) | 757 | rcu_torture_fakewriter(void *arg) |
| 870 | { | 758 | { |
| 871 | DEFINE_RCU_RANDOM(rand); | 759 | DEFINE_TORTURE_RANDOM(rand); |
| 872 | 760 | ||
| 873 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started"); | 761 | VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); |
| 874 | set_user_nice(current, 19); | 762 | set_user_nice(current, MAX_NICE); |
| 875 | 763 | ||
| 876 | do { | 764 | do { |
| 877 | schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); | 765 | schedule_timeout_uninterruptible(1 + torture_random(&rand)%10); |
| 878 | udelay(rcu_random(&rand) & 0x3ff); | 766 | udelay(torture_random(&rand) & 0x3ff); |
| 879 | if (cur_ops->cb_barrier != NULL && | 767 | if (cur_ops->cb_barrier != NULL && |
| 880 | rcu_random(&rand) % (nfakewriters * 8) == 0) { | 768 | torture_random(&rand) % (nfakewriters * 8) == 0) { |
| 881 | cur_ops->cb_barrier(); | 769 | cur_ops->cb_barrier(); |
| 882 | } else if (gp_normal == gp_exp) { | 770 | } else if (gp_normal == gp_exp) { |
| 883 | if (rcu_random(&rand) & 0x80) | 771 | if (torture_random(&rand) & 0x80) |
| 884 | cur_ops->sync(); | 772 | cur_ops->sync(); |
| 885 | else | 773 | else |
| 886 | cur_ops->exp_sync(); | 774 | cur_ops->exp_sync(); |
| @@ -889,13 +777,10 @@ rcu_torture_fakewriter(void *arg) | |||
| 889 | } else { | 777 | } else { |
| 890 | cur_ops->exp_sync(); | 778 | cur_ops->exp_sync(); |
| 891 | } | 779 | } |
| 892 | rcu_stutter_wait("rcu_torture_fakewriter"); | 780 | stutter_wait("rcu_torture_fakewriter"); |
| 893 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 781 | } while (!torture_must_stop()); |
| 894 | 782 | ||
| 895 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); | 783 | torture_kthread_stopping("rcu_torture_fakewriter"); |
| 896 | rcutorture_shutdown_absorb("rcu_torture_fakewriter"); | ||
| 897 | while (!kthread_should_stop()) | ||
| 898 | schedule_timeout_uninterruptible(1); | ||
| 899 | return 0; | 784 | return 0; |
| 900 | } | 785 | } |
| 901 | 786 | ||
| @@ -921,7 +806,7 @@ static void rcu_torture_timer(unsigned long unused) | |||
| 921 | int idx; | 806 | int idx; |
| 922 | int completed; | 807 | int completed; |
| 923 | int completed_end; | 808 | int completed_end; |
| 924 | static DEFINE_RCU_RANDOM(rand); | 809 | static DEFINE_TORTURE_RANDOM(rand); |
| 925 | static DEFINE_SPINLOCK(rand_lock); | 810 | static DEFINE_SPINLOCK(rand_lock); |
| 926 | struct rcu_torture *p; | 811 | struct rcu_torture *p; |
| 927 | int pipe_count; | 812 | int pipe_count; |
| @@ -980,14 +865,14 @@ rcu_torture_reader(void *arg) | |||
| 980 | int completed; | 865 | int completed; |
| 981 | int completed_end; | 866 | int completed_end; |
| 982 | int idx; | 867 | int idx; |
| 983 | DEFINE_RCU_RANDOM(rand); | 868 | DEFINE_TORTURE_RANDOM(rand); |
| 984 | struct rcu_torture *p; | 869 | struct rcu_torture *p; |
| 985 | int pipe_count; | 870 | int pipe_count; |
| 986 | struct timer_list t; | 871 | struct timer_list t; |
| 987 | unsigned long long ts; | 872 | unsigned long long ts; |
| 988 | 873 | ||
| 989 | VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); | 874 | VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); |
| 990 | set_user_nice(current, 19); | 875 | set_user_nice(current, MAX_NICE); |
| 991 | if (irqreader && cur_ops->irq_capable) | 876 | if (irqreader && cur_ops->irq_capable) |
| 992 | setup_timer_on_stack(&t, rcu_torture_timer, 0); | 877 | setup_timer_on_stack(&t, rcu_torture_timer, 0); |
| 993 | 878 | ||
| @@ -1034,14 +919,11 @@ rcu_torture_reader(void *arg) | |||
| 1034 | preempt_enable(); | 919 | preempt_enable(); |
| 1035 | cur_ops->readunlock(idx); | 920 | cur_ops->readunlock(idx); |
| 1036 | schedule(); | 921 | schedule(); |
| 1037 | rcu_stutter_wait("rcu_torture_reader"); | 922 | stutter_wait("rcu_torture_reader"); |
| 1038 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 923 | } while (!torture_must_stop()); |
| 1039 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); | ||
| 1040 | rcutorture_shutdown_absorb("rcu_torture_reader"); | ||
| 1041 | if (irqreader && cur_ops->irq_capable) | 924 | if (irqreader && cur_ops->irq_capable) |
| 1042 | del_timer_sync(&t); | 925 | del_timer_sync(&t); |
| 1043 | while (!kthread_should_stop()) | 926 | torture_kthread_stopping("rcu_torture_reader"); |
| 1044 | schedule_timeout_uninterruptible(1); | ||
| 1045 | return 0; | 927 | return 0; |
| 1046 | } | 928 | } |
| 1047 | 929 | ||
| @@ -1083,13 +965,7 @@ rcu_torture_printk(char *page) | |||
| 1083 | n_rcu_torture_boost_failure, | 965 | n_rcu_torture_boost_failure, |
| 1084 | n_rcu_torture_boosts, | 966 | n_rcu_torture_boosts, |
| 1085 | n_rcu_torture_timers); | 967 | n_rcu_torture_timers); |
| 1086 | page += sprintf(page, | 968 | page = torture_onoff_stats(page); |
| 1087 | "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ", | ||
| 1088 | n_online_successes, n_online_attempts, | ||
| 1089 | n_offline_successes, n_offline_attempts, | ||
| 1090 | min_online, max_online, | ||
| 1091 | min_offline, max_offline, | ||
| 1092 | sum_online, sum_offline, HZ); | ||
| 1093 | page += sprintf(page, "barrier: %ld/%ld:%ld", | 969 | page += sprintf(page, "barrier: %ld/%ld:%ld", |
| 1094 | n_barrier_successes, | 970 | n_barrier_successes, |
| 1095 | n_barrier_attempts, | 971 | n_barrier_attempts, |
| @@ -1150,123 +1026,17 @@ rcu_torture_stats_print(void) | |||
| 1150 | /* | 1026 | /* |
| 1151 | * Periodically prints torture statistics, if periodic statistics printing | 1027 | * Periodically prints torture statistics, if periodic statistics printing |
| 1152 | * was specified via the stat_interval module parameter. | 1028 | * was specified via the stat_interval module parameter. |
| 1153 | * | ||
| 1154 | * No need to worry about fullstop here, since this one doesn't reference | ||
| 1155 | * volatile state or register callbacks. | ||
| 1156 | */ | 1029 | */ |
| 1157 | static int | 1030 | static int |
| 1158 | rcu_torture_stats(void *arg) | 1031 | rcu_torture_stats(void *arg) |
| 1159 | { | 1032 | { |
| 1160 | VERBOSE_PRINTK_STRING("rcu_torture_stats task started"); | 1033 | VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); |
| 1161 | do { | 1034 | do { |
| 1162 | schedule_timeout_interruptible(stat_interval * HZ); | 1035 | schedule_timeout_interruptible(stat_interval * HZ); |
| 1163 | rcu_torture_stats_print(); | 1036 | rcu_torture_stats_print(); |
| 1164 | rcutorture_shutdown_absorb("rcu_torture_stats"); | 1037 | torture_shutdown_absorb("rcu_torture_stats"); |
| 1165 | } while (!kthread_should_stop()); | 1038 | } while (!torture_must_stop()); |
| 1166 | VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping"); | 1039 | torture_kthread_stopping("rcu_torture_stats"); |
| 1167 | return 0; | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | static int rcu_idle_cpu; /* Force all torture tasks off this CPU */ | ||
| 1171 | |||
| 1172 | /* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case | ||
| 1173 | * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs. | ||
| 1174 | */ | ||
| 1175 | static void rcu_torture_shuffle_tasks(void) | ||
| 1176 | { | ||
| 1177 | int i; | ||
| 1178 | |||
| 1179 | cpumask_setall(shuffle_tmp_mask); | ||
| 1180 | get_online_cpus(); | ||
| 1181 | |||
| 1182 | /* No point in shuffling if there is only one online CPU (ex: UP) */ | ||
| 1183 | if (num_online_cpus() == 1) { | ||
| 1184 | put_online_cpus(); | ||
| 1185 | return; | ||
| 1186 | } | ||
| 1187 | |||
| 1188 | if (rcu_idle_cpu != -1) | ||
| 1189 | cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask); | ||
| 1190 | |||
| 1191 | set_cpus_allowed_ptr(current, shuffle_tmp_mask); | ||
| 1192 | |||
| 1193 | if (reader_tasks) { | ||
| 1194 | for (i = 0; i < nrealreaders; i++) | ||
| 1195 | if (reader_tasks[i]) | ||
| 1196 | set_cpus_allowed_ptr(reader_tasks[i], | ||
| 1197 | shuffle_tmp_mask); | ||
| 1198 | } | ||
| 1199 | if (fakewriter_tasks) { | ||
| 1200 | for (i = 0; i < nfakewriters; i++) | ||
| 1201 | if (fakewriter_tasks[i]) | ||
| 1202 | set_cpus_allowed_ptr(fakewriter_tasks[i], | ||
| 1203 | shuffle_tmp_mask); | ||
| 1204 | } | ||
| 1205 | if (writer_task) | ||
| 1206 | set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask); | ||
| 1207 | if (stats_task) | ||
| 1208 | set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask); | ||
| 1209 | if (stutter_task) | ||
| 1210 | set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask); | ||
| 1211 | if (fqs_task) | ||
| 1212 | set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask); | ||
| 1213 | if (shutdown_task) | ||
| 1214 | set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask); | ||
| 1215 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 1216 | if (onoff_task) | ||
| 1217 | set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask); | ||
| 1218 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 1219 | if (stall_task) | ||
| 1220 | set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask); | ||
| 1221 | if (barrier_cbs_tasks) | ||
| 1222 | for (i = 0; i < n_barrier_cbs; i++) | ||
| 1223 | if (barrier_cbs_tasks[i]) | ||
| 1224 | set_cpus_allowed_ptr(barrier_cbs_tasks[i], | ||
| 1225 | shuffle_tmp_mask); | ||
| 1226 | if (barrier_task) | ||
| 1227 | set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask); | ||
| 1228 | |||
| 1229 | if (rcu_idle_cpu == -1) | ||
| 1230 | rcu_idle_cpu = num_online_cpus() - 1; | ||
| 1231 | else | ||
| 1232 | rcu_idle_cpu--; | ||
| 1233 | |||
| 1234 | put_online_cpus(); | ||
| 1235 | } | ||
| 1236 | |||
| 1237 | /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the | ||
| 1238 | * system to become idle at a time and cut off its timer ticks. This is meant | ||
| 1239 | * to test the support for such tickless idle CPU in RCU. | ||
| 1240 | */ | ||
| 1241 | static int | ||
| 1242 | rcu_torture_shuffle(void *arg) | ||
| 1243 | { | ||
| 1244 | VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started"); | ||
| 1245 | do { | ||
| 1246 | schedule_timeout_interruptible(shuffle_interval * HZ); | ||
| 1247 | rcu_torture_shuffle_tasks(); | ||
| 1248 | rcutorture_shutdown_absorb("rcu_torture_shuffle"); | ||
| 1249 | } while (!kthread_should_stop()); | ||
| 1250 | VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping"); | ||
| 1251 | return 0; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | /* Cause the rcutorture test to "stutter", starting and stopping all | ||
| 1255 | * threads periodically. | ||
| 1256 | */ | ||
| 1257 | static int | ||
| 1258 | rcu_torture_stutter(void *arg) | ||
| 1259 | { | ||
| 1260 | VERBOSE_PRINTK_STRING("rcu_torture_stutter task started"); | ||
| 1261 | do { | ||
| 1262 | schedule_timeout_interruptible(stutter * HZ); | ||
| 1263 | stutter_pause_test = 1; | ||
| 1264 | if (!kthread_should_stop()) | ||
| 1265 | schedule_timeout_interruptible(stutter * HZ); | ||
| 1266 | stutter_pause_test = 0; | ||
| 1267 | rcutorture_shutdown_absorb("rcu_torture_stutter"); | ||
| 1268 | } while (!kthread_should_stop()); | ||
| 1269 | VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping"); | ||
| 1270 | return 0; | 1040 | return 0; |
| 1271 | } | 1041 | } |
| 1272 | 1042 | ||
| @@ -1293,10 +1063,6 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) | |||
| 1293 | onoff_interval, onoff_holdoff); | 1063 | onoff_interval, onoff_holdoff); |
| 1294 | } | 1064 | } |
| 1295 | 1065 | ||
| 1296 | static struct notifier_block rcutorture_shutdown_nb = { | ||
| 1297 | .notifier_call = rcutorture_shutdown_notify, | ||
| 1298 | }; | ||
| 1299 | |||
| 1300 | static void rcutorture_booster_cleanup(int cpu) | 1066 | static void rcutorture_booster_cleanup(int cpu) |
| 1301 | { | 1067 | { |
| 1302 | struct task_struct *t; | 1068 | struct task_struct *t; |
| @@ -1304,14 +1070,12 @@ static void rcutorture_booster_cleanup(int cpu) | |||
| 1304 | if (boost_tasks[cpu] == NULL) | 1070 | if (boost_tasks[cpu] == NULL) |
| 1305 | return; | 1071 | return; |
| 1306 | mutex_lock(&boost_mutex); | 1072 | mutex_lock(&boost_mutex); |
| 1307 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task"); | ||
| 1308 | t = boost_tasks[cpu]; | 1073 | t = boost_tasks[cpu]; |
| 1309 | boost_tasks[cpu] = NULL; | 1074 | boost_tasks[cpu] = NULL; |
| 1310 | mutex_unlock(&boost_mutex); | 1075 | mutex_unlock(&boost_mutex); |
| 1311 | 1076 | ||
| 1312 | /* This must be outside of the mutex, otherwise deadlock! */ | 1077 | /* This must be outside of the mutex, otherwise deadlock! */ |
| 1313 | kthread_stop(t); | 1078 | torture_stop_kthread(rcu_torture_boost, t); |
| 1314 | boost_tasks[cpu] = NULL; | ||
| 1315 | } | 1079 | } |
| 1316 | 1080 | ||
| 1317 | static int rcutorture_booster_init(int cpu) | 1081 | static int rcutorture_booster_init(int cpu) |
| @@ -1323,13 +1087,13 @@ static int rcutorture_booster_init(int cpu) | |||
| 1323 | 1087 | ||
| 1324 | /* Don't allow time recalculation while creating a new task. */ | 1088 | /* Don't allow time recalculation while creating a new task. */ |
| 1325 | mutex_lock(&boost_mutex); | 1089 | mutex_lock(&boost_mutex); |
| 1326 | VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task"); | 1090 | VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); |
| 1327 | boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, | 1091 | boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, |
| 1328 | cpu_to_node(cpu), | 1092 | cpu_to_node(cpu), |
| 1329 | "rcu_torture_boost"); | 1093 | "rcu_torture_boost"); |
| 1330 | if (IS_ERR(boost_tasks[cpu])) { | 1094 | if (IS_ERR(boost_tasks[cpu])) { |
| 1331 | retval = PTR_ERR(boost_tasks[cpu]); | 1095 | retval = PTR_ERR(boost_tasks[cpu]); |
| 1332 | VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed"); | 1096 | VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); |
| 1333 | n_rcu_torture_boost_ktrerror++; | 1097 | n_rcu_torture_boost_ktrerror++; |
| 1334 | boost_tasks[cpu] = NULL; | 1098 | boost_tasks[cpu] = NULL; |
| 1335 | mutex_unlock(&boost_mutex); | 1099 | mutex_unlock(&boost_mutex); |
| @@ -1342,175 +1106,6 @@ static int rcutorture_booster_init(int cpu) | |||
| 1342 | } | 1106 | } |
| 1343 | 1107 | ||
| 1344 | /* | 1108 | /* |
| 1345 | * Cause the rcutorture test to shutdown the system after the test has | ||
| 1346 | * run for the time specified by the shutdown_secs module parameter. | ||
| 1347 | */ | ||
| 1348 | static int | ||
| 1349 | rcu_torture_shutdown(void *arg) | ||
| 1350 | { | ||
| 1351 | long delta; | ||
| 1352 | unsigned long jiffies_snap; | ||
| 1353 | |||
| 1354 | VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started"); | ||
| 1355 | jiffies_snap = ACCESS_ONCE(jiffies); | ||
| 1356 | while (ULONG_CMP_LT(jiffies_snap, shutdown_time) && | ||
| 1357 | !kthread_should_stop()) { | ||
| 1358 | delta = shutdown_time - jiffies_snap; | ||
| 1359 | if (verbose) | ||
| 1360 | pr_alert("%s" TORTURE_FLAG | ||
| 1361 | "rcu_torture_shutdown task: %lu jiffies remaining\n", | ||
| 1362 | torture_type, delta); | ||
| 1363 | schedule_timeout_interruptible(delta); | ||
| 1364 | jiffies_snap = ACCESS_ONCE(jiffies); | ||
| 1365 | } | ||
| 1366 | if (kthread_should_stop()) { | ||
| 1367 | VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping"); | ||
| 1368 | return 0; | ||
| 1369 | } | ||
| 1370 | |||
| 1371 | /* OK, shut down the system. */ | ||
| 1372 | |||
| 1373 | VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system"); | ||
| 1374 | shutdown_task = NULL; /* Avoid self-kill deadlock. */ | ||
| 1375 | rcu_torture_cleanup(); /* Get the success/failure message. */ | ||
| 1376 | kernel_power_off(); /* Shut down the system. */ | ||
| 1377 | return 0; | ||
| 1378 | } | ||
| 1379 | |||
| 1380 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 1381 | |||
| 1382 | /* | ||
| 1383 | * Execute random CPU-hotplug operations at the interval specified | ||
| 1384 | * by the onoff_interval. | ||
| 1385 | */ | ||
| 1386 | static int | ||
| 1387 | rcu_torture_onoff(void *arg) | ||
| 1388 | { | ||
| 1389 | int cpu; | ||
| 1390 | unsigned long delta; | ||
| 1391 | int maxcpu = -1; | ||
| 1392 | DEFINE_RCU_RANDOM(rand); | ||
| 1393 | int ret; | ||
| 1394 | unsigned long starttime; | ||
| 1395 | |||
| 1396 | VERBOSE_PRINTK_STRING("rcu_torture_onoff task started"); | ||
| 1397 | for_each_online_cpu(cpu) | ||
| 1398 | maxcpu = cpu; | ||
| 1399 | WARN_ON(maxcpu < 0); | ||
| 1400 | if (onoff_holdoff > 0) { | ||
| 1401 | VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff"); | ||
| 1402 | schedule_timeout_interruptible(onoff_holdoff * HZ); | ||
| 1403 | VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff"); | ||
| 1404 | } | ||
| 1405 | while (!kthread_should_stop()) { | ||
| 1406 | cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1); | ||
| 1407 | if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { | ||
| 1408 | if (verbose) | ||
| 1409 | pr_alert("%s" TORTURE_FLAG | ||
| 1410 | "rcu_torture_onoff task: offlining %d\n", | ||
| 1411 | torture_type, cpu); | ||
| 1412 | starttime = jiffies; | ||
| 1413 | n_offline_attempts++; | ||
| 1414 | ret = cpu_down(cpu); | ||
| 1415 | if (ret) { | ||
| 1416 | if (verbose) | ||
| 1417 | pr_alert("%s" TORTURE_FLAG | ||
| 1418 | "rcu_torture_onoff task: offline %d failed: errno %d\n", | ||
| 1419 | torture_type, cpu, ret); | ||
| 1420 | } else { | ||
| 1421 | if (verbose) | ||
| 1422 | pr_alert("%s" TORTURE_FLAG | ||
| 1423 | "rcu_torture_onoff task: offlined %d\n", | ||
| 1424 | torture_type, cpu); | ||
| 1425 | n_offline_successes++; | ||
| 1426 | delta = jiffies - starttime; | ||
| 1427 | sum_offline += delta; | ||
| 1428 | if (min_offline < 0) { | ||
| 1429 | min_offline = delta; | ||
| 1430 | max_offline = delta; | ||
| 1431 | } | ||
| 1432 | if (min_offline > delta) | ||
| 1433 | min_offline = delta; | ||
| 1434 | if (max_offline < delta) | ||
| 1435 | max_offline = delta; | ||
| 1436 | } | ||
| 1437 | } else if (cpu_is_hotpluggable(cpu)) { | ||
| 1438 | if (verbose) | ||
| 1439 | pr_alert("%s" TORTURE_FLAG | ||
| 1440 | "rcu_torture_onoff task: onlining %d\n", | ||
| 1441 | torture_type, cpu); | ||
| 1442 | starttime = jiffies; | ||
| 1443 | n_online_attempts++; | ||
| 1444 | ret = cpu_up(cpu); | ||
| 1445 | if (ret) { | ||
| 1446 | if (verbose) | ||
| 1447 | pr_alert("%s" TORTURE_FLAG | ||
| 1448 | "rcu_torture_onoff task: online %d failed: errno %d\n", | ||
| 1449 | torture_type, cpu, ret); | ||
| 1450 | } else { | ||
| 1451 | if (verbose) | ||
| 1452 | pr_alert("%s" TORTURE_FLAG | ||
| 1453 | "rcu_torture_onoff task: onlined %d\n", | ||
| 1454 | torture_type, cpu); | ||
| 1455 | n_online_successes++; | ||
| 1456 | delta = jiffies - starttime; | ||
| 1457 | sum_online += delta; | ||
| 1458 | if (min_online < 0) { | ||
| 1459 | min_online = delta; | ||
| 1460 | max_online = delta; | ||
| 1461 | } | ||
| 1462 | if (min_online > delta) | ||
| 1463 | min_online = delta; | ||
| 1464 | if (max_online < delta) | ||
| 1465 | max_online = delta; | ||
| 1466 | } | ||
| 1467 | } | ||
| 1468 | schedule_timeout_interruptible(onoff_interval * HZ); | ||
| 1469 | } | ||
| 1470 | VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping"); | ||
| 1471 | return 0; | ||
| 1472 | } | ||
| 1473 | |||
| 1474 | static int | ||
| 1475 | rcu_torture_onoff_init(void) | ||
| 1476 | { | ||
| 1477 | int ret; | ||
| 1478 | |||
| 1479 | if (onoff_interval <= 0) | ||
| 1480 | return 0; | ||
| 1481 | onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff"); | ||
| 1482 | if (IS_ERR(onoff_task)) { | ||
| 1483 | ret = PTR_ERR(onoff_task); | ||
| 1484 | onoff_task = NULL; | ||
| 1485 | return ret; | ||
| 1486 | } | ||
| 1487 | return 0; | ||
| 1488 | } | ||
| 1489 | |||
| 1490 | static void rcu_torture_onoff_cleanup(void) | ||
| 1491 | { | ||
| 1492 | if (onoff_task == NULL) | ||
| 1493 | return; | ||
| 1494 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task"); | ||
| 1495 | kthread_stop(onoff_task); | ||
| 1496 | onoff_task = NULL; | ||
| 1497 | } | ||
| 1498 | |||
| 1499 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 1500 | |||
| 1501 | static int | ||
| 1502 | rcu_torture_onoff_init(void) | ||
| 1503 | { | ||
| 1504 | return 0; | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | static void rcu_torture_onoff_cleanup(void) | ||
| 1508 | { | ||
| 1509 | } | ||
| 1510 | |||
| 1511 | #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 1512 | |||
| 1513 | /* | ||
| 1514 | * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then | 1109 | * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then |
| 1515 | * induces a CPU stall for the time specified by stall_cpu. | 1110 | * induces a CPU stall for the time specified by stall_cpu. |
| 1516 | */ | 1111 | */ |
| @@ -1518,11 +1113,11 @@ static int rcu_torture_stall(void *args) | |||
| 1518 | { | 1113 | { |
| 1519 | unsigned long stop_at; | 1114 | unsigned long stop_at; |
| 1520 | 1115 | ||
| 1521 | VERBOSE_PRINTK_STRING("rcu_torture_stall task started"); | 1116 | VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); |
| 1522 | if (stall_cpu_holdoff > 0) { | 1117 | if (stall_cpu_holdoff > 0) { |
| 1523 | VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff"); | 1118 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); |
| 1524 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ); | 1119 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ); |
| 1525 | VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff"); | 1120 | VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); |
| 1526 | } | 1121 | } |
| 1527 | if (!kthread_should_stop()) { | 1122 | if (!kthread_should_stop()) { |
| 1528 | stop_at = get_seconds() + stall_cpu; | 1123 | stop_at = get_seconds() + stall_cpu; |
| @@ -1536,7 +1131,7 @@ static int rcu_torture_stall(void *args) | |||
| 1536 | rcu_read_unlock(); | 1131 | rcu_read_unlock(); |
| 1537 | pr_alert("rcu_torture_stall end.\n"); | 1132 | pr_alert("rcu_torture_stall end.\n"); |
| 1538 | } | 1133 | } |
| 1539 | rcutorture_shutdown_absorb("rcu_torture_stall"); | 1134 | torture_shutdown_absorb("rcu_torture_stall"); |
| 1540 | while (!kthread_should_stop()) | 1135 | while (!kthread_should_stop()) |
| 1541 | schedule_timeout_interruptible(10 * HZ); | 1136 | schedule_timeout_interruptible(10 * HZ); |
| 1542 | return 0; | 1137 | return 0; |
| @@ -1545,27 +1140,9 @@ static int rcu_torture_stall(void *args) | |||
| 1545 | /* Spawn CPU-stall kthread, if stall_cpu specified. */ | 1140 | /* Spawn CPU-stall kthread, if stall_cpu specified. */ |
| 1546 | static int __init rcu_torture_stall_init(void) | 1141 | static int __init rcu_torture_stall_init(void) |
| 1547 | { | 1142 | { |
| 1548 | int ret; | ||
| 1549 | |||
| 1550 | if (stall_cpu <= 0) | 1143 | if (stall_cpu <= 0) |
| 1551 | return 0; | 1144 | return 0; |
| 1552 | stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall"); | 1145 | return torture_create_kthread(rcu_torture_stall, NULL, stall_task); |
| 1553 | if (IS_ERR(stall_task)) { | ||
| 1554 | ret = PTR_ERR(stall_task); | ||
| 1555 | stall_task = NULL; | ||
| 1556 | return ret; | ||
| 1557 | } | ||
| 1558 | return 0; | ||
| 1559 | } | ||
| 1560 | |||
| 1561 | /* Clean up after the CPU-stall kthread, if one was spawned. */ | ||
| 1562 | static void rcu_torture_stall_cleanup(void) | ||
| 1563 | { | ||
| 1564 | if (stall_task == NULL) | ||
| 1565 | return; | ||
| 1566 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall_task."); | ||
| 1567 | kthread_stop(stall_task); | ||
| 1568 | stall_task = NULL; | ||
| 1569 | } | 1146 | } |
| 1570 | 1147 | ||
| 1571 | /* Callback function for RCU barrier testing. */ | 1148 | /* Callback function for RCU barrier testing. */ |
| @@ -1583,28 +1160,24 @@ static int rcu_torture_barrier_cbs(void *arg) | |||
| 1583 | struct rcu_head rcu; | 1160 | struct rcu_head rcu; |
| 1584 | 1161 | ||
| 1585 | init_rcu_head_on_stack(&rcu); | 1162 | init_rcu_head_on_stack(&rcu); |
| 1586 | VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task started"); | 1163 | VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); |
| 1587 | set_user_nice(current, 19); | 1164 | set_user_nice(current, MAX_NICE); |
| 1588 | do { | 1165 | do { |
| 1589 | wait_event(barrier_cbs_wq[myid], | 1166 | wait_event(barrier_cbs_wq[myid], |
| 1590 | (newphase = | 1167 | (newphase = |
| 1591 | ACCESS_ONCE(barrier_phase)) != lastphase || | 1168 | ACCESS_ONCE(barrier_phase)) != lastphase || |
| 1592 | kthread_should_stop() || | 1169 | torture_must_stop()); |
| 1593 | fullstop != FULLSTOP_DONTSTOP); | ||
| 1594 | lastphase = newphase; | 1170 | lastphase = newphase; |
| 1595 | smp_mb(); /* ensure barrier_phase load before ->call(). */ | 1171 | smp_mb(); /* ensure barrier_phase load before ->call(). */ |
| 1596 | if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP) | 1172 | if (torture_must_stop()) |
| 1597 | break; | 1173 | break; |
| 1598 | cur_ops->call(&rcu, rcu_torture_barrier_cbf); | 1174 | cur_ops->call(&rcu, rcu_torture_barrier_cbf); |
| 1599 | if (atomic_dec_and_test(&barrier_cbs_count)) | 1175 | if (atomic_dec_and_test(&barrier_cbs_count)) |
| 1600 | wake_up(&barrier_wq); | 1176 | wake_up(&barrier_wq); |
| 1601 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 1177 | } while (!torture_must_stop()); |
| 1602 | VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task stopping"); | ||
| 1603 | rcutorture_shutdown_absorb("rcu_torture_barrier_cbs"); | ||
| 1604 | while (!kthread_should_stop()) | ||
| 1605 | schedule_timeout_interruptible(1); | ||
| 1606 | cur_ops->cb_barrier(); | 1178 | cur_ops->cb_barrier(); |
| 1607 | destroy_rcu_head_on_stack(&rcu); | 1179 | destroy_rcu_head_on_stack(&rcu); |
| 1180 | torture_kthread_stopping("rcu_torture_barrier_cbs"); | ||
| 1608 | return 0; | 1181 | return 0; |
| 1609 | } | 1182 | } |
| 1610 | 1183 | ||
| @@ -1613,7 +1186,7 @@ static int rcu_torture_barrier(void *arg) | |||
| 1613 | { | 1186 | { |
| 1614 | int i; | 1187 | int i; |
| 1615 | 1188 | ||
| 1616 | VERBOSE_PRINTK_STRING("rcu_torture_barrier task starting"); | 1189 | VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); |
| 1617 | do { | 1190 | do { |
| 1618 | atomic_set(&barrier_cbs_invoked, 0); | 1191 | atomic_set(&barrier_cbs_invoked, 0); |
| 1619 | atomic_set(&barrier_cbs_count, n_barrier_cbs); | 1192 | atomic_set(&barrier_cbs_count, n_barrier_cbs); |
| @@ -1623,9 +1196,8 @@ static int rcu_torture_barrier(void *arg) | |||
| 1623 | wake_up(&barrier_cbs_wq[i]); | 1196 | wake_up(&barrier_cbs_wq[i]); |
| 1624 | wait_event(barrier_wq, | 1197 | wait_event(barrier_wq, |
| 1625 | atomic_read(&barrier_cbs_count) == 0 || | 1198 | atomic_read(&barrier_cbs_count) == 0 || |
| 1626 | kthread_should_stop() || | 1199 | torture_must_stop()); |
| 1627 | fullstop != FULLSTOP_DONTSTOP); | 1200 | if (torture_must_stop()) |
| 1628 | if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP) | ||
| 1629 | break; | 1201 | break; |
| 1630 | n_barrier_attempts++; | 1202 | n_barrier_attempts++; |
| 1631 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ | 1203 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ |
| @@ -1635,11 +1207,8 @@ static int rcu_torture_barrier(void *arg) | |||
| 1635 | } | 1207 | } |
| 1636 | n_barrier_successes++; | 1208 | n_barrier_successes++; |
| 1637 | schedule_timeout_interruptible(HZ / 10); | 1209 | schedule_timeout_interruptible(HZ / 10); |
| 1638 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 1210 | } while (!torture_must_stop()); |
| 1639 | VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping"); | 1211 | torture_kthread_stopping("rcu_torture_barrier"); |
| 1640 | rcutorture_shutdown_absorb("rcu_torture_barrier"); | ||
| 1641 | while (!kthread_should_stop()) | ||
| 1642 | schedule_timeout_interruptible(1); | ||
| 1643 | return 0; | 1212 | return 0; |
| 1644 | } | 1213 | } |
| 1645 | 1214 | ||
| @@ -1672,24 +1241,13 @@ static int rcu_torture_barrier_init(void) | |||
| 1672 | return -ENOMEM; | 1241 | return -ENOMEM; |
| 1673 | for (i = 0; i < n_barrier_cbs; i++) { | 1242 | for (i = 0; i < n_barrier_cbs; i++) { |
| 1674 | init_waitqueue_head(&barrier_cbs_wq[i]); | 1243 | init_waitqueue_head(&barrier_cbs_wq[i]); |
| 1675 | barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs, | 1244 | ret = torture_create_kthread(rcu_torture_barrier_cbs, |
| 1676 | (void *)(long)i, | 1245 | (void *)(long)i, |
| 1677 | "rcu_torture_barrier_cbs"); | 1246 | barrier_cbs_tasks[i]); |
| 1678 | if (IS_ERR(barrier_cbs_tasks[i])) { | 1247 | if (ret) |
| 1679 | ret = PTR_ERR(barrier_cbs_tasks[i]); | ||
| 1680 | VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier_cbs"); | ||
| 1681 | barrier_cbs_tasks[i] = NULL; | ||
| 1682 | return ret; | 1248 | return ret; |
| 1683 | } | ||
| 1684 | } | 1249 | } |
| 1685 | barrier_task = kthread_run(rcu_torture_barrier, NULL, | 1250 | return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); |
| 1686 | "rcu_torture_barrier"); | ||
| 1687 | if (IS_ERR(barrier_task)) { | ||
| 1688 | ret = PTR_ERR(barrier_task); | ||
| 1689 | VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier"); | ||
| 1690 | barrier_task = NULL; | ||
| 1691 | } | ||
| 1692 | return 0; | ||
| 1693 | } | 1251 | } |
| 1694 | 1252 | ||
| 1695 | /* Clean up after RCU barrier testing. */ | 1253 | /* Clean up after RCU barrier testing. */ |
| @@ -1697,19 +1255,11 @@ static void rcu_torture_barrier_cleanup(void) | |||
| 1697 | { | 1255 | { |
| 1698 | int i; | 1256 | int i; |
| 1699 | 1257 | ||
| 1700 | if (barrier_task != NULL) { | 1258 | torture_stop_kthread(rcu_torture_barrier, barrier_task); |
| 1701 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier task"); | ||
| 1702 | kthread_stop(barrier_task); | ||
| 1703 | barrier_task = NULL; | ||
| 1704 | } | ||
| 1705 | if (barrier_cbs_tasks != NULL) { | 1259 | if (barrier_cbs_tasks != NULL) { |
| 1706 | for (i = 0; i < n_barrier_cbs; i++) { | 1260 | for (i = 0; i < n_barrier_cbs; i++) |
| 1707 | if (barrier_cbs_tasks[i] != NULL) { | 1261 | torture_stop_kthread(rcu_torture_barrier_cbs, |
| 1708 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier_cbs task"); | 1262 | barrier_cbs_tasks[i]); |
| 1709 | kthread_stop(barrier_cbs_tasks[i]); | ||
| 1710 | barrier_cbs_tasks[i] = NULL; | ||
| 1711 | } | ||
| 1712 | } | ||
| 1713 | kfree(barrier_cbs_tasks); | 1263 | kfree(barrier_cbs_tasks); |
| 1714 | barrier_cbs_tasks = NULL; | 1264 | barrier_cbs_tasks = NULL; |
| 1715 | } | 1265 | } |
| @@ -1747,90 +1297,42 @@ rcu_torture_cleanup(void) | |||
| 1747 | { | 1297 | { |
| 1748 | int i; | 1298 | int i; |
| 1749 | 1299 | ||
| 1750 | mutex_lock(&fullstop_mutex); | ||
| 1751 | rcutorture_record_test_transition(); | 1300 | rcutorture_record_test_transition(); |
| 1752 | if (fullstop == FULLSTOP_SHUTDOWN) { | 1301 | if (torture_cleanup()) { |
| 1753 | pr_warn(/* but going down anyway, so... */ | ||
| 1754 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | ||
| 1755 | mutex_unlock(&fullstop_mutex); | ||
| 1756 | schedule_timeout_uninterruptible(10); | ||
| 1757 | if (cur_ops->cb_barrier != NULL) | 1302 | if (cur_ops->cb_barrier != NULL) |
| 1758 | cur_ops->cb_barrier(); | 1303 | cur_ops->cb_barrier(); |
| 1759 | return; | 1304 | return; |
| 1760 | } | 1305 | } |
| 1761 | fullstop = FULLSTOP_RMMOD; | ||
| 1762 | mutex_unlock(&fullstop_mutex); | ||
| 1763 | unregister_reboot_notifier(&rcutorture_shutdown_nb); | ||
| 1764 | rcu_torture_barrier_cleanup(); | ||
| 1765 | rcu_torture_stall_cleanup(); | ||
| 1766 | if (stutter_task) { | ||
| 1767 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); | ||
| 1768 | kthread_stop(stutter_task); | ||
| 1769 | } | ||
| 1770 | stutter_task = NULL; | ||
| 1771 | if (shuffler_task) { | ||
| 1772 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); | ||
| 1773 | kthread_stop(shuffler_task); | ||
| 1774 | free_cpumask_var(shuffle_tmp_mask); | ||
| 1775 | } | ||
| 1776 | shuffler_task = NULL; | ||
| 1777 | 1306 | ||
| 1778 | if (writer_task) { | 1307 | rcu_torture_barrier_cleanup(); |
| 1779 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task"); | 1308 | torture_stop_kthread(rcu_torture_stall, stall_task); |
| 1780 | kthread_stop(writer_task); | 1309 | torture_stop_kthread(rcu_torture_writer, writer_task); |
| 1781 | } | ||
| 1782 | writer_task = NULL; | ||
| 1783 | 1310 | ||
| 1784 | if (reader_tasks) { | 1311 | if (reader_tasks) { |
| 1785 | for (i = 0; i < nrealreaders; i++) { | 1312 | for (i = 0; i < nrealreaders; i++) |
| 1786 | if (reader_tasks[i]) { | 1313 | torture_stop_kthread(rcu_torture_reader, |
| 1787 | VERBOSE_PRINTK_STRING( | 1314 | reader_tasks[i]); |
| 1788 | "Stopping rcu_torture_reader task"); | ||
| 1789 | kthread_stop(reader_tasks[i]); | ||
| 1790 | } | ||
| 1791 | reader_tasks[i] = NULL; | ||
| 1792 | } | ||
| 1793 | kfree(reader_tasks); | 1315 | kfree(reader_tasks); |
| 1794 | reader_tasks = NULL; | ||
| 1795 | } | 1316 | } |
| 1796 | rcu_torture_current = NULL; | 1317 | rcu_torture_current = NULL; |
| 1797 | 1318 | ||
| 1798 | if (fakewriter_tasks) { | 1319 | if (fakewriter_tasks) { |
| 1799 | for (i = 0; i < nfakewriters; i++) { | 1320 | for (i = 0; i < nfakewriters; i++) { |
| 1800 | if (fakewriter_tasks[i]) { | 1321 | torture_stop_kthread(rcu_torture_fakewriter, |
| 1801 | VERBOSE_PRINTK_STRING( | 1322 | fakewriter_tasks[i]); |
| 1802 | "Stopping rcu_torture_fakewriter task"); | ||
| 1803 | kthread_stop(fakewriter_tasks[i]); | ||
| 1804 | } | ||
| 1805 | fakewriter_tasks[i] = NULL; | ||
| 1806 | } | 1323 | } |
| 1807 | kfree(fakewriter_tasks); | 1324 | kfree(fakewriter_tasks); |
| 1808 | fakewriter_tasks = NULL; | 1325 | fakewriter_tasks = NULL; |
| 1809 | } | 1326 | } |
| 1810 | 1327 | ||
| 1811 | if (stats_task) { | 1328 | torture_stop_kthread(rcu_torture_stats, stats_task); |
| 1812 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task"); | 1329 | torture_stop_kthread(rcu_torture_fqs, fqs_task); |
| 1813 | kthread_stop(stats_task); | ||
| 1814 | } | ||
| 1815 | stats_task = NULL; | ||
| 1816 | |||
| 1817 | if (fqs_task) { | ||
| 1818 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task"); | ||
| 1819 | kthread_stop(fqs_task); | ||
| 1820 | } | ||
| 1821 | fqs_task = NULL; | ||
| 1822 | if ((test_boost == 1 && cur_ops->can_boost) || | 1330 | if ((test_boost == 1 && cur_ops->can_boost) || |
| 1823 | test_boost == 2) { | 1331 | test_boost == 2) { |
| 1824 | unregister_cpu_notifier(&rcutorture_cpu_nb); | 1332 | unregister_cpu_notifier(&rcutorture_cpu_nb); |
| 1825 | for_each_possible_cpu(i) | 1333 | for_each_possible_cpu(i) |
| 1826 | rcutorture_booster_cleanup(i); | 1334 | rcutorture_booster_cleanup(i); |
| 1827 | } | 1335 | } |
| 1828 | if (shutdown_task != NULL) { | ||
| 1829 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task"); | ||
| 1830 | kthread_stop(shutdown_task); | ||
| 1831 | } | ||
| 1832 | shutdown_task = NULL; | ||
| 1833 | rcu_torture_onoff_cleanup(); | ||
| 1834 | 1336 | ||
| 1835 | /* Wait for all RCU callbacks to fire. */ | 1337 | /* Wait for all RCU callbacks to fire. */ |
| 1836 | 1338 | ||
| @@ -1841,8 +1343,7 @@ rcu_torture_cleanup(void) | |||
| 1841 | 1343 | ||
| 1842 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) | 1344 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) |
| 1843 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); | 1345 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); |
| 1844 | else if (n_online_successes != n_online_attempts || | 1346 | else if (torture_onoff_failures()) |
| 1845 | n_offline_successes != n_offline_attempts) | ||
| 1846 | rcu_torture_print_module_parms(cur_ops, | 1347 | rcu_torture_print_module_parms(cur_ops, |
| 1847 | "End of test: RCU_HOTPLUG"); | 1348 | "End of test: RCU_HOTPLUG"); |
| 1848 | else | 1349 | else |
| @@ -1911,12 +1412,11 @@ rcu_torture_init(void) | |||
| 1911 | int i; | 1412 | int i; |
| 1912 | int cpu; | 1413 | int cpu; |
| 1913 | int firsterr = 0; | 1414 | int firsterr = 0; |
| 1914 | int retval; | ||
| 1915 | static struct rcu_torture_ops *torture_ops[] = { | 1415 | static struct rcu_torture_ops *torture_ops[] = { |
| 1916 | &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops, | 1416 | &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops, |
| 1917 | }; | 1417 | }; |
| 1918 | 1418 | ||
| 1919 | mutex_lock(&fullstop_mutex); | 1419 | torture_init_begin(torture_type, verbose, &rcutorture_runnable); |
| 1920 | 1420 | ||
| 1921 | /* Process args and tell the world that the torturer is on the job. */ | 1421 | /* Process args and tell the world that the torturer is on the job. */ |
| 1922 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { | 1422 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
| @@ -1931,7 +1431,7 @@ rcu_torture_init(void) | |||
| 1931 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) | 1431 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
| 1932 | pr_alert(" %s", torture_ops[i]->name); | 1432 | pr_alert(" %s", torture_ops[i]->name); |
| 1933 | pr_alert("\n"); | 1433 | pr_alert("\n"); |
| 1934 | mutex_unlock(&fullstop_mutex); | 1434 | torture_init_end(); |
| 1935 | return -EINVAL; | 1435 | return -EINVAL; |
| 1936 | } | 1436 | } |
| 1937 | if (cur_ops->fqs == NULL && fqs_duration != 0) { | 1437 | if (cur_ops->fqs == NULL && fqs_duration != 0) { |
| @@ -1946,7 +1446,6 @@ rcu_torture_init(void) | |||
| 1946 | else | 1446 | else |
| 1947 | nrealreaders = 2 * num_online_cpus(); | 1447 | nrealreaders = 2 * num_online_cpus(); |
| 1948 | rcu_torture_print_module_parms(cur_ops, "Start of test"); | 1448 | rcu_torture_print_module_parms(cur_ops, "Start of test"); |
| 1949 | fullstop = FULLSTOP_DONTSTOP; | ||
| 1950 | 1449 | ||
| 1951 | /* Set up the freelist. */ | 1450 | /* Set up the freelist. */ |
| 1952 | 1451 | ||
| @@ -1982,108 +1481,61 @@ rcu_torture_init(void) | |||
| 1982 | 1481 | ||
| 1983 | /* Start up the kthreads. */ | 1482 | /* Start up the kthreads. */ |
| 1984 | 1483 | ||
| 1985 | VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task"); | 1484 | firsterr = torture_create_kthread(rcu_torture_writer, NULL, |
| 1986 | writer_task = kthread_create(rcu_torture_writer, NULL, | 1485 | writer_task); |
| 1987 | "rcu_torture_writer"); | 1486 | if (firsterr) |
| 1988 | if (IS_ERR(writer_task)) { | ||
| 1989 | firsterr = PTR_ERR(writer_task); | ||
| 1990 | VERBOSE_PRINTK_ERRSTRING("Failed to create writer"); | ||
| 1991 | writer_task = NULL; | ||
| 1992 | goto unwind; | 1487 | goto unwind; |
| 1993 | } | ||
| 1994 | wake_up_process(writer_task); | ||
| 1995 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), | 1488 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), |
| 1996 | GFP_KERNEL); | 1489 | GFP_KERNEL); |
| 1997 | if (fakewriter_tasks == NULL) { | 1490 | if (fakewriter_tasks == NULL) { |
| 1998 | VERBOSE_PRINTK_ERRSTRING("out of memory"); | 1491 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
| 1999 | firsterr = -ENOMEM; | 1492 | firsterr = -ENOMEM; |
| 2000 | goto unwind; | 1493 | goto unwind; |
| 2001 | } | 1494 | } |
| 2002 | for (i = 0; i < nfakewriters; i++) { | 1495 | for (i = 0; i < nfakewriters; i++) { |
| 2003 | VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task"); | 1496 | firsterr = torture_create_kthread(rcu_torture_fakewriter, |
| 2004 | fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL, | 1497 | NULL, fakewriter_tasks[i]); |
| 2005 | "rcu_torture_fakewriter"); | 1498 | if (firsterr) |
| 2006 | if (IS_ERR(fakewriter_tasks[i])) { | ||
| 2007 | firsterr = PTR_ERR(fakewriter_tasks[i]); | ||
| 2008 | VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter"); | ||
| 2009 | fakewriter_tasks[i] = NULL; | ||
| 2010 | goto unwind; | 1499 | goto unwind; |
| 2011 | } | ||
| 2012 | } | 1500 | } |
| 2013 | reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]), | 1501 | reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]), |
| 2014 | GFP_KERNEL); | 1502 | GFP_KERNEL); |
| 2015 | if (reader_tasks == NULL) { | 1503 | if (reader_tasks == NULL) { |
| 2016 | VERBOSE_PRINTK_ERRSTRING("out of memory"); | 1504 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
| 2017 | firsterr = -ENOMEM; | 1505 | firsterr = -ENOMEM; |
| 2018 | goto unwind; | 1506 | goto unwind; |
| 2019 | } | 1507 | } |
| 2020 | for (i = 0; i < nrealreaders; i++) { | 1508 | for (i = 0; i < nrealreaders; i++) { |
| 2021 | VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task"); | 1509 | firsterr = torture_create_kthread(rcu_torture_reader, NULL, |
| 2022 | reader_tasks[i] = kthread_run(rcu_torture_reader, NULL, | 1510 | reader_tasks[i]); |
| 2023 | "rcu_torture_reader"); | 1511 | if (firsterr) |
| 2024 | if (IS_ERR(reader_tasks[i])) { | ||
| 2025 | firsterr = PTR_ERR(reader_tasks[i]); | ||
| 2026 | VERBOSE_PRINTK_ERRSTRING("Failed to create reader"); | ||
| 2027 | reader_tasks[i] = NULL; | ||
| 2028 | goto unwind; | 1512 | goto unwind; |
| 2029 | } | ||
| 2030 | } | 1513 | } |
| 2031 | if (stat_interval > 0) { | 1514 | if (stat_interval > 0) { |
| 2032 | VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task"); | 1515 | firsterr = torture_create_kthread(rcu_torture_stats, NULL, |
| 2033 | stats_task = kthread_run(rcu_torture_stats, NULL, | 1516 | stats_task); |
| 2034 | "rcu_torture_stats"); | 1517 | if (firsterr) |
| 2035 | if (IS_ERR(stats_task)) { | ||
| 2036 | firsterr = PTR_ERR(stats_task); | ||
| 2037 | VERBOSE_PRINTK_ERRSTRING("Failed to create stats"); | ||
| 2038 | stats_task = NULL; | ||
| 2039 | goto unwind; | 1518 | goto unwind; |
| 2040 | } | ||
| 2041 | } | 1519 | } |
| 2042 | if (test_no_idle_hz) { | 1520 | if (test_no_idle_hz) { |
| 2043 | rcu_idle_cpu = num_online_cpus() - 1; | 1521 | firsterr = torture_shuffle_init(shuffle_interval * HZ); |
| 2044 | 1522 | if (firsterr) | |
| 2045 | if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) { | ||
| 2046 | firsterr = -ENOMEM; | ||
| 2047 | VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask"); | ||
| 2048 | goto unwind; | ||
| 2049 | } | ||
| 2050 | |||
| 2051 | /* Create the shuffler thread */ | ||
| 2052 | shuffler_task = kthread_run(rcu_torture_shuffle, NULL, | ||
| 2053 | "rcu_torture_shuffle"); | ||
| 2054 | if (IS_ERR(shuffler_task)) { | ||
| 2055 | free_cpumask_var(shuffle_tmp_mask); | ||
| 2056 | firsterr = PTR_ERR(shuffler_task); | ||
| 2057 | VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); | ||
| 2058 | shuffler_task = NULL; | ||
| 2059 | goto unwind; | 1523 | goto unwind; |
| 2060 | } | ||
| 2061 | } | 1524 | } |
| 2062 | if (stutter < 0) | 1525 | if (stutter < 0) |
| 2063 | stutter = 0; | 1526 | stutter = 0; |
| 2064 | if (stutter) { | 1527 | if (stutter) { |
| 2065 | /* Create the stutter thread */ | 1528 | firsterr = torture_stutter_init(stutter * HZ); |
| 2066 | stutter_task = kthread_run(rcu_torture_stutter, NULL, | 1529 | if (firsterr) |
| 2067 | "rcu_torture_stutter"); | ||
| 2068 | if (IS_ERR(stutter_task)) { | ||
| 2069 | firsterr = PTR_ERR(stutter_task); | ||
| 2070 | VERBOSE_PRINTK_ERRSTRING("Failed to create stutter"); | ||
| 2071 | stutter_task = NULL; | ||
| 2072 | goto unwind; | 1530 | goto unwind; |
| 2073 | } | ||
| 2074 | } | 1531 | } |
| 2075 | if (fqs_duration < 0) | 1532 | if (fqs_duration < 0) |
| 2076 | fqs_duration = 0; | 1533 | fqs_duration = 0; |
| 2077 | if (fqs_duration) { | 1534 | if (fqs_duration) { |
| 2078 | /* Create the stutter thread */ | 1535 | /* Create the fqs thread */ |
| 2079 | fqs_task = kthread_run(rcu_torture_fqs, NULL, | 1536 | torture_create_kthread(rcu_torture_fqs, NULL, fqs_task); |
| 2080 | "rcu_torture_fqs"); | 1537 | if (firsterr) |
| 2081 | if (IS_ERR(fqs_task)) { | ||
| 2082 | firsterr = PTR_ERR(fqs_task); | ||
| 2083 | VERBOSE_PRINTK_ERRSTRING("Failed to create fqs"); | ||
| 2084 | fqs_task = NULL; | ||
| 2085 | goto unwind; | 1538 | goto unwind; |
| 2086 | } | ||
| 2087 | } | 1539 | } |
| 2088 | if (test_boost_interval < 1) | 1540 | if (test_boost_interval < 1) |
| 2089 | test_boost_interval = 1; | 1541 | test_boost_interval = 1; |
| @@ -2097,49 +1549,31 @@ rcu_torture_init(void) | |||
| 2097 | for_each_possible_cpu(i) { | 1549 | for_each_possible_cpu(i) { |
| 2098 | if (cpu_is_offline(i)) | 1550 | if (cpu_is_offline(i)) |
| 2099 | continue; /* Heuristic: CPU can go offline. */ | 1551 | continue; /* Heuristic: CPU can go offline. */ |
| 2100 | retval = rcutorture_booster_init(i); | 1552 | firsterr = rcutorture_booster_init(i); |
| 2101 | if (retval < 0) { | 1553 | if (firsterr) |
| 2102 | firsterr = retval; | ||
| 2103 | goto unwind; | 1554 | goto unwind; |
| 2104 | } | ||
| 2105 | } | 1555 | } |
| 2106 | } | 1556 | } |
| 2107 | if (shutdown_secs > 0) { | 1557 | firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); |
| 2108 | shutdown_time = jiffies + shutdown_secs * HZ; | 1558 | if (firsterr) |
| 2109 | shutdown_task = kthread_create(rcu_torture_shutdown, NULL, | ||
| 2110 | "rcu_torture_shutdown"); | ||
| 2111 | if (IS_ERR(shutdown_task)) { | ||
| 2112 | firsterr = PTR_ERR(shutdown_task); | ||
| 2113 | VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown"); | ||
| 2114 | shutdown_task = NULL; | ||
| 2115 | goto unwind; | ||
| 2116 | } | ||
| 2117 | wake_up_process(shutdown_task); | ||
| 2118 | } | ||
| 2119 | i = rcu_torture_onoff_init(); | ||
| 2120 | if (i != 0) { | ||
| 2121 | firsterr = i; | ||
| 2122 | goto unwind; | 1559 | goto unwind; |
| 2123 | } | 1560 | firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ); |
| 2124 | register_reboot_notifier(&rcutorture_shutdown_nb); | 1561 | if (firsterr) |
| 2125 | i = rcu_torture_stall_init(); | ||
| 2126 | if (i != 0) { | ||
| 2127 | firsterr = i; | ||
| 2128 | goto unwind; | 1562 | goto unwind; |
| 2129 | } | 1563 | firsterr = rcu_torture_stall_init(); |
| 2130 | retval = rcu_torture_barrier_init(); | 1564 | if (firsterr) |
| 2131 | if (retval != 0) { | 1565 | goto unwind; |
| 2132 | firsterr = retval; | 1566 | firsterr = rcu_torture_barrier_init(); |
| 1567 | if (firsterr) | ||
| 2133 | goto unwind; | 1568 | goto unwind; |
| 2134 | } | ||
| 2135 | if (object_debug) | 1569 | if (object_debug) |
| 2136 | rcu_test_debug_objects(); | 1570 | rcu_test_debug_objects(); |
| 2137 | rcutorture_record_test_transition(); | 1571 | rcutorture_record_test_transition(); |
| 2138 | mutex_unlock(&fullstop_mutex); | 1572 | torture_init_end(); |
| 2139 | return 0; | 1573 | return 0; |
| 2140 | 1574 | ||
| 2141 | unwind: | 1575 | unwind: |
| 2142 | mutex_unlock(&fullstop_mutex); | 1576 | torture_init_end(); |
| 2143 | rcu_torture_cleanup(); | 1577 | rcu_torture_cleanup(); |
| 2144 | return firsterr; | 1578 | return firsterr; |
| 2145 | } | 1579 | } |
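
The kernel/rcu/rcutorture.c hunks above are largely a mechanical conversion: each kthread's hand-rolled lifecycle (the kthread_run()/IS_ERR() creation, the fullstop/FULLSTOP_DONTSTOP stop checks, and the rcutorture_shutdown_absorb() spin loops) is replaced by shared helpers, and the per-test shuffle, stutter, shutdown, and CPU-hotplug onoff kthreads are dropped in favor of torture_shuffle_init(), torture_stutter_init(), torture_shutdown_init(), and torture_onoff_init(). Below is a minimal sketch of the kthread shape that results; the "example" names are hypothetical, while the torture_*() helpers are the ones visible in the diff, presumably declared by the new common torture infrastructure's header.

```c
/*
 * Illustrative sketch only -- not part of the commit.  The "example"
 * names are hypothetical; the torture_*() helpers are those used in
 * the converted rcutorture code above.
 */
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/torture.h>

static struct task_struct *example_task;

static int example_torture_kthread(void *arg)
{
	VERBOSE_TOROUT_STRING("example task started");
	do {
		/* ... one iteration of the torture workload ... */
		stutter_wait("example");	/* honor the global stutter period */
	} while (!torture_must_stop());	/* replaces kthread_should_stop()/fullstop checks */
	torture_kthread_stopping("example");	/* replaces the shutdown_absorb() spin loop */
	return 0;
}

static int __init example_init(void)
{
	/* Encapsulates the kthread_run()/IS_ERR()/PTR_ERR() boilerplate removed above. */
	return torture_create_kthread(example_torture_kthread, NULL, example_task);
}

static void example_cleanup(void)
{
	/* Stops the kthread, as in the converted rcu_torture_cleanup() above. */
	torture_stop_kthread(example_torture_kthread, example_task);
}
```

Under this scheme torture_stop_kthread() appears to handle both stopping the task and clearing the task pointer, which is consistent with the diff dropping the explicit NULL assignments and the per-task "Stopping ..." messages.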
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 3318d8284384..c639556f3fa0 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, you can access it online at |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * | 17 | * |
| 18 | * Copyright (C) IBM Corporation, 2006 | 18 | * Copyright (C) IBM Corporation, 2006 |
| 19 | * Copyright (C) Fujitsu, 2012 | 19 | * Copyright (C) Fujitsu, 2012 |
| @@ -36,8 +36,6 @@ | |||
| 36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
| 37 | #include <linux/srcu.h> | 37 | #include <linux/srcu.h> |
| 38 | 38 | ||
| 39 | #include <trace/events/rcu.h> | ||
| 40 | |||
| 41 | #include "rcu.h" | 39 | #include "rcu.h" |
| 42 | 40 | ||
| 43 | /* | 41 | /* |
| @@ -398,7 +396,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head, | |||
| 398 | rcu_batch_queue(&sp->batch_queue, head); | 396 | rcu_batch_queue(&sp->batch_queue, head); |
| 399 | if (!sp->running) { | 397 | if (!sp->running) { |
| 400 | sp->running = true; | 398 | sp->running = true; |
| 401 | schedule_delayed_work(&sp->work, 0); | 399 | queue_delayed_work(system_power_efficient_wq, &sp->work, 0); |
| 402 | } | 400 | } |
| 403 | spin_unlock_irqrestore(&sp->queue_lock, flags); | 401 | spin_unlock_irqrestore(&sp->queue_lock, flags); |
| 404 | } | 402 | } |
| @@ -674,7 +672,8 @@ static void srcu_reschedule(struct srcu_struct *sp) | |||
| 674 | } | 672 | } |
| 675 | 673 | ||
| 676 | if (pending) | 674 | if (pending) |
| 677 | schedule_delayed_work(&sp->work, SRCU_INTERVAL); | 675 | queue_delayed_work(system_power_efficient_wq, |
| 676 | &sp->work, SRCU_INTERVAL); | ||
| 678 | } | 677 | } |
| 679 | 678 | ||
| 680 | /* | 679 | /* |
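
Aside from the license-boilerplate update and the dropped trace/events/rcu.h include, the functional change to kernel/rcu/srcu.c above is that SRCU's deferred callback processing is now queued on system_power_efficient_wq rather than the default system workqueue, so it can run unbound (and avoid waking an otherwise idle CPU) when power-efficient workqueues are enabled. A hedged, self-contained sketch of that pattern follows; the "example_*" names and delays are illustrative, while system_power_efficient_wq and queue_delayed_work() are the interfaces actually used in the diff.

```c
/* Hedged sketch, not part of the commit: self-rearming delayed work on
 * the power-efficient system workqueue, as in the srcu.c hunks above. */
#include <linux/workqueue.h>

static void example_func(struct work_struct *unused);
static DECLARE_DELAYED_WORK(example_work, example_func);

static void example_start(void)
{
	/* Kick off processing immediately, as call_srcu() does above. */
	queue_delayed_work(system_power_efficient_wq, &example_work, 0);
}

static void example_func(struct work_struct *unused)
{
	/* ... process a batch of deferred callbacks ... */

	/*
	 * Re-arm on the power-efficient system workqueue, as
	 * srcu_reschedule() now does; this queue may run unbound when
	 * power-efficient workqueues are enabled, so an idle CPU need
	 * not be woken just to advance the state machine.
	 */
	queue_delayed_work(system_power_efficient_wq, &example_work, 1);
}
```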
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 1254f312d024..d9efcc13008c 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, you can access it online at |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * | 17 | * |
| 18 | * Copyright IBM Corporation, 2008 | 18 | * Copyright IBM Corporation, 2008 |
| 19 | * | 19 | * |
| @@ -37,10 +37,6 @@ | |||
| 37 | #include <linux/prefetch.h> | 37 | #include <linux/prefetch.h> |
| 38 | #include <linux/ftrace_event.h> | 38 | #include <linux/ftrace_event.h> |
| 39 | 39 | ||
| 40 | #ifdef CONFIG_RCU_TRACE | ||
| 41 | #include <trace/events/rcu.h> | ||
| 42 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | ||
| 43 | |||
| 44 | #include "rcu.h" | 40 | #include "rcu.h" |
| 45 | 41 | ||
| 46 | /* Forward declarations for tiny_plugin.h. */ | 42 | /* Forward declarations for tiny_plugin.h. */ |
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h index 280d06cae352..431528520562 100644 --- a/kernel/rcu/tiny_plugin.h +++ b/kernel/rcu/tiny_plugin.h | |||
| @@ -14,8 +14,8 @@ | |||
| 14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
| 15 | * | 15 | * |
| 16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, you can access it online at |
| 18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 19 | * | 19 | * |
| 20 | * Copyright (c) 2010 Linaro | 20 | * Copyright (c) 2010 Linaro |
| 21 | * | 21 | * |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b3d116cd072d..0c47e300210a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, you can access it online at |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * | 17 | * |
| 18 | * Copyright IBM Corporation, 2008 | 18 | * Copyright IBM Corporation, 2008 |
| 19 | * | 19 | * |
| @@ -58,8 +58,6 @@ | |||
| 58 | #include <linux/suspend.h> | 58 | #include <linux/suspend.h> |
| 59 | 59 | ||
| 60 | #include "tree.h" | 60 | #include "tree.h" |
| 61 | #include <trace/events/rcu.h> | ||
| 62 | |||
| 63 | #include "rcu.h" | 61 | #include "rcu.h" |
| 64 | 62 | ||
| 65 | MODULE_ALIAS("rcutree"); | 63 | MODULE_ALIAS("rcutree"); |
| @@ -837,7 +835,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, | |||
| 837 | * to the next. Only do this for the primary flavor of RCU. | 835 | * to the next. Only do this for the primary flavor of RCU. |
| 838 | */ | 836 | */ |
| 839 | if (rdp->rsp == rcu_state && | 837 | if (rdp->rsp == rcu_state && |
| 840 | ULONG_CMP_GE(ACCESS_ONCE(jiffies), rdp->rsp->jiffies_resched)) { | 838 | ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { |
| 841 | rdp->rsp->jiffies_resched += 5; | 839 | rdp->rsp->jiffies_resched += 5; |
| 842 | resched_cpu(rdp->cpu); | 840 | resched_cpu(rdp->cpu); |
| 843 | } | 841 | } |
| @@ -847,7 +845,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, | |||
| 847 | 845 | ||
| 848 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 846 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
| 849 | { | 847 | { |
| 850 | unsigned long j = ACCESS_ONCE(jiffies); | 848 | unsigned long j = jiffies; |
| 851 | unsigned long j1; | 849 | unsigned long j1; |
| 852 | 850 | ||
| 853 | rsp->gp_start = j; | 851 | rsp->gp_start = j; |
| @@ -1005,7 +1003,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 1005 | 1003 | ||
| 1006 | if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp)) | 1004 | if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp)) |
| 1007 | return; | 1005 | return; |
| 1008 | j = ACCESS_ONCE(jiffies); | 1006 | j = jiffies; |
| 1009 | 1007 | ||
| 1010 | /* | 1008 | /* |
| 1011 | * Lots of memory barriers to reject false positives. | 1009 | * Lots of memory barriers to reject false positives. |
| @@ -1423,13 +1421,14 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
| 1423 | 1421 | ||
| 1424 | /* Advance to a new grace period and initialize state. */ | 1422 | /* Advance to a new grace period and initialize state. */ |
| 1425 | record_gp_stall_check_time(rsp); | 1423 | record_gp_stall_check_time(rsp); |
| 1426 | smp_wmb(); /* Record GP times before starting GP. */ | 1424 | /* Record GP times before starting GP, hence smp_store_release(). */ |
| 1427 | rsp->gpnum++; | 1425 | smp_store_release(&rsp->gpnum, rsp->gpnum + 1); |
| 1428 | trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); | 1426 | trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); |
| 1429 | raw_spin_unlock_irq(&rnp->lock); | 1427 | raw_spin_unlock_irq(&rnp->lock); |
| 1430 | 1428 | ||
| 1431 | /* Exclude any concurrent CPU-hotplug operations. */ | 1429 | /* Exclude any concurrent CPU-hotplug operations. */ |
| 1432 | mutex_lock(&rsp->onoff_mutex); | 1430 | mutex_lock(&rsp->onoff_mutex); |
| 1431 | smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */ | ||
| 1433 | 1432 | ||
| 1434 | /* | 1433 | /* |
| 1435 | * Set the quiescent-state-needed bits in all the rcu_node | 1434 | * Set the quiescent-state-needed bits in all the rcu_node |
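The hunk above replaces an open-coded `smp_wmb()` plus increment with `smp_store_release()`; that release pairs with the `smp_load_acquire()` in `get_state_synchronize_rcu()` added further down in this diff. A minimal sketch of the publish/consume idiom, using hypothetical variables rather than the RCU state itself:

```c
#include <asm/barrier.h>	/* smp_store_release(), smp_load_acquire() */

/* Hypothetical example data; not the RCU ->gpnum machinery. */
static unsigned long gen;	/* generation counter published by the writer */
static int payload;		/* data that must be visible before 'gen' moves */

static void publish(void)
{
	payload = 42;				/* A: set up the data       */
	smp_store_release(&gen, gen + 1);	/* B: A is ordered before B */
}

static int consume(void)
{
	unsigned long g = smp_load_acquire(&gen);	/* pairs with the release */

	/* Any reader that sees the incremented 'g' also sees payload == 42. */
	return g ? payload : 0;
}
```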
| @@ -1557,10 +1556,11 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) | |||
| 1557 | } | 1556 | } |
| 1558 | rnp = rcu_get_root(rsp); | 1557 | rnp = rcu_get_root(rsp); |
| 1559 | raw_spin_lock_irq(&rnp->lock); | 1558 | raw_spin_lock_irq(&rnp->lock); |
| 1560 | smp_mb__after_unlock_lock(); | 1559 | smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */ |
| 1561 | rcu_nocb_gp_set(rnp, nocb); | 1560 | rcu_nocb_gp_set(rnp, nocb); |
| 1562 | 1561 | ||
| 1563 | rsp->completed = rsp->gpnum; /* Declare grace period done. */ | 1562 | /* Declare grace period done. */ |
| 1563 | ACCESS_ONCE(rsp->completed) = rsp->gpnum; | ||
| 1564 | trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); | 1564 | trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); |
| 1565 | rsp->fqs_state = RCU_GP_IDLE; | 1565 | rsp->fqs_state = RCU_GP_IDLE; |
| 1566 | rdp = this_cpu_ptr(rsp->rda); | 1566 | rdp = this_cpu_ptr(rsp->rda); |
| @@ -2304,7 +2304,7 @@ static void force_quiescent_state(struct rcu_state *rsp) | |||
| 2304 | if (rnp_old != NULL) | 2304 | if (rnp_old != NULL) |
| 2305 | raw_spin_unlock(&rnp_old->fqslock); | 2305 | raw_spin_unlock(&rnp_old->fqslock); |
| 2306 | if (ret) { | 2306 | if (ret) { |
| 2307 | rsp->n_force_qs_lh++; | 2307 | ACCESS_ONCE(rsp->n_force_qs_lh)++; |
| 2308 | return; | 2308 | return; |
| 2309 | } | 2309 | } |
| 2310 | rnp_old = rnp; | 2310 | rnp_old = rnp; |
| @@ -2316,7 +2316,7 @@ static void force_quiescent_state(struct rcu_state *rsp) | |||
| 2316 | smp_mb__after_unlock_lock(); | 2316 | smp_mb__after_unlock_lock(); |
| 2317 | raw_spin_unlock(&rnp_old->fqslock); | 2317 | raw_spin_unlock(&rnp_old->fqslock); |
| 2318 | if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { | 2318 | if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { |
| 2319 | rsp->n_force_qs_lh++; | 2319 | ACCESS_ONCE(rsp->n_force_qs_lh)++; |
| 2320 | raw_spin_unlock_irqrestore(&rnp_old->lock, flags); | 2320 | raw_spin_unlock_irqrestore(&rnp_old->lock, flags); |
| 2321 | return; /* Someone beat us to it. */ | 2321 | return; /* Someone beat us to it. */ |
| 2322 | } | 2322 | } |
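Both `force_quiescent_state()` hunks above (and the matching reader in `tree_trace.c` further down) wrap the `n_force_qs_lh` statistics counter in `ACCESS_ONCE()`, since it is updated and read without a common lock. A hedged sketch of the idiom with a hypothetical counter; note that `ACCESS_ONCE()` only keeps the compiler from tearing, fusing, or refetching the access, it does not make the increment atomic, which is acceptable for a best-effort statistic:

```c
#include <linux/compiler.h>	/* ACCESS_ONCE() */

/* Hypothetical lockless statistics counter, for illustration only. */
static unsigned long n_events;

static void note_event(void)
{
	/* Volatile access: the compiler emits exactly one load and one store. */
	ACCESS_ONCE(n_events)++;
}

static unsigned long snapshot_events(void)
{
	/* Readers also go through ACCESS_ONCE() to get a stable snapshot. */
	return ACCESS_ONCE(n_events);
}
```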
| @@ -2639,6 +2639,58 @@ void synchronize_rcu_bh(void) | |||
| 2639 | } | 2639 | } |
| 2640 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | 2640 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
| 2641 | 2641 | ||
| 2642 | /** | ||
| 2643 | * get_state_synchronize_rcu - Snapshot current RCU state | ||
| 2644 | * | ||
| 2645 | * Returns a cookie that is used by a later call to cond_synchronize_rcu() | ||
| 2646 | * to determine whether or not a full grace period has elapsed in the | ||
| 2647 | * meantime. | ||
| 2648 | */ | ||
| 2649 | unsigned long get_state_synchronize_rcu(void) | ||
| 2650 | { | ||
| 2651 | /* | ||
| 2652 | * Any prior manipulation of RCU-protected data must happen | ||
| 2653 | * before the load from ->gpnum. | ||
| 2654 | */ | ||
| 2655 | smp_mb(); /* ^^^ */ | ||
| 2656 | |||
| 2657 | /* | ||
| 2658 | * Make sure this load happens before the purportedly | ||
| 2659 | * time-consuming work between get_state_synchronize_rcu() | ||
| 2660 | * and cond_synchronize_rcu(). | ||
| 2661 | */ | ||
| 2662 | return smp_load_acquire(&rcu_state->gpnum); | ||
| 2663 | } | ||
| 2664 | EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); | ||
| 2665 | |||
| 2666 | /** | ||
| 2667 | * cond_synchronize_rcu - Conditionally wait for an RCU grace period | ||
| 2668 | * | ||
| 2669 | * @oldstate: return value from earlier call to get_state_synchronize_rcu() | ||
| 2670 | * | ||
| 2671 | * If a full RCU grace period has elapsed since the earlier call to | ||
| 2672 | * get_state_synchronize_rcu(), just return. Otherwise, invoke | ||
| 2673 | * synchronize_rcu() to wait for a full grace period. | ||
| 2674 | * | ||
| 2675 | * Yes, this function does not take counter wrap into account. But | ||
| 2676 | * counter wrap is harmless. If the counter wraps, we have waited for | ||
| 2677 | * more than 2 billion grace periods (and way more on a 64-bit system!), | ||
| 2678 | * so waiting for one additional grace period should be just fine. | ||
| 2679 | */ | ||
| 2680 | void cond_synchronize_rcu(unsigned long oldstate) | ||
| 2681 | { | ||
| 2682 | unsigned long newstate; | ||
| 2683 | |||
| 2684 | /* | ||
| 2685 | * Ensure that this load happens before any RCU-destructive | ||
| 2686 | * actions the caller might carry out after we return. | ||
| 2687 | */ | ||
| 2688 | newstate = smp_load_acquire(&rcu_state->completed); | ||
| 2689 | if (ULONG_CMP_GE(oldstate, newstate)) | ||
| 2690 | synchronize_rcu(); | ||
| 2691 | } | ||
| 2692 | EXPORT_SYMBOL_GPL(cond_synchronize_rcu); | ||
| 2693 | |||
| 2642 | static int synchronize_sched_expedited_cpu_stop(void *data) | 2694 | static int synchronize_sched_expedited_cpu_stop(void *data) |
| 2643 | { | 2695 | { |
| 2644 | /* | 2696 | /* |
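The two exports added above form a cookie-based API: snapshot the grace-period number, do unrelated slow work, and only fall back to a full `synchronize_rcu()` if no grace period completed in the meantime. A hedged usage sketch with a hypothetical object and caller (not code from this commit):

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected object; illustrative only. */
struct foo {
	int data;
};

static void foo_retire(struct foo *old)
{
	unsigned long cookie;

	cookie = get_state_synchronize_rcu();	/* snapshot the GP counter */

	/* ... lengthy teardown work that no longer touches *old ... */

	/*
	 * Returns immediately if a full grace period already elapsed
	 * since the snapshot; otherwise waits for one.
	 */
	cond_synchronize_rcu(cookie);
	kfree(old);
}
```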
| @@ -2880,7 +2932,7 @@ static int rcu_pending(int cpu) | |||
| 2880 | * non-NULL, store an indication of whether all callbacks are lazy. | 2932 | * non-NULL, store an indication of whether all callbacks are lazy. |
| 2881 | * (If there are no callbacks, all of them are deemed to be lazy.) | 2933 | * (If there are no callbacks, all of them are deemed to be lazy.) |
| 2882 | */ | 2934 | */ |
| 2883 | static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy) | 2935 | static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy) |
| 2884 | { | 2936 | { |
| 2885 | bool al = true; | 2937 | bool al = true; |
| 2886 | bool hc = false; | 2938 | bool hc = false; |
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 8c19873f1ac9..75dc3c39a02a 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h | |||
| @@ -13,8 +13,8 @@ | |||
| 13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
| 14 | * | 14 | * |
| 15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, you can access it online at |
| 17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 17 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 18 | * | 18 | * |
| 19 | * Copyright IBM Corporation, 2008 | 19 | * Copyright IBM Corporation, 2008 |
| 20 | * | 20 | * |
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 6e2ef4b2b920..962d1d589929 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
| @@ -14,8 +14,8 @@ | |||
| 14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
| 15 | * | 15 | * |
| 16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, you can access it online at |
| 18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 19 | * | 19 | * |
| 20 | * Copyright Red Hat, 2009 | 20 | * Copyright Red Hat, 2009 |
| 21 | * Copyright IBM Corporation, 2009 | 21 | * Copyright IBM Corporation, 2009 |
| @@ -1586,11 +1586,13 @@ static void rcu_prepare_kthreads(int cpu) | |||
| 1586 | * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs | 1586 | * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs |
| 1587 | * any flavor of RCU. | 1587 | * any flavor of RCU. |
| 1588 | */ | 1588 | */ |
| 1589 | #ifndef CONFIG_RCU_NOCB_CPU_ALL | ||
| 1589 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) | 1590 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
| 1590 | { | 1591 | { |
| 1591 | *delta_jiffies = ULONG_MAX; | 1592 | *delta_jiffies = ULONG_MAX; |
| 1592 | return rcu_cpu_has_callbacks(cpu, NULL); | 1593 | return rcu_cpu_has_callbacks(cpu, NULL); |
| 1593 | } | 1594 | } |
| 1595 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ | ||
| 1594 | 1596 | ||
| 1595 | /* | 1597 | /* |
| 1596 | * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up | 1598 | * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up |
| @@ -1656,7 +1658,7 @@ extern int tick_nohz_active; | |||
| 1656 | * only if it has been awhile since the last time we did so. Afterwards, | 1658 | * only if it has been awhile since the last time we did so. Afterwards, |
| 1657 | * if there are any callbacks ready for immediate invocation, return true. | 1659 | * if there are any callbacks ready for immediate invocation, return true. |
| 1658 | */ | 1660 | */ |
| 1659 | static bool rcu_try_advance_all_cbs(void) | 1661 | static bool __maybe_unused rcu_try_advance_all_cbs(void) |
| 1660 | { | 1662 | { |
| 1661 | bool cbs_ready = false; | 1663 | bool cbs_ready = false; |
| 1662 | struct rcu_data *rdp; | 1664 | struct rcu_data *rdp; |
| @@ -1696,6 +1698,7 @@ static bool rcu_try_advance_all_cbs(void) | |||
| 1696 | * | 1698 | * |
| 1697 | * The caller must have disabled interrupts. | 1699 | * The caller must have disabled interrupts. |
| 1698 | */ | 1700 | */ |
| 1701 | #ifndef CONFIG_RCU_NOCB_CPU_ALL | ||
| 1699 | int rcu_needs_cpu(int cpu, unsigned long *dj) | 1702 | int rcu_needs_cpu(int cpu, unsigned long *dj) |
| 1700 | { | 1703 | { |
| 1701 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | 1704 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
| @@ -1726,6 +1729,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj) | |||
| 1726 | } | 1729 | } |
| 1727 | return 0; | 1730 | return 0; |
| 1728 | } | 1731 | } |
| 1732 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ | ||
| 1729 | 1733 | ||
| 1730 | /* | 1734 | /* |
| 1731 | * Prepare a CPU for idle from an RCU perspective. The first major task | 1735 | * Prepare a CPU for idle from an RCU perspective. The first major task |
| @@ -1739,6 +1743,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj) | |||
| 1739 | */ | 1743 | */ |
| 1740 | static void rcu_prepare_for_idle(int cpu) | 1744 | static void rcu_prepare_for_idle(int cpu) |
| 1741 | { | 1745 | { |
| 1746 | #ifndef CONFIG_RCU_NOCB_CPU_ALL | ||
| 1742 | struct rcu_data *rdp; | 1747 | struct rcu_data *rdp; |
| 1743 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | 1748 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
| 1744 | struct rcu_node *rnp; | 1749 | struct rcu_node *rnp; |
| @@ -1790,6 +1795,7 @@ static void rcu_prepare_for_idle(int cpu) | |||
| 1790 | rcu_accelerate_cbs(rsp, rnp, rdp); | 1795 | rcu_accelerate_cbs(rsp, rnp, rdp); |
| 1791 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1796 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
| 1792 | } | 1797 | } |
| 1798 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ | ||
| 1793 | } | 1799 | } |
| 1794 | 1800 | ||
| 1795 | /* | 1801 | /* |
| @@ -1799,11 +1805,12 @@ static void rcu_prepare_for_idle(int cpu) | |||
| 1799 | */ | 1805 | */ |
| 1800 | static void rcu_cleanup_after_idle(int cpu) | 1806 | static void rcu_cleanup_after_idle(int cpu) |
| 1801 | { | 1807 | { |
| 1802 | 1808 | #ifndef CONFIG_RCU_NOCB_CPU_ALL | |
| 1803 | if (rcu_is_nocb_cpu(cpu)) | 1809 | if (rcu_is_nocb_cpu(cpu)) |
| 1804 | return; | 1810 | return; |
| 1805 | if (rcu_try_advance_all_cbs()) | 1811 | if (rcu_try_advance_all_cbs()) |
| 1806 | invoke_rcu_core(); | 1812 | invoke_rcu_core(); |
| 1813 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ | ||
| 1807 | } | 1814 | } |
| 1808 | 1815 | ||
| 1809 | /* | 1816 | /* |
| @@ -2101,6 +2108,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp) | |||
| 2101 | init_waitqueue_head(&rnp->nocb_gp_wq[1]); | 2108 | init_waitqueue_head(&rnp->nocb_gp_wq[1]); |
| 2102 | } | 2109 | } |
| 2103 | 2110 | ||
| 2111 | #ifndef CONFIG_RCU_NOCB_CPU_ALL | ||
| 2104 | /* Is the specified CPU a no-CPUs CPU? */ | 2112 | /* Is the specified CPU a no-CPUs CPU? */ |
| 2105 | bool rcu_is_nocb_cpu(int cpu) | 2113 | bool rcu_is_nocb_cpu(int cpu) |
| 2106 | { | 2114 | { |
| @@ -2108,6 +2116,7 @@ bool rcu_is_nocb_cpu(int cpu) | |||
| 2108 | return cpumask_test_cpu(cpu, rcu_nocb_mask); | 2116 | return cpumask_test_cpu(cpu, rcu_nocb_mask); |
| 2109 | return false; | 2117 | return false; |
| 2110 | } | 2118 | } |
| 2119 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ | ||
| 2111 | 2120 | ||
| 2112 | /* | 2121 | /* |
| 2113 | * Enqueue the specified string of rcu_head structures onto the specified | 2122 | * Enqueue the specified string of rcu_head structures onto the specified |
| @@ -2893,7 +2902,7 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) | |||
| 2893 | * CPU unless the grace period has extended for too long. | 2902 | * CPU unless the grace period has extended for too long. |
| 2894 | * | 2903 | * |
| 2895 | * This code relies on the fact that all NO_HZ_FULL CPUs are also | 2904 | * This code relies on the fact that all NO_HZ_FULL CPUs are also |
| 2896 | * CONFIG_RCU_NOCB_CPUs. | 2905 | * CONFIG_RCU_NOCB_CPU CPUs. |
| 2897 | */ | 2906 | */ |
| 2898 | static bool rcu_nohz_full_cpu(struct rcu_state *rsp) | 2907 | static bool rcu_nohz_full_cpu(struct rcu_state *rsp) |
| 2899 | { | 2908 | { |
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c index 4def475336d4..5cdc62e1beeb 100644 --- a/kernel/rcu/tree_trace.c +++ b/kernel/rcu/tree_trace.c | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, you can access it online at |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * | 17 | * |
| 18 | * Copyright IBM Corporation, 2008 | 18 | * Copyright IBM Corporation, 2008 |
| 19 | * | 19 | * |
| @@ -273,7 +273,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | |||
| 273 | seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n", | 273 | seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n", |
| 274 | rsp->n_force_qs, rsp->n_force_qs_ngp, | 274 | rsp->n_force_qs, rsp->n_force_qs_ngp, |
| 275 | rsp->n_force_qs - rsp->n_force_qs_ngp, | 275 | rsp->n_force_qs - rsp->n_force_qs_ngp, |
| 276 | rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen); | 276 | ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen); |
| 277 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) { | 277 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) { |
| 278 | if (rnp->level != level) { | 278 | if (rnp->level != level) { |
| 279 | seq_puts(m, "\n"); | 279 | seq_puts(m, "\n"); |
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index c54609faf233..4c0a9b0af469 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, you can access it online at |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * http://www.gnu.org/licenses/gpl-2.0.html. |
| 17 | * | 17 | * |
| 18 | * Copyright IBM Corporation, 2001 | 18 | * Copyright IBM Corporation, 2001 |
| 19 | * | 19 | * |
| @@ -49,7 +49,6 @@ | |||
| 49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
| 50 | 50 | ||
| 51 | #define CREATE_TRACE_POINTS | 51 | #define CREATE_TRACE_POINTS |
| 52 | #include <trace/events/rcu.h> | ||
| 53 | 52 | ||
| 54 | #include "rcu.h" | 53 | #include "rcu.h" |
| 55 | 54 | ||
diff --git a/kernel/relay.c b/kernel/relay.c index 5001c9887db1..5a56d3c8dc03 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
| @@ -227,7 +227,7 @@ static void relay_destroy_buf(struct rchan_buf *buf) | |||
| 227 | * relay_remove_buf - remove a channel buffer | 227 | * relay_remove_buf - remove a channel buffer |
| 228 | * @kref: target kernel reference that contains the relay buffer | 228 | * @kref: target kernel reference that contains the relay buffer |
| 229 | * | 229 | * |
| 230 | * Removes the file from the fileystem, which also frees the | 230 | * Removes the file from the filesystem, which also frees the |
| 231 | * rchan_buf_struct and the channel buffer. Should only be called from | 231 | * rchan_buf_struct and the channel buffer. Should only be called from |
| 232 | * kref_put(). | 232 | * kref_put(). |
| 233 | */ | 233 | */ |
| @@ -1195,8 +1195,6 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe, | |||
| 1195 | 1195 | ||
| 1196 | static const struct pipe_buf_operations relay_pipe_buf_ops = { | 1196 | static const struct pipe_buf_operations relay_pipe_buf_ops = { |
| 1197 | .can_merge = 0, | 1197 | .can_merge = 0, |
| 1198 | .map = generic_pipe_buf_map, | ||
| 1199 | .unmap = generic_pipe_buf_unmap, | ||
| 1200 | .confirm = generic_pipe_buf_confirm, | 1198 | .confirm = generic_pipe_buf_confirm, |
| 1201 | .release = relay_pipe_buf_release, | 1199 | .release = relay_pipe_buf_release, |
| 1202 | .steal = generic_pipe_buf_steal, | 1200 | .steal = generic_pipe_buf_steal, |
| @@ -1253,7 +1251,7 @@ static ssize_t subbuf_splice_actor(struct file *in, | |||
| 1253 | subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT; | 1251 | subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT; |
| 1254 | pidx = (read_start / PAGE_SIZE) % subbuf_pages; | 1252 | pidx = (read_start / PAGE_SIZE) % subbuf_pages; |
| 1255 | poff = read_start & ~PAGE_MASK; | 1253 | poff = read_start & ~PAGE_MASK; |
| 1256 | nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers); | 1254 | nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max); |
| 1257 | 1255 | ||
| 1258 | for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { | 1256 | for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { |
| 1259 | unsigned int this_len, this_end, private; | 1257 | unsigned int this_len, this_end, private; |
diff --git a/kernel/res_counter.c b/kernel/res_counter.c index 4aa8a305aede..51dbac6a3633 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c | |||
| @@ -22,8 +22,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent) | |||
| 22 | counter->parent = parent; | 22 | counter->parent = parent; |
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | int res_counter_charge_locked(struct res_counter *counter, unsigned long val, | 25 | static u64 res_counter_uncharge_locked(struct res_counter *counter, |
| 26 | bool force) | 26 | unsigned long val) |
| 27 | { | ||
| 28 | if (WARN_ON(counter->usage < val)) | ||
| 29 | val = counter->usage; | ||
| 30 | |||
| 31 | counter->usage -= val; | ||
| 32 | return counter->usage; | ||
| 33 | } | ||
| 34 | |||
| 35 | static int res_counter_charge_locked(struct res_counter *counter, | ||
| 36 | unsigned long val, bool force) | ||
| 27 | { | 37 | { |
| 28 | int ret = 0; | 38 | int ret = 0; |
| 29 | 39 | ||
| @@ -86,15 +96,6 @@ int res_counter_charge_nofail(struct res_counter *counter, unsigned long val, | |||
| 86 | return __res_counter_charge(counter, val, limit_fail_at, true); | 96 | return __res_counter_charge(counter, val, limit_fail_at, true); |
| 87 | } | 97 | } |
| 88 | 98 | ||
| 89 | u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) | ||
| 90 | { | ||
| 91 | if (WARN_ON(counter->usage < val)) | ||
| 92 | val = counter->usage; | ||
| 93 | |||
| 94 | counter->usage -= val; | ||
| 95 | return counter->usage; | ||
| 96 | } | ||
| 97 | |||
| 98 | u64 res_counter_uncharge_until(struct res_counter *counter, | 99 | u64 res_counter_uncharge_until(struct res_counter *counter, |
| 99 | struct res_counter *top, | 100 | struct res_counter *top, |
| 100 | unsigned long val) | 101 | unsigned long val) |
diff --git a/kernel/resource.c b/kernel/resource.c index 3f285dce9347..8957d686e29b 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -432,11 +432,6 @@ static void resource_clip(struct resource *res, resource_size_t min, | |||
| 432 | res->end = max; | 432 | res->end = max; |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | static bool resource_contains(struct resource *res1, struct resource *res2) | ||
| 436 | { | ||
| 437 | return res1->start <= res2->start && res1->end >= res2->end; | ||
| 438 | } | ||
| 439 | |||
| 440 | /* | 435 | /* |
| 441 | * Find empty slot in the resource tree with the given range and | 436 | * Find empty slot in the resource tree with the given range and |
| 442 | * alignment constraints | 437 | * alignment constraints |
| @@ -471,10 +466,11 @@ static int __find_resource(struct resource *root, struct resource *old, | |||
| 471 | arch_remove_reservations(&tmp); | 466 | arch_remove_reservations(&tmp); |
| 472 | 467 | ||
| 473 | /* Check for overflow after ALIGN() */ | 468 | /* Check for overflow after ALIGN() */ |
| 474 | avail = *new; | ||
| 475 | avail.start = ALIGN(tmp.start, constraint->align); | 469 | avail.start = ALIGN(tmp.start, constraint->align); |
| 476 | avail.end = tmp.end; | 470 | avail.end = tmp.end; |
| 471 | avail.flags = new->flags & ~IORESOURCE_UNSET; | ||
| 477 | if (avail.start >= tmp.start) { | 472 | if (avail.start >= tmp.start) { |
| 473 | alloc.flags = avail.flags; | ||
| 478 | alloc.start = constraint->alignf(constraint->alignf_data, &avail, | 474 | alloc.start = constraint->alignf(constraint->alignf_data, &avail, |
| 479 | size, constraint->align); | 475 | size, constraint->align); |
| 480 | alloc.end = alloc.start + size - 1; | 476 | alloc.end = alloc.start + size - 1; |
| @@ -515,7 +511,7 @@ static int find_resource(struct resource *root, struct resource *new, | |||
| 515 | * @newsize: new size of the resource descriptor | 511 | * @newsize: new size of the resource descriptor |
| 516 | * @constraint: the size and alignment constraints to be met. | 512 | * @constraint: the size and alignment constraints to be met. |
| 517 | */ | 513 | */ |
| 518 | int reallocate_resource(struct resource *root, struct resource *old, | 514 | static int reallocate_resource(struct resource *root, struct resource *old, |
| 519 | resource_size_t newsize, | 515 | resource_size_t newsize, |
| 520 | struct resource_constraint *constraint) | 516 | struct resource_constraint *constraint) |
| 521 | { | 517 | { |
| @@ -949,8 +945,8 @@ struct resource * __request_region(struct resource *parent, | |||
| 949 | res->name = name; | 945 | res->name = name; |
| 950 | res->start = start; | 946 | res->start = start; |
| 951 | res->end = start + n - 1; | 947 | res->end = start + n - 1; |
| 952 | res->flags = IORESOURCE_BUSY; | 948 | res->flags = resource_type(parent); |
| 953 | res->flags |= flags; | 949 | res->flags |= IORESOURCE_BUSY | flags; |
| 954 | 950 | ||
| 955 | write_lock(&resource_lock); | 951 | write_lock(&resource_lock); |
| 956 | 952 | ||
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 9a95c8c2af2a..ab32b7b0db5c 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile | |||
| @@ -13,7 +13,7 @@ endif | |||
| 13 | 13 | ||
| 14 | obj-y += core.o proc.o clock.o cputime.o | 14 | obj-y += core.o proc.o clock.o cputime.o |
| 15 | obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o | 15 | obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o |
| 16 | obj-y += wait.o completion.o | 16 | obj-y += wait.o completion.o idle.o |
| 17 | obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o | 17 | obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o |
| 18 | obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o | 18 | obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o |
| 19 | obj-$(CONFIG_SCHEDSTATS) += stats.o | 19 | obj-$(CONFIG_SCHEDSTATS) += stats.o |
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index 4a073539c58e..e73efba98301 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c | |||
| @@ -203,7 +203,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) | |||
| 203 | struct autogroup *ag; | 203 | struct autogroup *ag; |
| 204 | int err; | 204 | int err; |
| 205 | 205 | ||
| 206 | if (nice < -20 || nice > 19) | 206 | if (nice < MIN_NICE || nice > MAX_NICE) |
| 207 | return -EINVAL; | 207 | return -EINVAL; |
| 208 | 208 | ||
| 209 | err = security_task_setnice(current, nice); | 209 | err = security_task_setnice(current, nice); |
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 43c2bcc35761..3ef6451e972e 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c | |||
| @@ -60,13 +60,14 @@ | |||
| 60 | #include <linux/sched.h> | 60 | #include <linux/sched.h> |
| 61 | #include <linux/static_key.h> | 61 | #include <linux/static_key.h> |
| 62 | #include <linux/workqueue.h> | 62 | #include <linux/workqueue.h> |
| 63 | #include <linux/compiler.h> | ||
| 63 | 64 | ||
| 64 | /* | 65 | /* |
| 65 | * Scheduler clock - returns current time in nanosec units. | 66 | * Scheduler clock - returns current time in nanosec units. |
| 66 | * This is default implementation. | 67 | * This is default implementation. |
| 67 | * Architectures and sub-architectures can override this. | 68 | * Architectures and sub-architectures can override this. |
| 68 | */ | 69 | */ |
| 69 | unsigned long long __attribute__((weak)) sched_clock(void) | 70 | unsigned long long __weak sched_clock(void) |
| 70 | { | 71 | { |
| 71 | return (unsigned long long)(jiffies - INITIAL_JIFFIES) | 72 | return (unsigned long long)(jiffies - INITIAL_JIFFIES) |
| 72 | * (NSEC_PER_SEC / HZ); | 73 | * (NSEC_PER_SEC / HZ); |
| @@ -301,14 +302,14 @@ u64 sched_clock_cpu(int cpu) | |||
| 301 | if (unlikely(!sched_clock_running)) | 302 | if (unlikely(!sched_clock_running)) |
| 302 | return 0ull; | 303 | return 0ull; |
| 303 | 304 | ||
| 304 | preempt_disable(); | 305 | preempt_disable_notrace(); |
| 305 | scd = cpu_sdc(cpu); | 306 | scd = cpu_sdc(cpu); |
| 306 | 307 | ||
| 307 | if (cpu != smp_processor_id()) | 308 | if (cpu != smp_processor_id()) |
| 308 | clock = sched_clock_remote(scd); | 309 | clock = sched_clock_remote(scd); |
| 309 | else | 310 | else |
| 310 | clock = sched_clock_local(scd); | 311 | clock = sched_clock_local(scd); |
| 311 | preempt_enable(); | 312 | preempt_enable_notrace(); |
| 312 | 313 | ||
| 313 | return clock; | 314 | return clock; |
| 314 | } | 315 | } |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6edbef296ece..268a45ea238c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -73,6 +73,7 @@ | |||
| 73 | #include <linux/init_task.h> | 73 | #include <linux/init_task.h> |
| 74 | #include <linux/binfmts.h> | 74 | #include <linux/binfmts.h> |
| 75 | #include <linux/context_tracking.h> | 75 | #include <linux/context_tracking.h> |
| 76 | #include <linux/compiler.h> | ||
| 76 | 77 | ||
| 77 | #include <asm/switch_to.h> | 78 | #include <asm/switch_to.h> |
| 78 | #include <asm/tlb.h> | 79 | #include <asm/tlb.h> |
| @@ -432,7 +433,7 @@ void hrtick_start(struct rq *rq, u64 delay) | |||
| 432 | if (rq == this_rq()) { | 433 | if (rq == this_rq()) { |
| 433 | __hrtick_restart(rq); | 434 | __hrtick_restart(rq); |
| 434 | } else if (!rq->hrtick_csd_pending) { | 435 | } else if (!rq->hrtick_csd_pending) { |
| 435 | __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0); | 436 | smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); |
| 436 | rq->hrtick_csd_pending = 1; | 437 | rq->hrtick_csd_pending = 1; |
| 437 | } | 438 | } |
| 438 | } | 439 | } |
| @@ -555,12 +556,15 @@ void resched_cpu(int cpu) | |||
| 555 | * selecting an idle cpu will add more delays to the timers than intended | 556 | * selecting an idle cpu will add more delays to the timers than intended |
| 556 | * (as that cpu's timer base may not be uptodate wrt jiffies etc). | 557 | * (as that cpu's timer base may not be uptodate wrt jiffies etc). |
| 557 | */ | 558 | */ |
| 558 | int get_nohz_timer_target(void) | 559 | int get_nohz_timer_target(int pinned) |
| 559 | { | 560 | { |
| 560 | int cpu = smp_processor_id(); | 561 | int cpu = smp_processor_id(); |
| 561 | int i; | 562 | int i; |
| 562 | struct sched_domain *sd; | 563 | struct sched_domain *sd; |
| 563 | 564 | ||
| 565 | if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu)) | ||
| 566 | return cpu; | ||
| 567 | |||
| 564 | rcu_read_lock(); | 568 | rcu_read_lock(); |
| 565 | for_each_domain(cpu, sd) { | 569 | for_each_domain(cpu, sd) { |
| 566 | for_each_cpu(i, sched_domain_span(sd)) { | 570 | for_each_cpu(i, sched_domain_span(sd)) { |
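The new `pinned` argument above short-circuits the NOHZ target search: pinned timers, kernels with timer migration disabled, and busy CPUs all keep the timer local. A hedged sketch of a caller (the actual timer-core call site is not part of this diff):

```c
#include <linux/sched.h>	/* get_nohz_timer_target() */

/*
 * Hypothetical helper showing the calling convention after this change:
 * the arming path passes whether the timer is pinned, and only unpinned
 * timers on an idle CPU are considered for migration to a busy CPU
 * elsewhere in the domain hierarchy.
 */
static int choose_timer_cpu(int pinned)
{
	return get_nohz_timer_target(pinned);
}
```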
| @@ -823,19 +827,13 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) | |||
| 823 | #endif | 827 | #endif |
| 824 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING | 828 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING |
| 825 | if (static_key_false((¶virt_steal_rq_enabled))) { | 829 | if (static_key_false((¶virt_steal_rq_enabled))) { |
| 826 | u64 st; | ||
| 827 | |||
| 828 | steal = paravirt_steal_clock(cpu_of(rq)); | 830 | steal = paravirt_steal_clock(cpu_of(rq)); |
| 829 | steal -= rq->prev_steal_time_rq; | 831 | steal -= rq->prev_steal_time_rq; |
| 830 | 832 | ||
| 831 | if (unlikely(steal > delta)) | 833 | if (unlikely(steal > delta)) |
| 832 | steal = delta; | 834 | steal = delta; |
| 833 | 835 | ||
| 834 | st = steal_ticks(steal); | ||
| 835 | steal = st * TICK_NSEC; | ||
| 836 | |||
| 837 | rq->prev_steal_time_rq += steal; | 836 | rq->prev_steal_time_rq += steal; |
| 838 | |||
| 839 | delta -= steal; | 837 | delta -= steal; |
| 840 | } | 838 | } |
| 841 | #endif | 839 | #endif |
| @@ -1745,8 +1743,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) | |||
| 1745 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; | 1743 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; |
| 1746 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; | 1744 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; |
| 1747 | p->numa_work.next = &p->numa_work; | 1745 | p->numa_work.next = &p->numa_work; |
| 1748 | p->numa_faults = NULL; | 1746 | p->numa_faults_memory = NULL; |
| 1749 | p->numa_faults_buffer = NULL; | 1747 | p->numa_faults_buffer_memory = NULL; |
| 1748 | p->last_task_numa_placement = 0; | ||
| 1749 | p->last_sum_exec_runtime = 0; | ||
| 1750 | 1750 | ||
| 1751 | INIT_LIST_HEAD(&p->numa_entry); | 1751 | INIT_LIST_HEAD(&p->numa_entry); |
| 1752 | p->numa_group = NULL; | 1752 | p->numa_group = NULL; |
| @@ -2149,8 +2149,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
| 2149 | if (mm) | 2149 | if (mm) |
| 2150 | mmdrop(mm); | 2150 | mmdrop(mm); |
| 2151 | if (unlikely(prev_state == TASK_DEAD)) { | 2151 | if (unlikely(prev_state == TASK_DEAD)) { |
| 2152 | task_numa_free(prev); | ||
| 2153 | |||
| 2154 | if (prev->sched_class->task_dead) | 2152 | if (prev->sched_class->task_dead) |
| 2155 | prev->sched_class->task_dead(prev); | 2153 | prev->sched_class->task_dead(prev); |
| 2156 | 2154 | ||
| @@ -2167,13 +2165,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
| 2167 | 2165 | ||
| 2168 | #ifdef CONFIG_SMP | 2166 | #ifdef CONFIG_SMP |
| 2169 | 2167 | ||
| 2170 | /* assumes rq->lock is held */ | ||
| 2171 | static inline void pre_schedule(struct rq *rq, struct task_struct *prev) | ||
| 2172 | { | ||
| 2173 | if (prev->sched_class->pre_schedule) | ||
| 2174 | prev->sched_class->pre_schedule(rq, prev); | ||
| 2175 | } | ||
| 2176 | |||
| 2177 | /* rq->lock is NOT held, but preemption is disabled */ | 2168 | /* rq->lock is NOT held, but preemption is disabled */ |
| 2178 | static inline void post_schedule(struct rq *rq) | 2169 | static inline void post_schedule(struct rq *rq) |
| 2179 | { | 2170 | { |
| @@ -2191,10 +2182,6 @@ static inline void post_schedule(struct rq *rq) | |||
| 2191 | 2182 | ||
| 2192 | #else | 2183 | #else |
| 2193 | 2184 | ||
| 2194 | static inline void pre_schedule(struct rq *rq, struct task_struct *p) | ||
| 2195 | { | ||
| 2196 | } | ||
| 2197 | |||
| 2198 | static inline void post_schedule(struct rq *rq) | 2185 | static inline void post_schedule(struct rq *rq) |
| 2199 | { | 2186 | { |
| 2200 | } | 2187 | } |
| @@ -2510,8 +2497,13 @@ void __kprobes preempt_count_add(int val) | |||
| 2510 | DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= | 2497 | DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= |
| 2511 | PREEMPT_MASK - 10); | 2498 | PREEMPT_MASK - 10); |
| 2512 | #endif | 2499 | #endif |
| 2513 | if (preempt_count() == val) | 2500 | if (preempt_count() == val) { |
| 2514 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); | 2501 | unsigned long ip = get_parent_ip(CALLER_ADDR1); |
| 2502 | #ifdef CONFIG_DEBUG_PREEMPT | ||
| 2503 | current->preempt_disable_ip = ip; | ||
| 2504 | #endif | ||
| 2505 | trace_preempt_off(CALLER_ADDR0, ip); | ||
| 2506 | } | ||
| 2515 | } | 2507 | } |
| 2516 | EXPORT_SYMBOL(preempt_count_add); | 2508 | EXPORT_SYMBOL(preempt_count_add); |
| 2517 | 2509 | ||
| @@ -2554,6 +2546,13 @@ static noinline void __schedule_bug(struct task_struct *prev) | |||
| 2554 | print_modules(); | 2546 | print_modules(); |
| 2555 | if (irqs_disabled()) | 2547 | if (irqs_disabled()) |
| 2556 | print_irqtrace_events(prev); | 2548 | print_irqtrace_events(prev); |
| 2549 | #ifdef CONFIG_DEBUG_PREEMPT | ||
| 2550 | if (in_atomic_preempt_off()) { | ||
| 2551 | pr_err("Preemption disabled at:"); | ||
| 2552 | print_ip_sym(current->preempt_disable_ip); | ||
| 2553 | pr_cont("\n"); | ||
| 2554 | } | ||
| 2555 | #endif | ||
| 2557 | dump_stack(); | 2556 | dump_stack(); |
| 2558 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); | 2557 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); |
| 2559 | } | 2558 | } |
| @@ -2577,36 +2576,34 @@ static inline void schedule_debug(struct task_struct *prev) | |||
| 2577 | schedstat_inc(this_rq(), sched_count); | 2576 | schedstat_inc(this_rq(), sched_count); |
| 2578 | } | 2577 | } |
| 2579 | 2578 | ||
| 2580 | static void put_prev_task(struct rq *rq, struct task_struct *prev) | ||
| 2581 | { | ||
| 2582 | if (prev->on_rq || rq->skip_clock_update < 0) | ||
| 2583 | update_rq_clock(rq); | ||
| 2584 | prev->sched_class->put_prev_task(rq, prev); | ||
| 2585 | } | ||
| 2586 | |||
| 2587 | /* | 2579 | /* |
| 2588 | * Pick up the highest-prio task: | 2580 | * Pick up the highest-prio task: |
| 2589 | */ | 2581 | */ |
| 2590 | static inline struct task_struct * | 2582 | static inline struct task_struct * |
| 2591 | pick_next_task(struct rq *rq) | 2583 | pick_next_task(struct rq *rq, struct task_struct *prev) |
| 2592 | { | 2584 | { |
| 2593 | const struct sched_class *class; | 2585 | const struct sched_class *class = &fair_sched_class; |
| 2594 | struct task_struct *p; | 2586 | struct task_struct *p; |
| 2595 | 2587 | ||
| 2596 | /* | 2588 | /* |
| 2597 | * Optimization: we know that if all tasks are in | 2589 | * Optimization: we know that if all tasks are in |
| 2598 | * the fair class we can call that function directly: | 2590 | * the fair class we can call that function directly: |
| 2599 | */ | 2591 | */ |
| 2600 | if (likely(rq->nr_running == rq->cfs.h_nr_running)) { | 2592 | if (likely(prev->sched_class == class && |
| 2601 | p = fair_sched_class.pick_next_task(rq); | 2593 | rq->nr_running == rq->cfs.h_nr_running)) { |
| 2602 | if (likely(p)) | 2594 | p = fair_sched_class.pick_next_task(rq, prev); |
| 2595 | if (likely(p && p != RETRY_TASK)) | ||
| 2603 | return p; | 2596 | return p; |
| 2604 | } | 2597 | } |
| 2605 | 2598 | ||
| 2599 | again: | ||
| 2606 | for_each_class(class) { | 2600 | for_each_class(class) { |
| 2607 | p = class->pick_next_task(rq); | 2601 | p = class->pick_next_task(rq, prev); |
| 2608 | if (p) | 2602 | if (p) { |
| 2603 | if (unlikely(p == RETRY_TASK)) | ||
| 2604 | goto again; | ||
| 2609 | return p; | 2605 | return p; |
| 2606 | } | ||
| 2610 | } | 2607 | } |
| 2611 | 2608 | ||
| 2612 | BUG(); /* the idle class will always have a runnable task */ | 2609 | BUG(); /* the idle class will always have a runnable task */ |
| @@ -2700,13 +2697,10 @@ need_resched: | |||
| 2700 | switch_count = &prev->nvcsw; | 2697 | switch_count = &prev->nvcsw; |
| 2701 | } | 2698 | } |
| 2702 | 2699 | ||
| 2703 | pre_schedule(rq, prev); | 2700 | if (prev->on_rq || rq->skip_clock_update < 0) |
| 2704 | 2701 | update_rq_clock(rq); | |
| 2705 | if (unlikely(!rq->nr_running)) | ||
| 2706 | idle_balance(cpu, rq); | ||
| 2707 | 2702 | ||
| 2708 | put_prev_task(rq, prev); | 2703 | next = pick_next_task(rq, prev); |
| 2709 | next = pick_next_task(rq); | ||
| 2710 | clear_tsk_need_resched(prev); | 2704 | clear_tsk_need_resched(prev); |
| 2711 | clear_preempt_need_resched(); | 2705 | clear_preempt_need_resched(); |
| 2712 | rq->skip_clock_update = 0; | 2706 | rq->skip_clock_update = 0; |
| @@ -2852,52 +2846,6 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, | |||
| 2852 | } | 2846 | } |
| 2853 | EXPORT_SYMBOL(default_wake_function); | 2847 | EXPORT_SYMBOL(default_wake_function); |
| 2854 | 2848 | ||
| 2855 | static long __sched | ||
| 2856 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) | ||
| 2857 | { | ||
| 2858 | unsigned long flags; | ||
| 2859 | wait_queue_t wait; | ||
| 2860 | |||
| 2861 | init_waitqueue_entry(&wait, current); | ||
| 2862 | |||
| 2863 | __set_current_state(state); | ||
| 2864 | |||
| 2865 | spin_lock_irqsave(&q->lock, flags); | ||
| 2866 | __add_wait_queue(q, &wait); | ||
| 2867 | spin_unlock(&q->lock); | ||
| 2868 | timeout = schedule_timeout(timeout); | ||
| 2869 | spin_lock_irq(&q->lock); | ||
| 2870 | __remove_wait_queue(q, &wait); | ||
| 2871 | spin_unlock_irqrestore(&q->lock, flags); | ||
| 2872 | |||
| 2873 | return timeout; | ||
| 2874 | } | ||
| 2875 | |||
| 2876 | void __sched interruptible_sleep_on(wait_queue_head_t *q) | ||
| 2877 | { | ||
| 2878 | sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
| 2879 | } | ||
| 2880 | EXPORT_SYMBOL(interruptible_sleep_on); | ||
| 2881 | |||
| 2882 | long __sched | ||
| 2883 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) | ||
| 2884 | { | ||
| 2885 | return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); | ||
| 2886 | } | ||
| 2887 | EXPORT_SYMBOL(interruptible_sleep_on_timeout); | ||
| 2888 | |||
| 2889 | void __sched sleep_on(wait_queue_head_t *q) | ||
| 2890 | { | ||
| 2891 | sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
| 2892 | } | ||
| 2893 | EXPORT_SYMBOL(sleep_on); | ||
| 2894 | |||
| 2895 | long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) | ||
| 2896 | { | ||
| 2897 | return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); | ||
| 2898 | } | ||
| 2899 | EXPORT_SYMBOL(sleep_on_timeout); | ||
| 2900 | |||
| 2901 | #ifdef CONFIG_RT_MUTEXES | 2849 | #ifdef CONFIG_RT_MUTEXES |
| 2902 | 2850 | ||
| 2903 | /* | 2851 | /* |
| @@ -2908,7 +2856,8 @@ EXPORT_SYMBOL(sleep_on_timeout); | |||
| 2908 | * This function changes the 'effective' priority of a task. It does | 2856 | * This function changes the 'effective' priority of a task. It does |
| 2909 | * not touch ->normal_prio like __setscheduler(). | 2857 | * not touch ->normal_prio like __setscheduler(). |
| 2910 | * | 2858 | * |
| 2911 | * Used by the rt_mutex code to implement priority inheritance logic. | 2859 | * Used by the rt_mutex code to implement priority inheritance |
| 2860 | * logic. Call site only calls if the priority of the task changed. | ||
| 2912 | */ | 2861 | */ |
| 2913 | void rt_mutex_setprio(struct task_struct *p, int prio) | 2862 | void rt_mutex_setprio(struct task_struct *p, int prio) |
| 2914 | { | 2863 | { |
| @@ -2998,7 +2947,7 @@ void set_user_nice(struct task_struct *p, long nice) | |||
| 2998 | unsigned long flags; | 2947 | unsigned long flags; |
| 2999 | struct rq *rq; | 2948 | struct rq *rq; |
| 3000 | 2949 | ||
| 3001 | if (TASK_NICE(p) == nice || nice < -20 || nice > 19) | 2950 | if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) |
| 3002 | return; | 2951 | return; |
| 3003 | /* | 2952 | /* |
| 3004 | * We have to be careful, if called from sys_setpriority(), | 2953 | * We have to be careful, if called from sys_setpriority(), |
| @@ -3076,11 +3025,11 @@ SYSCALL_DEFINE1(nice, int, increment) | |||
| 3076 | if (increment > 40) | 3025 | if (increment > 40) |
| 3077 | increment = 40; | 3026 | increment = 40; |
| 3078 | 3027 | ||
| 3079 | nice = TASK_NICE(current) + increment; | 3028 | nice = task_nice(current) + increment; |
| 3080 | if (nice < -20) | 3029 | if (nice < MIN_NICE) |
| 3081 | nice = -20; | 3030 | nice = MIN_NICE; |
| 3082 | if (nice > 19) | 3031 | if (nice > MAX_NICE) |
| 3083 | nice = 19; | 3032 | nice = MAX_NICE; |
| 3084 | 3033 | ||
| 3085 | if (increment < 0 && !can_nice(current, nice)) | 3034 | if (increment < 0 && !can_nice(current, nice)) |
| 3086 | return -EPERM; | 3035 | return -EPERM; |
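This hunk (and the `sched_copy_attr()` one below) replaces the bare -20/19 bounds with `MIN_NICE`/`MAX_NICE`. A small illustrative helper restating the clamping, assuming the macros live in `<linux/sched/prio.h>` (the header is not part of this diff): with `task_nice(current) == -5` and `increment == 40`, the intermediate value 35 clamps down to `MAX_NICE` (19).

```c
#include <linux/kernel.h>	/* clamp() */
#include <linux/sched/prio.h>	/* MIN_NICE, MAX_NICE -- assumed location */

/* Illustrative only: clamp a requested nice value into the valid range. */
static long clamp_nice(long nice)
{
	return clamp(nice, (long)MIN_NICE, (long)MAX_NICE);
}
```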
| @@ -3109,18 +3058,6 @@ int task_prio(const struct task_struct *p) | |||
| 3109 | } | 3058 | } |
| 3110 | 3059 | ||
| 3111 | /** | 3060 | /** |
| 3112 | * task_nice - return the nice value of a given task. | ||
| 3113 | * @p: the task in question. | ||
| 3114 | * | ||
| 3115 | * Return: The nice value [ -20 ... 0 ... 19 ]. | ||
| 3116 | */ | ||
| 3117 | int task_nice(const struct task_struct *p) | ||
| 3118 | { | ||
| 3119 | return TASK_NICE(p); | ||
| 3120 | } | ||
| 3121 | EXPORT_SYMBOL(task_nice); | ||
| 3122 | |||
| 3123 | /** | ||
| 3124 | * idle_cpu - is a given cpu idle currently? | 3061 | * idle_cpu - is a given cpu idle currently? |
| 3125 | * @cpu: the processor in question. | 3062 | * @cpu: the processor in question. |
| 3126 | * | 3063 | * |
| @@ -3189,9 +3126,8 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr) | |||
| 3189 | dl_se->dl_new = 1; | 3126 | dl_se->dl_new = 1; |
| 3190 | } | 3127 | } |
| 3191 | 3128 | ||
| 3192 | /* Actually do priority change: must hold pi & rq lock. */ | 3129 | static void __setscheduler_params(struct task_struct *p, |
| 3193 | static void __setscheduler(struct rq *rq, struct task_struct *p, | 3130 | const struct sched_attr *attr) |
| 3194 | const struct sched_attr *attr) | ||
| 3195 | { | 3131 | { |
| 3196 | int policy = attr->sched_policy; | 3132 | int policy = attr->sched_policy; |
| 3197 | 3133 | ||
| @@ -3211,9 +3147,21 @@ static void __setscheduler(struct rq *rq, struct task_struct *p, | |||
| 3211 | * getparam()/getattr() don't report silly values for !rt tasks. | 3147 | * getparam()/getattr() don't report silly values for !rt tasks. |
| 3212 | */ | 3148 | */ |
| 3213 | p->rt_priority = attr->sched_priority; | 3149 | p->rt_priority = attr->sched_priority; |
| 3214 | |||
| 3215 | p->normal_prio = normal_prio(p); | 3150 | p->normal_prio = normal_prio(p); |
| 3216 | p->prio = rt_mutex_getprio(p); | 3151 | set_load_weight(p); |
| 3152 | } | ||
| 3153 | |||
| 3154 | /* Actually do priority change: must hold pi & rq lock. */ | ||
| 3155 | static void __setscheduler(struct rq *rq, struct task_struct *p, | ||
| 3156 | const struct sched_attr *attr) | ||
| 3157 | { | ||
| 3158 | __setscheduler_params(p, attr); | ||
| 3159 | |||
| 3160 | /* | ||
| 3161 | * If we get here, there was no pi waiters boosting the | ||
| 3162 | * task. It is safe to use the normal prio. | ||
| 3163 | */ | ||
| 3164 | p->prio = normal_prio(p); | ||
| 3217 | 3165 | ||
| 3218 | if (dl_prio(p->prio)) | 3166 | if (dl_prio(p->prio)) |
| 3219 | p->sched_class = &dl_sched_class; | 3167 | p->sched_class = &dl_sched_class; |
| @@ -3221,8 +3169,6 @@ static void __setscheduler(struct rq *rq, struct task_struct *p, | |||
| 3221 | p->sched_class = &rt_sched_class; | 3169 | p->sched_class = &rt_sched_class; |
| 3222 | else | 3170 | else |
| 3223 | p->sched_class = &fair_sched_class; | 3171 | p->sched_class = &fair_sched_class; |
| 3224 | |||
| 3225 | set_load_weight(p); | ||
| 3226 | } | 3172 | } |
| 3227 | 3173 | ||
| 3228 | static void | 3174 | static void |
| @@ -3275,6 +3221,8 @@ static int __sched_setscheduler(struct task_struct *p, | |||
| 3275 | const struct sched_attr *attr, | 3221 | const struct sched_attr *attr, |
| 3276 | bool user) | 3222 | bool user) |
| 3277 | { | 3223 | { |
| 3224 | int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : | ||
| 3225 | MAX_RT_PRIO - 1 - attr->sched_priority; | ||
| 3278 | int retval, oldprio, oldpolicy = -1, on_rq, running; | 3226 | int retval, oldprio, oldpolicy = -1, on_rq, running; |
| 3279 | int policy = attr->sched_policy; | 3227 | int policy = attr->sched_policy; |
| 3280 | unsigned long flags; | 3228 | unsigned long flags; |
| @@ -3319,7 +3267,7 @@ recheck: | |||
| 3319 | */ | 3267 | */ |
| 3320 | if (user && !capable(CAP_SYS_NICE)) { | 3268 | if (user && !capable(CAP_SYS_NICE)) { |
| 3321 | if (fair_policy(policy)) { | 3269 | if (fair_policy(policy)) { |
| 3322 | if (attr->sched_nice < TASK_NICE(p) && | 3270 | if (attr->sched_nice < task_nice(p) && |
| 3323 | !can_nice(p, attr->sched_nice)) | 3271 | !can_nice(p, attr->sched_nice)) |
| 3324 | return -EPERM; | 3272 | return -EPERM; |
| 3325 | } | 3273 | } |
| @@ -3338,12 +3286,21 @@ recheck: | |||
| 3338 | return -EPERM; | 3286 | return -EPERM; |
| 3339 | } | 3287 | } |
| 3340 | 3288 | ||
| 3289 | /* | ||
| 3290 | * Can't set/change SCHED_DEADLINE policy at all for now | ||
| 3291 | * (safest behavior); in the future we would like to allow | ||
| 3292 | * unprivileged DL tasks to increase their relative deadline | ||
| 3293 | * or reduce their runtime (both ways reducing utilization) | ||
| 3294 | */ | ||
| 3295 | if (dl_policy(policy)) | ||
| 3296 | return -EPERM; | ||
| 3297 | |||
| 3341 | /* | 3298 | /* |
| 3342 | * Treat SCHED_IDLE as nice 20. Only allow a switch to | 3299 | * Treat SCHED_IDLE as nice 20. Only allow a switch to |
| 3343 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. | 3300 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. |
| 3344 | */ | 3301 | */ |
| 3345 | if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { | 3302 | if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { |
| 3346 | if (!can_nice(p, TASK_NICE(p))) | 3303 | if (!can_nice(p, task_nice(p))) |
| 3347 | return -EPERM; | 3304 | return -EPERM; |
| 3348 | } | 3305 | } |
| 3349 | 3306 | ||
| @@ -3380,16 +3337,18 @@ recheck: | |||
| 3380 | } | 3337 | } |
| 3381 | 3338 | ||
| 3382 | /* | 3339 | /* |
| 3383 | * If not changing anything there's no need to proceed further: | 3340 | * If not changing anything there's no need to proceed further, |
| 3341 | * but store a possible modification of reset_on_fork. | ||
| 3384 | */ | 3342 | */ |
| 3385 | if (unlikely(policy == p->policy)) { | 3343 | if (unlikely(policy == p->policy)) { |
| 3386 | if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p)) | 3344 | if (fair_policy(policy) && attr->sched_nice != task_nice(p)) |
| 3387 | goto change; | 3345 | goto change; |
| 3388 | if (rt_policy(policy) && attr->sched_priority != p->rt_priority) | 3346 | if (rt_policy(policy) && attr->sched_priority != p->rt_priority) |
| 3389 | goto change; | 3347 | goto change; |
| 3390 | if (dl_policy(policy)) | 3348 | if (dl_policy(policy)) |
| 3391 | goto change; | 3349 | goto change; |
| 3392 | 3350 | ||
| 3351 | p->sched_reset_on_fork = reset_on_fork; | ||
| 3393 | task_rq_unlock(rq, p, &flags); | 3352 | task_rq_unlock(rq, p, &flags); |
| 3394 | return 0; | 3353 | return 0; |
| 3395 | } | 3354 | } |
| @@ -3443,6 +3402,24 @@ change: | |||
| 3443 | return -EBUSY; | 3402 | return -EBUSY; |
| 3444 | } | 3403 | } |
| 3445 | 3404 | ||
| 3405 | p->sched_reset_on_fork = reset_on_fork; | ||
| 3406 | oldprio = p->prio; | ||
| 3407 | |||
| 3408 | /* | ||
| 3409 | * Special case for priority boosted tasks. | ||
| 3410 | * | ||
| 3411 | * If the new priority is lower or equal (user space view) | ||
| 3412 | * than the current (boosted) priority, we just store the new | ||
| 3413 | * normal parameters and do not touch the scheduler class and | ||
| 3414 | * the runqueue. This will be done when the task deboost | ||
| 3415 | * itself. | ||
| 3416 | */ | ||
| 3417 | if (rt_mutex_check_prio(p, newprio)) { | ||
| 3418 | __setscheduler_params(p, attr); | ||
| 3419 | task_rq_unlock(rq, p, &flags); | ||
| 3420 | return 0; | ||
| 3421 | } | ||
| 3422 | |||
| 3446 | on_rq = p->on_rq; | 3423 | on_rq = p->on_rq; |
| 3447 | running = task_current(rq, p); | 3424 | running = task_current(rq, p); |
| 3448 | if (on_rq) | 3425 | if (on_rq) |
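The `newprio` computed at the top of `__sched_setscheduler()` (see the earlier hunk in this function) is expressed on the kernel's internal priority scale, where numerically lower means higher priority, so it can be compared against a possibly rt_mutex-boosted `p->prio`. A restatement of that mapping, with the macro header locations assumed rather than shown in this diff:

```c
#include <linux/sched/prio.h>		/* MAX_RT_PRIO -- assumed location */
#include <linux/sched/deadline.h>	/* MAX_DL_PRIO -- assumed location */

/*
 * Illustrative restatement of the newprio computation:
 *   SCHED_DEADLINE                 -> MAX_DL_PRIO - 1 (i.e. -1, above all RT)
 *   SCHED_FIFO/SCHED_RR prio 1..99 -> 98..0
 * Lower numbers win, so rt_mutex_check_prio() can decide whether the new
 * setting is already dominated by a priority-inheritance boost.
 */
static int attr_to_newprio(bool deadline, unsigned int sched_priority)
{
	return deadline ? MAX_DL_PRIO - 1 : MAX_RT_PRIO - 1 - sched_priority;
}
```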
| @@ -3450,16 +3427,18 @@ change: | |||
| 3450 | if (running) | 3427 | if (running) |
| 3451 | p->sched_class->put_prev_task(rq, p); | 3428 | p->sched_class->put_prev_task(rq, p); |
| 3452 | 3429 | ||
| 3453 | p->sched_reset_on_fork = reset_on_fork; | ||
| 3454 | |||
| 3455 | oldprio = p->prio; | ||
| 3456 | prev_class = p->sched_class; | 3430 | prev_class = p->sched_class; |
| 3457 | __setscheduler(rq, p, attr); | 3431 | __setscheduler(rq, p, attr); |
| 3458 | 3432 | ||
| 3459 | if (running) | 3433 | if (running) |
| 3460 | p->sched_class->set_curr_task(rq); | 3434 | p->sched_class->set_curr_task(rq); |
| 3461 | if (on_rq) | 3435 | if (on_rq) { |
| 3462 | enqueue_task(rq, p, 0); | 3436 | /* |
| 3437 | * We enqueue to tail when the priority of a task is | ||
| 3438 | * increased (user space view). | ||
| 3439 | */ | ||
| 3440 | enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); | ||
| 3441 | } | ||
| 3463 | 3442 | ||
| 3464 | check_class_changed(rq, p, prev_class, oldprio); | 3443 | check_class_changed(rq, p, prev_class, oldprio); |
| 3465 | task_rq_unlock(rq, p, &flags); | 3444 | task_rq_unlock(rq, p, &flags); |
| @@ -3615,7 +3594,7 @@ static int sched_copy_attr(struct sched_attr __user *uattr, | |||
| 3615 | * XXX: do we want to be lenient like existing syscalls; or do we want | 3594 | * XXX: do we want to be lenient like existing syscalls; or do we want |
| 3616 | * to be strict and return an error on out-of-bounds values? | 3595 | * to be strict and return an error on out-of-bounds values? |
| 3617 | */ | 3596 | */ |
| 3618 | attr->sched_nice = clamp(attr->sched_nice, -20, 19); | 3597 | attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); |
| 3619 | 3598 | ||
| 3620 | out: | 3599 | out: |
| 3621 | return ret; | 3600 | return ret; |
| @@ -3836,7 +3815,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, | |||
| 3836 | else if (task_has_rt_policy(p)) | 3815 | else if (task_has_rt_policy(p)) |
| 3837 | attr.sched_priority = p->rt_priority; | 3816 | attr.sched_priority = p->rt_priority; |
| 3838 | else | 3817 | else |
| 3839 | attr.sched_nice = TASK_NICE(p); | 3818 | attr.sched_nice = task_nice(p); |
| 3840 | 3819 | ||
| 3841 | rcu_read_unlock(); | 3820 | rcu_read_unlock(); |
| 3842 | 3821 | ||
| @@ -4474,6 +4453,7 @@ void init_idle(struct task_struct *idle, int cpu) | |||
| 4474 | rcu_read_unlock(); | 4453 | rcu_read_unlock(); |
| 4475 | 4454 | ||
| 4476 | rq->curr = rq->idle = idle; | 4455 | rq->curr = rq->idle = idle; |
| 4456 | idle->on_rq = 1; | ||
| 4477 | #if defined(CONFIG_SMP) | 4457 | #if defined(CONFIG_SMP) |
| 4478 | idle->on_cpu = 1; | 4458 | idle->on_cpu = 1; |
| 4479 | #endif | 4459 | #endif |
| @@ -4693,8 +4673,10 @@ void idle_task_exit(void) | |||
| 4693 | 4673 | ||
| 4694 | BUG_ON(cpu_online(smp_processor_id())); | 4674 | BUG_ON(cpu_online(smp_processor_id())); |
| 4695 | 4675 | ||
| 4696 | if (mm != &init_mm) | 4676 | if (mm != &init_mm) { |
| 4697 | switch_mm(mm, &init_mm, current); | 4677 | switch_mm(mm, &init_mm, current); |
| 4678 | finish_arch_post_lock_switch(); | ||
| 4679 | } | ||
| 4698 | mmdrop(mm); | 4680 | mmdrop(mm); |
| 4699 | } | 4681 | } |
| 4700 | 4682 | ||
| @@ -4712,6 +4694,22 @@ static void calc_load_migrate(struct rq *rq) | |||
| 4712 | atomic_long_add(delta, &calc_load_tasks); | 4694 | atomic_long_add(delta, &calc_load_tasks); |
| 4713 | } | 4695 | } |
| 4714 | 4696 | ||
| 4697 | static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) | ||
| 4698 | { | ||
| 4699 | } | ||
| 4700 | |||
| 4701 | static const struct sched_class fake_sched_class = { | ||
| 4702 | .put_prev_task = put_prev_task_fake, | ||
| 4703 | }; | ||
| 4704 | |||
| 4705 | static struct task_struct fake_task = { | ||
| 4706 | /* | ||
| 4707 | * Avoid pull_{rt,dl}_task() | ||
| 4708 | */ | ||
| 4709 | .prio = MAX_PRIO + 1, | ||
| 4710 | .sched_class = &fake_sched_class, | ||
| 4711 | }; | ||
| 4712 | |||
| 4715 | /* | 4713 | /* |
| 4716 | * Migrate all tasks from the rq, sleeping tasks will be migrated by | 4714 | * Migrate all tasks from the rq, sleeping tasks will be migrated by |
| 4717 | * try_to_wake_up()->select_task_rq(). | 4715 | * try_to_wake_up()->select_task_rq(). |
| @@ -4752,7 +4750,7 @@ static void migrate_tasks(unsigned int dead_cpu) | |||
| 4752 | if (rq->nr_running == 1) | 4750 | if (rq->nr_running == 1) |
| 4753 | break; | 4751 | break; |
| 4754 | 4752 | ||
| 4755 | next = pick_next_task(rq); | 4753 | next = pick_next_task(rq, &fake_task); |
| 4756 | BUG_ON(!next); | 4754 | BUG_ON(!next); |
| 4757 | next->sched_class->put_prev_task(rq, next); | 4755 | next->sched_class->put_prev_task(rq, next); |
| 4758 | 4756 | ||
| @@ -4842,7 +4840,7 @@ set_table_entry(struct ctl_table *entry, | |||
| 4842 | static struct ctl_table * | 4840 | static struct ctl_table * |
| 4843 | sd_alloc_ctl_domain_table(struct sched_domain *sd) | 4841 | sd_alloc_ctl_domain_table(struct sched_domain *sd) |
| 4844 | { | 4842 | { |
| 4845 | struct ctl_table *table = sd_alloc_ctl_entry(13); | 4843 | struct ctl_table *table = sd_alloc_ctl_entry(14); |
| 4846 | 4844 | ||
| 4847 | if (table == NULL) | 4845 | if (table == NULL) |
| 4848 | return NULL; | 4846 | return NULL; |
| @@ -4870,9 +4868,12 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) | |||
| 4870 | sizeof(int), 0644, proc_dointvec_minmax, false); | 4868 | sizeof(int), 0644, proc_dointvec_minmax, false); |
| 4871 | set_table_entry(&table[10], "flags", &sd->flags, | 4869 | set_table_entry(&table[10], "flags", &sd->flags, |
| 4872 | sizeof(int), 0644, proc_dointvec_minmax, false); | 4870 | sizeof(int), 0644, proc_dointvec_minmax, false); |
| 4873 | set_table_entry(&table[11], "name", sd->name, | 4871 | set_table_entry(&table[11], "max_newidle_lb_cost", |
| 4872 | &sd->max_newidle_lb_cost, | ||
| 4873 | sizeof(long), 0644, proc_doulongvec_minmax, false); | ||
| 4874 | set_table_entry(&table[12], "name", sd->name, | ||
| 4874 | CORENAME_MAX_SIZE, 0444, proc_dostring, false); | 4875 | CORENAME_MAX_SIZE, 0444, proc_dostring, false); |
| 4875 | /* &table[12] is terminator */ | 4876 | /* &table[13] is terminator */ |
| 4876 | 4877 | ||
| 4877 | return table; | 4878 | return table; |
| 4878 | } | 4879 | } |
| @@ -6452,7 +6453,7 @@ static cpumask_var_t fallback_doms; | |||
| 6452 | * cpu core maps. It is supposed to return 1 if the topology changed | 6453 | * cpu core maps. It is supposed to return 1 if the topology changed |
| 6453 | * or 0 if it stayed the same. | 6454 | * or 0 if it stayed the same. |
| 6454 | */ | 6455 | */ |
| 6455 | int __attribute__((weak)) arch_update_cpu_topology(void) | 6456 | int __weak arch_update_cpu_topology(void) |
| 6456 | { | 6457 | { |
| 6457 | return 0; | 6458 | return 0; |
| 6458 | } | 6459 | } |
| @@ -6849,7 +6850,6 @@ void __init sched_init(void) | |||
| 6849 | 6850 | ||
| 6850 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; | 6851 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; |
| 6851 | #ifdef CONFIG_RT_GROUP_SCHED | 6852 | #ifdef CONFIG_RT_GROUP_SCHED |
| 6852 | INIT_LIST_HEAD(&rq->leaf_rt_rq_list); | ||
| 6853 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); | 6853 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); |
| 6854 | #endif | 6854 | #endif |
| 6855 | 6855 | ||
| @@ -6938,7 +6938,8 @@ void __might_sleep(const char *file, int line, int preempt_offset) | |||
| 6938 | static unsigned long prev_jiffy; /* ratelimiting */ | 6938 | static unsigned long prev_jiffy; /* ratelimiting */ |
| 6939 | 6939 | ||
| 6940 | rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ | 6940 | rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ |
| 6941 | if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || | 6941 | if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && |
| 6942 | !is_idle_task(current)) || | ||
| 6942 | system_state != SYSTEM_RUNNING || oops_in_progress) | 6943 | system_state != SYSTEM_RUNNING || oops_in_progress) |
| 6943 | return; | 6944 | return; |
| 6944 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) | 6945 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
| @@ -6956,6 +6957,13 @@ void __might_sleep(const char *file, int line, int preempt_offset) | |||
| 6956 | debug_show_held_locks(current); | 6957 | debug_show_held_locks(current); |
| 6957 | if (irqs_disabled()) | 6958 | if (irqs_disabled()) |
| 6958 | print_irqtrace_events(current); | 6959 | print_irqtrace_events(current); |
| 6960 | #ifdef CONFIG_DEBUG_PREEMPT | ||
| 6961 | if (!preempt_count_equals(preempt_offset)) { | ||
| 6962 | pr_err("Preemption disabled at:"); | ||
| 6963 | print_ip_sym(current->preempt_disable_ip); | ||
| 6964 | pr_cont("\n"); | ||
| 6965 | } | ||
| 6966 | #endif | ||
| 6959 | dump_stack(); | 6967 | dump_stack(); |
| 6960 | } | 6968 | } |
| 6961 | EXPORT_SYMBOL(__might_sleep); | 6969 | EXPORT_SYMBOL(__might_sleep); |
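The two __might_sleep() hunks above do two things: the warning is no longer raised for the idle task, and with CONFIG_DEBUG_PREEMPT the location recorded at the last preempt_disable() is printed so the offending critical section can be found. A minimal user-space sketch of that "remember where, report later" idea; every name here is invented for illustration and is not kernel API.

```c
/* Sketch only: record where "preemption" was disabled and report that
 * location when a later might_sleep()-style check trips. */
#include <stdio.h>

static int fake_preempt_count;
static const char *fake_preempt_disable_file;
static int fake_preempt_disable_line;

#define fake_preempt_disable() do {                              \
        if (fake_preempt_count++ == 0) {                         \
                fake_preempt_disable_file = __FILE__;            \
                fake_preempt_disable_line = __LINE__;            \
        }                                                        \
} while (0)

static void fake_might_sleep(const char *file, int line)
{
        if (fake_preempt_count == 0)
                return;         /* sleeping here would be fine */

        fprintf(stderr, "BUG: sleeping function called from invalid context at %s:%d\n",
                file, line);
        fprintf(stderr, "Preemption disabled at: %s:%d\n",
                fake_preempt_disable_file, fake_preempt_disable_line);
}

int main(void)
{
        fake_preempt_disable();                 /* this location gets reported */
        fake_might_sleep(__FILE__, __LINE__);   /* ...when the check fires here */
        return 0;
}
```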
| @@ -7009,7 +7017,7 @@ void normalize_rt_tasks(void) | |||
| 7009 | * Renice negative nice level userspace | 7017 | * Renice negative nice level userspace |
| 7010 | * tasks back to 0: | 7018 | * tasks back to 0: |
| 7011 | */ | 7019 | */ |
| 7012 | if (TASK_NICE(p) < 0 && p->mm) | 7020 | if (task_nice(p) < 0 && p->mm) |
| 7013 | set_user_nice(p, 0); | 7021 | set_user_nice(p, 0); |
| 7014 | continue; | 7022 | continue; |
| 7015 | } | 7023 | } |
| @@ -7177,7 +7185,7 @@ void sched_move_task(struct task_struct *tsk) | |||
| 7177 | if (unlikely(running)) | 7185 | if (unlikely(running)) |
| 7178 | tsk->sched_class->put_prev_task(rq, tsk); | 7186 | tsk->sched_class->put_prev_task(rq, tsk); |
| 7179 | 7187 | ||
| 7180 | tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id, | 7188 | tg = container_of(task_css_check(tsk, cpu_cgrp_id, |
| 7181 | lockdep_is_held(&tsk->sighand->siglock)), | 7189 | lockdep_is_held(&tsk->sighand->siglock)), |
| 7182 | struct task_group, css); | 7190 | struct task_group, css); |
| 7183 | tg = autogroup_task_group(tsk, tg); | 7191 | tg = autogroup_task_group(tsk, tg); |
| @@ -7604,7 +7612,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, | |||
| 7604 | { | 7612 | { |
| 7605 | struct task_struct *task; | 7613 | struct task_struct *task; |
| 7606 | 7614 | ||
| 7607 | cgroup_taskset_for_each(task, css, tset) { | 7615 | cgroup_taskset_for_each(task, tset) { |
| 7608 | #ifdef CONFIG_RT_GROUP_SCHED | 7616 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7609 | if (!sched_rt_can_attach(css_tg(css), task)) | 7617 | if (!sched_rt_can_attach(css_tg(css), task)) |
| 7610 | return -EINVAL; | 7618 | return -EINVAL; |
| @@ -7622,7 +7630,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css, | |||
| 7622 | { | 7630 | { |
| 7623 | struct task_struct *task; | 7631 | struct task_struct *task; |
| 7624 | 7632 | ||
| 7625 | cgroup_taskset_for_each(task, css, tset) | 7633 | cgroup_taskset_for_each(task, tset) |
| 7626 | sched_move_task(task); | 7634 | sched_move_task(task); |
| 7627 | } | 7635 | } |
| 7628 | 7636 | ||
| @@ -7961,8 +7969,7 @@ static struct cftype cpu_files[] = { | |||
| 7961 | { } /* terminate */ | 7969 | { } /* terminate */ |
| 7962 | }; | 7970 | }; |
| 7963 | 7971 | ||
| 7964 | struct cgroup_subsys cpu_cgroup_subsys = { | 7972 | struct cgroup_subsys cpu_cgrp_subsys = { |
| 7965 | .name = "cpu", | ||
| 7966 | .css_alloc = cpu_cgroup_css_alloc, | 7973 | .css_alloc = cpu_cgroup_css_alloc, |
| 7967 | .css_free = cpu_cgroup_css_free, | 7974 | .css_free = cpu_cgroup_css_free, |
| 7968 | .css_online = cpu_cgroup_css_online, | 7975 | .css_online = cpu_cgroup_css_online, |
| @@ -7970,7 +7977,6 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
| 7970 | .can_attach = cpu_cgroup_can_attach, | 7977 | .can_attach = cpu_cgroup_can_attach, |
| 7971 | .attach = cpu_cgroup_attach, | 7978 | .attach = cpu_cgroup_attach, |
| 7972 | .exit = cpu_cgroup_exit, | 7979 | .exit = cpu_cgroup_exit, |
| 7973 | .subsys_id = cpu_cgroup_subsys_id, | ||
| 7974 | .base_cftypes = cpu_files, | 7980 | .base_cftypes = cpu_files, |
| 7975 | .early_init = 1, | 7981 | .early_init = 1, |
| 7976 | }; | 7982 | }; |
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 622e0818f905..c143ee380e3a 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c | |||
| @@ -41,7 +41,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) | |||
| 41 | /* return cpu accounting group to which this task belongs */ | 41 | /* return cpu accounting group to which this task belongs */ |
| 42 | static inline struct cpuacct *task_ca(struct task_struct *tsk) | 42 | static inline struct cpuacct *task_ca(struct task_struct *tsk) |
| 43 | { | 43 | { |
| 44 | return css_ca(task_css(tsk, cpuacct_subsys_id)); | 44 | return css_ca(task_css(tsk, cpuacct_cgrp_id)); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static inline struct cpuacct *parent_ca(struct cpuacct *ca) | 47 | static inline struct cpuacct *parent_ca(struct cpuacct *ca) |
| @@ -275,11 +275,9 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val) | |||
| 275 | rcu_read_unlock(); | 275 | rcu_read_unlock(); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | struct cgroup_subsys cpuacct_subsys = { | 278 | struct cgroup_subsys cpuacct_cgrp_subsys = { |
| 279 | .name = "cpuacct", | ||
| 280 | .css_alloc = cpuacct_css_alloc, | 279 | .css_alloc = cpuacct_css_alloc, |
| 281 | .css_free = cpuacct_css_free, | 280 | .css_free = cpuacct_css_free, |
| 282 | .subsys_id = cpuacct_subsys_id, | ||
| 283 | .base_cftypes = files, | 281 | .base_cftypes = files, |
| 284 | .early_init = 1, | 282 | .early_init = 1, |
| 285 | }; | 283 | }; |
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 5b8838b56d1c..5b9bb42b2d47 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c | |||
| @@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx) | |||
| 70 | 70 | ||
| 71 | static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) | 71 | static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) |
| 72 | { | 72 | { |
| 73 | WARN_ON(!cpu_present(idx) || idx == IDX_INVALID); | 73 | WARN_ON(idx == IDX_INVALID || !cpu_present(idx)); |
| 74 | 74 | ||
| 75 | if (dl_time_before(new_dl, cp->elements[idx].dl)) { | 75 | if (dl_time_before(new_dl, cp->elements[idx].dl)) { |
| 76 | cp->elements[idx].dl = new_dl; | 76 | cp->elements[idx].dl = new_dl; |
| @@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, | |||
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | out: | 119 | out: |
| 120 | WARN_ON(!cpu_present(best_cpu) && best_cpu != -1); | 120 | WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); |
| 121 | 121 | ||
| 122 | return best_cpu; | 122 | return best_cpu; |
| 123 | } | 123 | } |
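The two cpudeadline.c changes above only reorder the WARN_ON() operands so the cheap sentinel test (idx == IDX_INVALID, best_cpu != -1) runs first and short-circuits the cpu_present() lookup, which would otherwise be evaluated with an invalid index. A tiny stand-alone sketch of the same pattern, with invented names and an assumed CPU count:

```c
#include <stdbool.h>
#include <stdio.h>

#define IDX_INVALID  (-1)
#define NR_FAKE_CPUS 8

/* Stand-in for cpu_present(): indexes a fixed-size map, valid only for 0..7. */
static bool fake_cpu_present(int cpu)
{
        static const bool present[NR_FAKE_CPUS] = { true, true, true, true };
        return present[cpu];
}

static void sanity_check(int idx)
{
        /*
         * The sentinel is tested first: when idx is IDX_INVALID the ||
         * short-circuits and fake_cpu_present() is never called with an
         * out-of-range index -- the point of swapping the operands above.
         */
        if (idx == IDX_INVALID || !fake_cpu_present(idx))
                fprintf(stderr, "warning: bad or absent cpu index %d\n", idx);
}

int main(void)
{
        sanity_check(IDX_INVALID);      /* warns, but the map is never touched */
        sanity_check(2);                /* present, no warning */
        return 0;
}
```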
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 99947919e30b..a95097cb4591 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
| @@ -142,7 +142,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime, | |||
| 142 | p->utimescaled += cputime_scaled; | 142 | p->utimescaled += cputime_scaled; |
| 143 | account_group_user_time(p, cputime); | 143 | account_group_user_time(p, cputime); |
| 144 | 144 | ||
| 145 | index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; | 145 | index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; |
| 146 | 146 | ||
| 147 | /* Add user time to cpustat. */ | 147 | /* Add user time to cpustat. */ |
| 148 | task_group_account_field(p, index, (__force u64) cputime); | 148 | task_group_account_field(p, index, (__force u64) cputime); |
| @@ -169,7 +169,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime, | |||
| 169 | p->gtime += cputime; | 169 | p->gtime += cputime; |
| 170 | 170 | ||
| 171 | /* Add guest time to cpustat. */ | 171 | /* Add guest time to cpustat. */ |
| 172 | if (TASK_NICE(p) > 0) { | 172 | if (task_nice(p) > 0) { |
| 173 | cpustat[CPUTIME_NICE] += (__force u64) cputime; | 173 | cpustat[CPUTIME_NICE] += (__force u64) cputime; |
| 174 | cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; | 174 | cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; |
| 175 | } else { | 175 | } else { |
| @@ -258,16 +258,22 @@ static __always_inline bool steal_account_process_tick(void) | |||
| 258 | { | 258 | { |
| 259 | #ifdef CONFIG_PARAVIRT | 259 | #ifdef CONFIG_PARAVIRT |
| 260 | if (static_key_false(¶virt_steal_enabled)) { | 260 | if (static_key_false(¶virt_steal_enabled)) { |
| 261 | u64 steal, st = 0; | 261 | u64 steal; |
| 262 | cputime_t steal_ct; | ||
| 262 | 263 | ||
| 263 | steal = paravirt_steal_clock(smp_processor_id()); | 264 | steal = paravirt_steal_clock(smp_processor_id()); |
| 264 | steal -= this_rq()->prev_steal_time; | 265 | steal -= this_rq()->prev_steal_time; |
| 265 | 266 | ||
| 266 | st = steal_ticks(steal); | 267 | /* |
| 267 | this_rq()->prev_steal_time += st * TICK_NSEC; | 268 | * cputime_t may be less precise than nsecs (eg: if it's |
| 269 | * based on jiffies). Let's cast the result to cputime | ||
| 270 | * granularity and account the rest on the next rounds. | ||
| 271 | */ | ||
| 272 | steal_ct = nsecs_to_cputime(steal); | ||
| 273 | this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct); | ||
| 268 | 274 | ||
| 269 | account_steal_time(st); | 275 | account_steal_time(steal_ct); |
| 270 | return st; | 276 | return steal_ct; |
| 271 | } | 277 | } |
| 272 | #endif | 278 | #endif |
| 273 | return false; | 279 | return false; |
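The cputime.c hunk above replaces steal_ticks() with an explicit nsecs-to-cputime conversion and, as the new comment says, only advances prev_steal_time by the amount actually accounted, so a sub-tick remainder is carried into the next round instead of being lost. A user-space sketch of that carry-over, assuming a coarse 10 ms accounting granularity (names and values are made up):

```c
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 10000000ULL           /* assumed 10 ms accounting granularity */

static uint64_t prev_steal_time;        /* nanoseconds already accounted */

/* Convert nanoseconds of steal time into whole ticks, keeping the remainder. */
static uint64_t account_steal(uint64_t steal_clock_ns)
{
        uint64_t steal = steal_clock_ns - prev_steal_time;
        uint64_t ticks = steal / TICK_NSEC;

        /*
         * Only credit what was converted to tick granularity; the
         * leftover (steal % TICK_NSEC) stays pending and is picked up
         * by the next call instead of being dropped.
         */
        prev_steal_time += ticks * TICK_NSEC;
        return ticks;
}

int main(void)
{
        /* 15 ms of steal: 1 tick accounted now, 5 ms carried over. */
        printf("accounted %llu tick(s)\n", (unsigned long long)account_steal(15000000));
        /* another 15 ms (30 ms total): the carried 5 ms makes 2 ticks here. */
        printf("accounted %llu tick(s)\n", (unsigned long long)account_steal(30000000));
        return 0;
}
```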
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 15cbc17fbf84..27ef40925525 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
| @@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq) | |||
| 135 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | 135 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) |
| 136 | { | 136 | { |
| 137 | struct task_struct *p = dl_task_of(dl_se); | 137 | struct task_struct *p = dl_task_of(dl_se); |
| 138 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | ||
| 139 | 138 | ||
| 140 | if (p->nr_cpus_allowed > 1) | 139 | if (p->nr_cpus_allowed > 1) |
| 141 | dl_rq->dl_nr_migratory++; | 140 | dl_rq->dl_nr_migratory++; |
| @@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |||
| 146 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | 145 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) |
| 147 | { | 146 | { |
| 148 | struct task_struct *p = dl_task_of(dl_se); | 147 | struct task_struct *p = dl_task_of(dl_se); |
| 149 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | ||
| 150 | 148 | ||
| 151 | if (p->nr_cpus_allowed > 1) | 149 | if (p->nr_cpus_allowed > 1) |
| 152 | dl_rq->dl_nr_migratory--; | 150 | dl_rq->dl_nr_migratory--; |
| @@ -212,6 +210,16 @@ static inline int has_pushable_dl_tasks(struct rq *rq) | |||
| 212 | 210 | ||
| 213 | static int push_dl_task(struct rq *rq); | 211 | static int push_dl_task(struct rq *rq); |
| 214 | 212 | ||
| 213 | static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) | ||
| 214 | { | ||
| 215 | return dl_task(prev); | ||
| 216 | } | ||
| 217 | |||
| 218 | static inline void set_post_schedule(struct rq *rq) | ||
| 219 | { | ||
| 220 | rq->post_schedule = has_pushable_dl_tasks(rq); | ||
| 221 | } | ||
| 222 | |||
| 215 | #else | 223 | #else |
| 216 | 224 | ||
| 217 | static inline | 225 | static inline |
| @@ -234,6 +242,19 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |||
| 234 | { | 242 | { |
| 235 | } | 243 | } |
| 236 | 244 | ||
| 245 | static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) | ||
| 246 | { | ||
| 247 | return false; | ||
| 248 | } | ||
| 249 | |||
| 250 | static inline int pull_dl_task(struct rq *rq) | ||
| 251 | { | ||
| 252 | return 0; | ||
| 253 | } | ||
| 254 | |||
| 255 | static inline void set_post_schedule(struct rq *rq) | ||
| 256 | { | ||
| 257 | } | ||
| 237 | #endif /* CONFIG_SMP */ | 258 | #endif /* CONFIG_SMP */ |
| 238 | 259 | ||
| 239 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); | 260 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); |
| @@ -564,6 +585,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) | |||
| 564 | return 1; | 585 | return 1; |
| 565 | } | 586 | } |
| 566 | 587 | ||
| 588 | extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); | ||
| 589 | |||
| 567 | /* | 590 | /* |
| 568 | * Update the current task's runtime statistics (provided it is still | 591 | * Update the current task's runtime statistics (provided it is still |
| 569 | * a -deadline task and has not been removed from the dl_rq). | 592 | * a -deadline task and has not been removed from the dl_rq). |
| @@ -586,8 +609,8 @@ static void update_curr_dl(struct rq *rq) | |||
| 586 | * approach need further study. | 609 | * approach need further study. |
| 587 | */ | 610 | */ |
| 588 | delta_exec = rq_clock_task(rq) - curr->se.exec_start; | 611 | delta_exec = rq_clock_task(rq) - curr->se.exec_start; |
| 589 | if (unlikely((s64)delta_exec < 0)) | 612 | if (unlikely((s64)delta_exec <= 0)) |
| 590 | delta_exec = 0; | 613 | return; |
| 591 | 614 | ||
| 592 | schedstat_set(curr->se.statistics.exec_max, | 615 | schedstat_set(curr->se.statistics.exec_max, |
| 593 | max(curr->se.statistics.exec_max, delta_exec)); | 616 | max(curr->se.statistics.exec_max, delta_exec)); |
| @@ -627,11 +650,13 @@ static void update_curr_dl(struct rq *rq) | |||
| 627 | struct rt_rq *rt_rq = &rq->rt; | 650 | struct rt_rq *rt_rq = &rq->rt; |
| 628 | 651 | ||
| 629 | raw_spin_lock(&rt_rq->rt_runtime_lock); | 652 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
| 630 | rt_rq->rt_time += delta_exec; | ||
| 631 | /* | 653 | /* |
| 632 | * We'll let actual RT tasks worry about the overflow here, we | 654 | * We'll let actual RT tasks worry about the overflow here, we |
| 633 | * have our own CBS to keep us inline -- see above. | 655 | * have our own CBS to keep us inline; only account when RT |
| 656 | * bandwidth is relevant. | ||
| 634 | */ | 657 | */ |
| 658 | if (sched_rt_bandwidth_account(rt_rq)) | ||
| 659 | rt_rq->rt_time += delta_exec; | ||
| 635 | raw_spin_unlock(&rt_rq->rt_runtime_lock); | 660 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
| 636 | } | 661 | } |
| 637 | } | 662 | } |
| @@ -940,6 +965,8 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) | |||
| 940 | resched_task(rq->curr); | 965 | resched_task(rq->curr); |
| 941 | } | 966 | } |
| 942 | 967 | ||
| 968 | static int pull_dl_task(struct rq *this_rq); | ||
| 969 | |||
| 943 | #endif /* CONFIG_SMP */ | 970 | #endif /* CONFIG_SMP */ |
| 944 | 971 | ||
| 945 | /* | 972 | /* |
| @@ -986,7 +1013,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, | |||
| 986 | return rb_entry(left, struct sched_dl_entity, rb_node); | 1013 | return rb_entry(left, struct sched_dl_entity, rb_node); |
| 987 | } | 1014 | } |
| 988 | 1015 | ||
| 989 | struct task_struct *pick_next_task_dl(struct rq *rq) | 1016 | struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev) |
| 990 | { | 1017 | { |
| 991 | struct sched_dl_entity *dl_se; | 1018 | struct sched_dl_entity *dl_se; |
| 992 | struct task_struct *p; | 1019 | struct task_struct *p; |
| @@ -994,9 +1021,20 @@ struct task_struct *pick_next_task_dl(struct rq *rq) | |||
| 994 | 1021 | ||
| 995 | dl_rq = &rq->dl; | 1022 | dl_rq = &rq->dl; |
| 996 | 1023 | ||
| 1024 | if (need_pull_dl_task(rq, prev)) | ||
| 1025 | pull_dl_task(rq); | ||
| 1026 | /* | ||
| 1027 | * When prev is DL, we may throttle it in put_prev_task(). | ||
| 1028 | * So, we update time before we check for dl_nr_running. | ||
| 1029 | */ | ||
| 1030 | if (prev->sched_class == &dl_sched_class) | ||
| 1031 | update_curr_dl(rq); | ||
| 1032 | |||
| 997 | if (unlikely(!dl_rq->dl_nr_running)) | 1033 | if (unlikely(!dl_rq->dl_nr_running)) |
| 998 | return NULL; | 1034 | return NULL; |
| 999 | 1035 | ||
| 1036 | put_prev_task(rq, prev); | ||
| 1037 | |||
| 1000 | dl_se = pick_next_dl_entity(rq, dl_rq); | 1038 | dl_se = pick_next_dl_entity(rq, dl_rq); |
| 1001 | BUG_ON(!dl_se); | 1039 | BUG_ON(!dl_se); |
| 1002 | 1040 | ||
| @@ -1011,9 +1049,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq) | |||
| 1011 | start_hrtick_dl(rq, p); | 1049 | start_hrtick_dl(rq, p); |
| 1012 | #endif | 1050 | #endif |
| 1013 | 1051 | ||
| 1014 | #ifdef CONFIG_SMP | 1052 | set_post_schedule(rq); |
| 1015 | rq->post_schedule = has_pushable_dl_tasks(rq); | ||
| 1016 | #endif /* CONFIG_SMP */ | ||
| 1017 | 1053 | ||
| 1018 | return p; | 1054 | return p; |
| 1019 | } | 1055 | } |
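The two deadline.c hunks above convert pick_next_task_dl() to the new pick_next_task(rq, prev) convention: pull work first if the previous task was a -deadline task (this replaces the old pre_schedule hook), charge its runtime before looking at dl_nr_running (it may be throttled in put_prev_task()), bail out if the class has nothing runnable, and only then put prev and pick. A schematic, non-kernel sketch of that ordering; every type and helper below is a stand-in.

```c
#include <stdio.h>
#include <stddef.h>

struct fake_task { int is_dl; const char *name; };
struct fake_rq   { int dl_nr_running; struct fake_task *leftmost; };

static void fake_pull_dl_tasks(struct fake_rq *rq)  { (void)rq; /* pull from other CPUs */ }
static void fake_update_curr_dl(struct fake_rq *rq) { (void)rq; /* charge prev, maybe throttle */ }
static void fake_put_prev_task(struct fake_rq *rq, struct fake_task *prev)
{ (void)rq; (void)prev; /* requeue prev in its own class */ }

static struct fake_task *fake_pick_next_task_dl(struct fake_rq *rq, struct fake_task *prev)
{
        if (prev->is_dl)
                fake_pull_dl_tasks(rq);     /* replaces the old pre_schedule hook */

        /* prev may be throttled in put_prev_task(), so charge it first. */
        if (prev->is_dl)
                fake_update_curr_dl(rq);

        if (!rq->dl_nr_running)
                return NULL;                /* let a lower class pick instead */

        fake_put_prev_task(rq, prev);       /* only now does prev go back */
        return rq->leftmost;
}

int main(void)
{
        struct fake_task idle = { 0, "idle" }, dl = { 1, "dl-task" };
        struct fake_rq rq = { 1, &dl };
        struct fake_task *next = fake_pick_next_task_dl(&rq, &idle);

        printf("next: %s\n", next ? next->name : "(none)");
        return 0;
}
```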
| @@ -1422,13 +1458,6 @@ skip: | |||
| 1422 | return ret; | 1458 | return ret; |
| 1423 | } | 1459 | } |
| 1424 | 1460 | ||
| 1425 | static void pre_schedule_dl(struct rq *rq, struct task_struct *prev) | ||
| 1426 | { | ||
| 1427 | /* Try to pull other tasks here */ | ||
| 1428 | if (dl_task(prev)) | ||
| 1429 | pull_dl_task(rq); | ||
| 1430 | } | ||
| 1431 | |||
| 1432 | static void post_schedule_dl(struct rq *rq) | 1461 | static void post_schedule_dl(struct rq *rq) |
| 1433 | { | 1462 | { |
| 1434 | push_dl_tasks(rq); | 1463 | push_dl_tasks(rq); |
| @@ -1556,7 +1585,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) | |||
| 1556 | if (unlikely(p->dl.dl_throttled)) | 1585 | if (unlikely(p->dl.dl_throttled)) |
| 1557 | return; | 1586 | return; |
| 1558 | 1587 | ||
| 1559 | if (p->on_rq || rq->curr != p) { | 1588 | if (p->on_rq && rq->curr != p) { |
| 1560 | #ifdef CONFIG_SMP | 1589 | #ifdef CONFIG_SMP |
| 1561 | if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p)) | 1590 | if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p)) |
| 1562 | /* Only reschedule if pushing failed */ | 1591 | /* Only reschedule if pushing failed */ |
| @@ -1621,7 +1650,6 @@ const struct sched_class dl_sched_class = { | |||
| 1621 | .set_cpus_allowed = set_cpus_allowed_dl, | 1650 | .set_cpus_allowed = set_cpus_allowed_dl, |
| 1622 | .rq_online = rq_online_dl, | 1651 | .rq_online = rq_online_dl, |
| 1623 | .rq_offline = rq_offline_dl, | 1652 | .rq_offline = rq_offline_dl, |
| 1624 | .pre_schedule = pre_schedule_dl, | ||
| 1625 | .post_schedule = post_schedule_dl, | 1653 | .post_schedule = post_schedule_dl, |
| 1626 | .task_woken = task_woken_dl, | 1654 | .task_woken = task_woken_dl, |
| 1627 | #endif | 1655 | #endif |
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index dd52e7ffb10e..695f9773bb60 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c | |||
| @@ -111,8 +111,7 @@ static char *task_group_path(struct task_group *tg) | |||
| 111 | if (autogroup_path(tg, group_path, PATH_MAX)) | 111 | if (autogroup_path(tg, group_path, PATH_MAX)) |
| 112 | return group_path; | 112 | return group_path; |
| 113 | 113 | ||
| 114 | cgroup_path(tg->css.cgroup, group_path, PATH_MAX); | 114 | return cgroup_path(tg->css.cgroup, group_path, PATH_MAX); |
| 115 | return group_path; | ||
| 116 | } | 115 | } |
| 117 | #endif | 116 | #endif |
| 118 | 117 | ||
| @@ -321,6 +320,7 @@ do { \ | |||
| 321 | P(sched_goidle); | 320 | P(sched_goidle); |
| 322 | #ifdef CONFIG_SMP | 321 | #ifdef CONFIG_SMP |
| 323 | P64(avg_idle); | 322 | P64(avg_idle); |
| 323 | P64(max_idle_balance_cost); | ||
| 324 | #endif | 324 | #endif |
| 325 | 325 | ||
| 326 | P(ttwu_count); | 326 | P(ttwu_count); |
| @@ -533,15 +533,15 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m) | |||
| 533 | unsigned long nr_faults = -1; | 533 | unsigned long nr_faults = -1; |
| 534 | int cpu_current, home_node; | 534 | int cpu_current, home_node; |
| 535 | 535 | ||
| 536 | if (p->numa_faults) | 536 | if (p->numa_faults_memory) |
| 537 | nr_faults = p->numa_faults[2*node + i]; | 537 | nr_faults = p->numa_faults_memory[2*node + i]; |
| 538 | 538 | ||
| 539 | cpu_current = !i ? (task_node(p) == node) : | 539 | cpu_current = !i ? (task_node(p) == node) : |
| 540 | (pol && node_isset(node, pol->v.nodes)); | 540 | (pol && node_isset(node, pol->v.nodes)); |
| 541 | 541 | ||
| 542 | home_node = (p->numa_preferred_nid == node); | 542 | home_node = (p->numa_preferred_nid == node); |
| 543 | 543 | ||
| 544 | SEQ_printf(m, "numa_faults, %d, %d, %d, %d, %ld\n", | 544 | SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n", |
| 545 | i, node, cpu_current, home_node, nr_faults); | 545 | i, node, cpu_current, home_node, nr_faults); |
| 546 | } | 546 | } |
| 547 | } | 547 | } |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 78157099b167..7e9bd0b1fa9e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -322,13 +322,13 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) | |||
| 322 | list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) | 322 | list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) |
| 323 | 323 | ||
| 324 | /* Do the two (enqueued) entities belong to the same group ? */ | 324 | /* Do the two (enqueued) entities belong to the same group ? */ |
| 325 | static inline int | 325 | static inline struct cfs_rq * |
| 326 | is_same_group(struct sched_entity *se, struct sched_entity *pse) | 326 | is_same_group(struct sched_entity *se, struct sched_entity *pse) |
| 327 | { | 327 | { |
| 328 | if (se->cfs_rq == pse->cfs_rq) | 328 | if (se->cfs_rq == pse->cfs_rq) |
| 329 | return 1; | 329 | return se->cfs_rq; |
| 330 | 330 | ||
| 331 | return 0; | 331 | return NULL; |
| 332 | } | 332 | } |
| 333 | 333 | ||
| 334 | static inline struct sched_entity *parent_entity(struct sched_entity *se) | 334 | static inline struct sched_entity *parent_entity(struct sched_entity *se) |
| @@ -336,17 +336,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se) | |||
| 336 | return se->parent; | 336 | return se->parent; |
| 337 | } | 337 | } |
| 338 | 338 | ||
| 339 | /* return depth at which a sched entity is present in the hierarchy */ | ||
| 340 | static inline int depth_se(struct sched_entity *se) | ||
| 341 | { | ||
| 342 | int depth = 0; | ||
| 343 | |||
| 344 | for_each_sched_entity(se) | ||
| 345 | depth++; | ||
| 346 | |||
| 347 | return depth; | ||
| 348 | } | ||
| 349 | |||
| 350 | static void | 339 | static void |
| 351 | find_matching_se(struct sched_entity **se, struct sched_entity **pse) | 340 | find_matching_se(struct sched_entity **se, struct sched_entity **pse) |
| 352 | { | 341 | { |
| @@ -360,8 +349,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) | |||
| 360 | */ | 349 | */ |
| 361 | 350 | ||
| 362 | /* First walk up until both entities are at same depth */ | 351 | /* First walk up until both entities are at same depth */ |
| 363 | se_depth = depth_se(*se); | 352 | se_depth = (*se)->depth; |
| 364 | pse_depth = depth_se(*pse); | 353 | pse_depth = (*pse)->depth; |
| 365 | 354 | ||
| 366 | while (se_depth > pse_depth) { | 355 | while (se_depth > pse_depth) { |
| 367 | se_depth--; | 356 | se_depth--; |
| @@ -426,12 +415,6 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) | |||
| 426 | #define for_each_leaf_cfs_rq(rq, cfs_rq) \ | 415 | #define for_each_leaf_cfs_rq(rq, cfs_rq) \ |
| 427 | for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) | 416 | for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) |
| 428 | 417 | ||
| 429 | static inline int | ||
| 430 | is_same_group(struct sched_entity *se, struct sched_entity *pse) | ||
| 431 | { | ||
| 432 | return 1; | ||
| 433 | } | ||
| 434 | |||
| 435 | static inline struct sched_entity *parent_entity(struct sched_entity *se) | 418 | static inline struct sched_entity *parent_entity(struct sched_entity *se) |
| 436 | { | 419 | { |
| 437 | return NULL; | 420 | return NULL; |
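The fair.c hunks above drop the depth_se() walk (and the dummy !FAIR_GROUP_SCHED is_same_group()) in favour of a depth value cached in the sched_entity, so find_matching_se() can line up the two hierarchies without re-counting ancestors on every comparison. A small stand-alone sketch of "walk both entities up until they share a parent" using a cached depth; the structures are simplified stand-ins, not the kernel types.

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for a sched_entity in a group hierarchy. */
struct ent {
        struct ent *parent;
        int depth;              /* cached: 0 at the root, parent->depth + 1 below */
        const char *name;
};

/* Walk *se and *pse up until both point at entities with the same parent. */
static void find_matching(struct ent **se, struct ent **pse)
{
        int se_depth = (*se)->depth;    /* cached, no loop needed */
        int pse_depth = (*pse)->depth;

        while (se_depth > pse_depth) {
                se_depth--;
                *se = (*se)->parent;
        }
        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = (*pse)->parent;
        }
        while ((*se)->parent != (*pse)->parent) {
                *se = (*se)->parent;
                *pse = (*pse)->parent;
        }
}

int main(void)
{
        struct ent root  = { NULL, 0, "root" };
        struct ent grp_a = { &root, 1, "grp_a" }, grp_b = { &root, 1, "grp_b" };
        struct ent t1    = { &grp_a, 2, "t1" },  t2    = { &grp_b, 2, "t2" };
        struct ent *se = &t1, *pse = &t2;

        find_matching(&se, &pse);
        printf("matched at: %s vs %s\n", se->name, pse->name);  /* grp_a vs grp_b */
        return 0;
}
```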
| @@ -819,14 +802,6 @@ unsigned int sysctl_numa_balancing_scan_size = 256; | |||
| 819 | /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ | 802 | /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ |
| 820 | unsigned int sysctl_numa_balancing_scan_delay = 1000; | 803 | unsigned int sysctl_numa_balancing_scan_delay = 1000; |
| 821 | 804 | ||
| 822 | /* | ||
| 823 | * After skipping a page migration on a shared page, skip N more numa page | ||
| 824 | * migrations unconditionally. This reduces the number of NUMA migrations | ||
| 825 | * in shared memory workloads, and has the effect of pulling tasks towards | ||
| 826 | * where their memory lives, over pulling the memory towards the task. | ||
| 827 | */ | ||
| 828 | unsigned int sysctl_numa_balancing_migrate_deferred = 16; | ||
| 829 | |||
| 830 | static unsigned int task_nr_scan_windows(struct task_struct *p) | 805 | static unsigned int task_nr_scan_windows(struct task_struct *p) |
| 831 | { | 806 | { |
| 832 | unsigned long rss = 0; | 807 | unsigned long rss = 0; |
| @@ -893,10 +868,26 @@ struct numa_group { | |||
| 893 | struct list_head task_list; | 868 | struct list_head task_list; |
| 894 | 869 | ||
| 895 | struct rcu_head rcu; | 870 | struct rcu_head rcu; |
| 871 | nodemask_t active_nodes; | ||
| 896 | unsigned long total_faults; | 872 | unsigned long total_faults; |
| 873 | /* | ||
| 874 | * Faults_cpu is used to decide whether memory should move | ||
| 875 | * towards the CPU. As a consequence, these stats are weighted | ||
| 876 | * more by CPU use than by memory faults. | ||
| 877 | */ | ||
| 878 | unsigned long *faults_cpu; | ||
| 897 | unsigned long faults[0]; | 879 | unsigned long faults[0]; |
| 898 | }; | 880 | }; |
| 899 | 881 | ||
| 882 | /* Shared or private faults. */ | ||
| 883 | #define NR_NUMA_HINT_FAULT_TYPES 2 | ||
| 884 | |||
| 885 | /* Memory and CPU locality */ | ||
| 886 | #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2) | ||
| 887 | |||
| 888 | /* Averaged statistics, and temporary buffers. */ | ||
| 889 | #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2) | ||
| 890 | |||
| 900 | pid_t task_numa_group_id(struct task_struct *p) | 891 | pid_t task_numa_group_id(struct task_struct *p) |
| 901 | { | 892 | { |
| 902 | return p->numa_group ? p->numa_group->gid : 0; | 893 | return p->numa_group ? p->numa_group->gid : 0; |
| @@ -904,16 +895,16 @@ pid_t task_numa_group_id(struct task_struct *p) | |||
| 904 | 895 | ||
| 905 | static inline int task_faults_idx(int nid, int priv) | 896 | static inline int task_faults_idx(int nid, int priv) |
| 906 | { | 897 | { |
| 907 | return 2 * nid + priv; | 898 | return NR_NUMA_HINT_FAULT_TYPES * nid + priv; |
| 908 | } | 899 | } |
| 909 | 900 | ||
| 910 | static inline unsigned long task_faults(struct task_struct *p, int nid) | 901 | static inline unsigned long task_faults(struct task_struct *p, int nid) |
| 911 | { | 902 | { |
| 912 | if (!p->numa_faults) | 903 | if (!p->numa_faults_memory) |
| 913 | return 0; | 904 | return 0; |
| 914 | 905 | ||
| 915 | return p->numa_faults[task_faults_idx(nid, 0)] + | 906 | return p->numa_faults_memory[task_faults_idx(nid, 0)] + |
| 916 | p->numa_faults[task_faults_idx(nid, 1)]; | 907 | p->numa_faults_memory[task_faults_idx(nid, 1)]; |
| 917 | } | 908 | } |
| 918 | 909 | ||
| 919 | static inline unsigned long group_faults(struct task_struct *p, int nid) | 910 | static inline unsigned long group_faults(struct task_struct *p, int nid) |
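The numa_group hunk above splits the per-node statistics along two axes: shared vs. private (NR_NUMA_HINT_FAULT_TYPES) and memory vs. CPU locality, with the new faults_cpu view weighted by CPU use. task_faults_idx() keeps the same "types per node" addressing, index = NR_NUMA_HINT_FAULT_TYPES * nid + priv, and a node's total is the shared plus private entries. A tiny sketch of that addressing, with an assumed node count:

```c
#include <stdio.h>

#define NR_NUMA_HINT_FAULT_TYPES 2      /* shared (0) or private (1) */
#define FAKE_NR_NODES            4      /* assumption for the example */

static unsigned long faults_cpu[NR_NUMA_HINT_FAULT_TYPES * FAKE_NR_NODES];

static int faults_idx(int nid, int priv)
{
        return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
}

/* Shared + private CPU-side faults observed for one node. */
static unsigned long node_faults_cpu(int nid)
{
        return faults_cpu[faults_idx(nid, 0)] + faults_cpu[faults_idx(nid, 1)];
}

int main(void)
{
        faults_cpu[faults_idx(2, 0)] = 10;      /* node 2, shared  */
        faults_cpu[faults_idx(2, 1)] = 32;      /* node 2, private */
        printf("node 2 cpu faults: %lu\n", node_faults_cpu(2));  /* 42 */
        return 0;
}
```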
| @@ -925,6 +916,12 @@ static inline unsigned long group_faults(struct task_struct *p, int nid) | |||
| 925 | p->numa_group->faults[task_faults_idx(nid, 1)]; | 916 | p->numa_group->faults[task_faults_idx(nid, 1)]; |
| 926 | } | 917 | } |
| 927 | 918 | ||
| 919 | static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) | ||
| 920 | { | ||
| 921 | return group->faults_cpu[task_faults_idx(nid, 0)] + | ||
| 922 | group->faults_cpu[task_faults_idx(nid, 1)]; | ||
| 923 | } | ||
| 924 | |||
| 928 | /* | 925 | /* |
| 929 | * These return the fraction of accesses done by a particular task, or | 926 | * These return the fraction of accesses done by a particular task, or |
| 930 | * task group, on a particular numa node. The group weight is given a | 927 | * task group, on a particular numa node. The group weight is given a |
| @@ -935,7 +932,7 @@ static inline unsigned long task_weight(struct task_struct *p, int nid) | |||
| 935 | { | 932 | { |
| 936 | unsigned long total_faults; | 933 | unsigned long total_faults; |
| 937 | 934 | ||
| 938 | if (!p->numa_faults) | 935 | if (!p->numa_faults_memory) |
| 939 | return 0; | 936 | return 0; |
| 940 | 937 | ||
| 941 | total_faults = p->total_numa_faults; | 938 | total_faults = p->total_numa_faults; |
| @@ -954,6 +951,69 @@ static inline unsigned long group_weight(struct task_struct *p, int nid) | |||
| 954 | return 1000 * group_faults(p, nid) / p->numa_group->total_faults; | 951 | return 1000 * group_faults(p, nid) / p->numa_group->total_faults; |
| 955 | } | 952 | } |
| 956 | 953 | ||
| 954 | bool should_numa_migrate_memory(struct task_struct *p, struct page * page, | ||
| 955 | int src_nid, int dst_cpu) | ||
| 956 | { | ||
| 957 | struct numa_group *ng = p->numa_group; | ||
| 958 | int dst_nid = cpu_to_node(dst_cpu); | ||
| 959 | int last_cpupid, this_cpupid; | ||
| 960 | |||
| 961 | this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); | ||
| 962 | |||
| 963 | /* | ||
| 964 | * Multi-stage node selection is used in conjunction with a periodic | ||
| 965 | * migration fault to build a temporal task<->page relation. By using | ||
| 966 | * a two-stage filter we remove short/unlikely relations. | ||
| 967 | * | ||
| 968 | * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate | ||
| 969 | * a task's usage of a particular page (n_p) per total usage of this | ||
| 970 | * page (n_t) (in a given time-span) to a probability. | ||
| 971 | * | ||
| 972 | * Our periodic faults will sample this probability and getting the | ||
| 973 | * same result twice in a row, given these samples are fully | ||
| 974 | * independent, is then given by P(n)^2, provided our sample period | ||
| 975 | * is sufficiently short compared to the usage pattern. | ||
| 976 | * | ||
| 977 | * This quadratic squishes small probabilities, making it less likely we | ||
| 978 | * act on an unlikely task<->page relation. | ||
| 979 | */ | ||
| 980 | last_cpupid = page_cpupid_xchg_last(page, this_cpupid); | ||
| 981 | if (!cpupid_pid_unset(last_cpupid) && | ||
| 982 | cpupid_to_nid(last_cpupid) != dst_nid) | ||
| 983 | return false; | ||
| 984 | |||
| 985 | /* Always allow migrate on private faults */ | ||
| 986 | if (cpupid_match_pid(p, last_cpupid)) | ||
| 987 | return true; | ||
| 988 | |||
| 989 | /* A shared fault, but p->numa_group has not been set up yet. */ | ||
| 990 | if (!ng) | ||
| 991 | return true; | ||
| 992 | |||
| 993 | /* | ||
| 994 | * Do not migrate if the destination is not a node that | ||
| 995 | * is actively used by this numa group. | ||
| 996 | */ | ||
| 997 | if (!node_isset(dst_nid, ng->active_nodes)) | ||
| 998 | return false; | ||
| 999 | |||
| 1000 | /* | ||
| 1001 | * Source is a node that is not actively used by this | ||
| 1002 | * numa group, while the destination is. Migrate. | ||
| 1003 | */ | ||
| 1004 | if (!node_isset(src_nid, ng->active_nodes)) | ||
| 1005 | return true; | ||
| 1006 | |||
| 1007 | /* | ||
| 1008 | * Both source and destination are nodes in active | ||
| 1009 | * use by this numa group. Maximize memory bandwidth | ||
| 1010 | * by migrating from more heavily used groups, to less | ||
| 1011 | * heavily used ones, spreading the load around. | ||
| 1012 | * Use a 1/4 hysteresis to avoid spurious page movement. | ||
| 1013 | */ | ||
| 1014 | return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4); | ||
| 1015 | } | ||
| 1016 | |||
| 957 | static unsigned long weighted_cpuload(const int cpu); | 1017 | static unsigned long weighted_cpuload(const int cpu); |
| 958 | static unsigned long source_load(int cpu, int type); | 1018 | static unsigned long source_load(int cpu, int type); |
| 959 | static unsigned long target_load(int cpu, int type); | 1019 | static unsigned long target_load(int cpu, int type); |
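should_numa_migrate_memory(), added above, combines three ideas: the two-stage filter (a page only moves once the same task-node relation is seen twice in a row, so an event of probability p fires with probability of roughly p squared), an always-allow rule for private faults, and group-level checks against the active-node mask with a 3/4 hysteresis. A condensed user-space sketch of just that decision order; the group structure, the bitmask-as-unsigned-long and the per-node faults array are stand-ins, not the real cpupid/page machinery.

```c
#include <stdbool.h>

struct fake_group {
        unsigned long active_mask;      /* bit n set: node n actively used */
        unsigned long faults[8];        /* per-node fault totals (simplified) */
};

static bool decide_migrate(struct fake_group *ng, int last_nid,
                           bool last_was_me, int src_nid, int dst_nid)
{
        /* Two-stage filter: the previous fault must already point at dst_nid. */
        if (last_nid >= 0 && last_nid != dst_nid)
                return false;

        /* Private fault (same task touched the page last time): always migrate. */
        if (last_was_me)
                return true;

        if (!ng)
                return true;            /* no group information yet */

        /* Never migrate onto a node the group is not actively running on. */
        if (!(ng->active_mask & (1UL << dst_nid)))
                return false;

        /* Moving from an inactive node onto an active one is always fine. */
        if (!(ng->active_mask & (1UL << src_nid)))
                return true;

        /* Both active: spread load, with a 1/4 hysteresis against ping-pong. */
        return ng->faults[dst_nid] < ng->faults[src_nid] * 3 / 4;
}

int main(void)
{
        struct fake_group g = { .active_mask = 0x3, .faults = { 100, 60 } };

        /* Shared fault, both nodes active: 60 < 75, so migrate node 0 -> 1. */
        return decide_migrate(&g, 1, false, 0, 1) ? 0 : 1;
}
```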
| @@ -1267,7 +1327,7 @@ static int task_numa_migrate(struct task_struct *p) | |||
| 1267 | static void numa_migrate_preferred(struct task_struct *p) | 1327 | static void numa_migrate_preferred(struct task_struct *p) |
| 1268 | { | 1328 | { |
| 1269 | /* This task has no NUMA fault statistics yet */ | 1329 | /* This task has no NUMA fault statistics yet */ |
| 1270 | if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) | 1330 | if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory)) |
| 1271 | return; | 1331 | return; |
| 1272 | 1332 | ||
| 1273 | /* Periodically retry migrating the task to the preferred node */ | 1333 | /* Periodically retry migrating the task to the preferred node */ |
| @@ -1282,6 +1342,38 @@ static void numa_migrate_preferred(struct task_struct *p) | |||
| 1282 | } | 1342 | } |
| 1283 | 1343 | ||
| 1284 | /* | 1344 | /* |
| 1345 | * Find the nodes on which the workload is actively running. We do this by | ||
| 1346 | * tracking the nodes from which NUMA hinting faults are triggered. This can | ||
| 1347 | * be different from the set of nodes where the workload's memory is currently | ||
| 1348 | * located. | ||
| 1349 | * | ||
| 1350 | * The bitmask is used to make smarter decisions on when to do NUMA page | ||
| 1351 | * migrations. To prevent flip-flopping and excessive page migrations, nodes | ||
| 1352 | * are added when they cause over 6/16 of the maximum number of faults, but | ||
| 1353 | * only removed when they drop below 3/16. | ||
| 1354 | */ | ||
| 1355 | static void update_numa_active_node_mask(struct numa_group *numa_group) | ||
| 1356 | { | ||
| 1357 | unsigned long faults, max_faults = 0; | ||
| 1358 | int nid; | ||
| 1359 | |||
| 1360 | for_each_online_node(nid) { | ||
| 1361 | faults = group_faults_cpu(numa_group, nid); | ||
| 1362 | if (faults > max_faults) | ||
| 1363 | max_faults = faults; | ||
| 1364 | } | ||
| 1365 | |||
| 1366 | for_each_online_node(nid) { | ||
| 1367 | faults = group_faults_cpu(numa_group, nid); | ||
| 1368 | if (!node_isset(nid, numa_group->active_nodes)) { | ||
| 1369 | if (faults > max_faults * 6 / 16) | ||
| 1370 | node_set(nid, numa_group->active_nodes); | ||
| 1371 | } else if (faults < max_faults * 3 / 16) | ||
| 1372 | node_clear(nid, numa_group->active_nodes); | ||
| 1373 | } | ||
| 1374 | } | ||
| 1375 | |||
| 1376 | /* | ||
| 1285 | * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS | 1377 | * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS |
| 1286 | * increments. The more local the fault statistics are, the higher the scan | 1378 | * increments. The more local the fault statistics are, the higher the scan |
| 1287 | * period will be for the next scan window. If local/remote ratio is below | 1379 | * period will be for the next scan window. If local/remote ratio is below |
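update_numa_active_node_mask() above applies an asymmetric threshold: a node joins the active set once it generates more than 6/16 of the busiest node's fault count, and only leaves when it falls below 3/16, which keeps the mask from flapping for nodes near the boundary. A small stand-alone version of that hysteresis over a plain array; the node count and sample values are assumptions for the example.

```c
#include <stdio.h>
#include <stdbool.h>

#define FAKE_NR_NODES 4

static bool active[FAKE_NR_NODES];

static void update_active_nodes(const unsigned long faults[FAKE_NR_NODES])
{
        unsigned long max_faults = 0;
        int nid;

        for (nid = 0; nid < FAKE_NR_NODES; nid++)
                if (faults[nid] > max_faults)
                        max_faults = faults[nid];

        for (nid = 0; nid < FAKE_NR_NODES; nid++) {
                if (!active[nid]) {
                        /* join only above 6/16 of the busiest node */
                        if (faults[nid] > max_faults * 6 / 16)
                                active[nid] = true;
                } else if (faults[nid] < max_faults * 3 / 16) {
                        /* leave only when clearly idle again */
                        active[nid] = false;
                }
        }
}

int main(void)
{
        unsigned long sample1[FAKE_NR_NODES] = { 160, 70, 20, 0 };
        unsigned long sample2[FAKE_NR_NODES] = { 160, 40, 20, 0 };

        update_active_nodes(sample1);   /* node 1 joins: 70 > 60  */
        update_active_nodes(sample2);   /* node 1 stays: 40 >= 30 */
        printf("node1 active: %d\n", active[1]);        /* prints 1 */
        return 0;
}
```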
| @@ -1355,11 +1447,41 @@ static void update_task_scan_period(struct task_struct *p, | |||
| 1355 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); | 1447 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); |
| 1356 | } | 1448 | } |
| 1357 | 1449 | ||
| 1450 | /* | ||
| 1451 | * Get the fraction of time the task has been running since the last | ||
| 1452 | * NUMA placement cycle. The scheduler keeps similar statistics, but | ||
| 1453 | * decays those on a 32ms period, which is orders of magnitude off | ||
| 1454 | * from the dozens-of-seconds NUMA balancing period. Use the scheduler | ||
| 1455 | * stats only if the task is so new there are no NUMA statistics yet. | ||
| 1456 | */ | ||
| 1457 | static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) | ||
| 1458 | { | ||
| 1459 | u64 runtime, delta, now; | ||
| 1460 | /* Use the start of this time slice to avoid calculations. */ | ||
| 1461 | now = p->se.exec_start; | ||
| 1462 | runtime = p->se.sum_exec_runtime; | ||
| 1463 | |||
| 1464 | if (p->last_task_numa_placement) { | ||
| 1465 | delta = runtime - p->last_sum_exec_runtime; | ||
| 1466 | *period = now - p->last_task_numa_placement; | ||
| 1467 | } else { | ||
| 1468 | delta = p->se.avg.runnable_avg_sum; | ||
| 1469 | *period = p->se.avg.runnable_avg_period; | ||
| 1470 | } | ||
| 1471 | |||
| 1472 | p->last_sum_exec_runtime = runtime; | ||
| 1473 | p->last_task_numa_placement = now; | ||
| 1474 | |||
| 1475 | return delta; | ||
| 1476 | } | ||
| 1477 | |||
| 1358 | static void task_numa_placement(struct task_struct *p) | 1478 | static void task_numa_placement(struct task_struct *p) |
| 1359 | { | 1479 | { |
| 1360 | int seq, nid, max_nid = -1, max_group_nid = -1; | 1480 | int seq, nid, max_nid = -1, max_group_nid = -1; |
| 1361 | unsigned long max_faults = 0, max_group_faults = 0; | 1481 | unsigned long max_faults = 0, max_group_faults = 0; |
| 1362 | unsigned long fault_types[2] = { 0, 0 }; | 1482 | unsigned long fault_types[2] = { 0, 0 }; |
| 1483 | unsigned long total_faults; | ||
| 1484 | u64 runtime, period; | ||
| 1363 | spinlock_t *group_lock = NULL; | 1485 | spinlock_t *group_lock = NULL; |
| 1364 | 1486 | ||
| 1365 | seq = ACCESS_ONCE(p->mm->numa_scan_seq); | 1487 | seq = ACCESS_ONCE(p->mm->numa_scan_seq); |
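numa_get_avg_runtime(), introduced above, returns how much CPU time the task consumed since the previous NUMA placement pass and, through *period, the wall-clock span that covers; for brand-new tasks the kernel falls back to the decayed runnable averages. A trimmed sketch of that bookkeeping with plain integers; the structure, the sample values, and the simpler first-pass fallback (treat it as fully busy) are all assumptions of the example.

```c
#include <stdio.h>
#include <stdint.h>

struct fake_task {
        uint64_t sum_exec_runtime;       /* total CPU time consumed (ns) */
        uint64_t exec_start;             /* start of the current slice (ns) */
        uint64_t last_sum_exec_runtime;  /* snapshot at the last placement */
        uint64_t last_placement;         /* when the last placement ran */
};

/* CPU time used since the last placement; *period is the span it covers. */
static uint64_t numa_avg_runtime(struct fake_task *p, uint64_t *period)
{
        uint64_t runtime = p->sum_exec_runtime;
        uint64_t now = p->exec_start;   /* reuse the slice start, as the kernel does */
        uint64_t delta;

        if (p->last_placement) {
                delta = runtime - p->last_sum_exec_runtime;
                *period = now - p->last_placement;
        } else {
                /* First pass: no history yet, treat the task as fully busy. */
                delta = runtime;
                *period = runtime ? runtime : 1;
        }

        p->last_sum_exec_runtime = runtime;
        p->last_placement = now;
        return delta;
}

int main(void)
{
        struct fake_task t = { .sum_exec_runtime = 400, .exec_start = 1000 };
        uint64_t period;

        numa_avg_runtime(&t, &period);          /* first pass seeds the history */
        t.sum_exec_runtime = 700;               /* 300 ns of CPU time ...        */
        t.exec_start = 2000;                    /* ... over a 1000 ns window     */
        printf("ran %llu of %llu ns\n",
               (unsigned long long)numa_avg_runtime(&t, &period),
               (unsigned long long)period);
        return 0;
}
```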
| @@ -1368,6 +1490,10 @@ static void task_numa_placement(struct task_struct *p) | |||
| 1368 | p->numa_scan_seq = seq; | 1490 | p->numa_scan_seq = seq; |
| 1369 | p->numa_scan_period_max = task_scan_max(p); | 1491 | p->numa_scan_period_max = task_scan_max(p); |
| 1370 | 1492 | ||
| 1493 | total_faults = p->numa_faults_locality[0] + | ||
| 1494 | p->numa_faults_locality[1]; | ||
| 1495 | runtime = numa_get_avg_runtime(p, &period); | ||
| 1496 | |||
| 1371 | /* If the task is part of a group prevent parallel updates to group stats */ | 1497 | /* If the task is part of a group prevent parallel updates to group stats */ |
| 1372 | if (p->numa_group) { | 1498 | if (p->numa_group) { |
| 1373 | group_lock = &p->numa_group->lock; | 1499 | group_lock = &p->numa_group->lock; |
| @@ -1379,24 +1505,37 @@ static void task_numa_placement(struct task_struct *p) | |||
| 1379 | unsigned long faults = 0, group_faults = 0; | 1505 | unsigned long faults = 0, group_faults = 0; |
| 1380 | int priv, i; | 1506 | int priv, i; |
| 1381 | 1507 | ||
| 1382 | for (priv = 0; priv < 2; priv++) { | 1508 | for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { |
| 1383 | long diff; | 1509 | long diff, f_diff, f_weight; |
| 1384 | 1510 | ||
| 1385 | i = task_faults_idx(nid, priv); | 1511 | i = task_faults_idx(nid, priv); |
| 1386 | diff = -p->numa_faults[i]; | ||
| 1387 | 1512 | ||
| 1388 | /* Decay existing window, copy faults since last scan */ | 1513 | /* Decay existing window, copy faults since last scan */ |
| 1389 | p->numa_faults[i] >>= 1; | 1514 | diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2; |
| 1390 | p->numa_faults[i] += p->numa_faults_buffer[i]; | 1515 | fault_types[priv] += p->numa_faults_buffer_memory[i]; |
| 1391 | fault_types[priv] += p->numa_faults_buffer[i]; | 1516 | p->numa_faults_buffer_memory[i] = 0; |
| 1392 | p->numa_faults_buffer[i] = 0; | ||
| 1393 | 1517 | ||
| 1394 | faults += p->numa_faults[i]; | 1518 | /* |
| 1395 | diff += p->numa_faults[i]; | 1519 | * Normalize the faults_cpu samples, so all tasks in a group |
| 1520 | * count according to CPU use, instead of by the raw | ||
| 1521 | * number of faults. Tasks with little runtime have | ||
| 1522 | * little over-all impact on throughput, and thus their | ||
| 1523 | * faults are less important. | ||
| 1524 | */ | ||
| 1525 | f_weight = div64_u64(runtime << 16, period + 1); | ||
| 1526 | f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) / | ||
| 1527 | (total_faults + 1); | ||
| 1528 | f_diff = f_weight - p->numa_faults_cpu[i] / 2; | ||
| 1529 | p->numa_faults_buffer_cpu[i] = 0; | ||
| 1530 | |||
| 1531 | p->numa_faults_memory[i] += diff; | ||
| 1532 | p->numa_faults_cpu[i] += f_diff; | ||
| 1533 | faults += p->numa_faults_memory[i]; | ||
| 1396 | p->total_numa_faults += diff; | 1534 | p->total_numa_faults += diff; |
| 1397 | if (p->numa_group) { | 1535 | if (p->numa_group) { |
| 1398 | /* safe because we can only change our own group */ | 1536 | /* safe because we can only change our own group */ |
| 1399 | p->numa_group->faults[i] += diff; | 1537 | p->numa_group->faults[i] += diff; |
| 1538 | p->numa_group->faults_cpu[i] += f_diff; | ||
| 1400 | p->numa_group->total_faults += diff; | 1539 | p->numa_group->total_faults += diff; |
| 1401 | group_faults += p->numa_group->faults[i]; | 1540 | group_faults += p->numa_group->faults[i]; |
| 1402 | } | 1541 | } |
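In task_numa_placement() the CPU fault samples are now scaled by how much the task actually ran, using 16-bit fixed point: f_weight = (runtime << 16) / (period + 1), multiplied by this node's share of the buffered samples, so mostly-idle tasks contribute little to the group's CPU-locality view. Both the memory and CPU statistics are folded in with a "halve the old value, add the new sample" decaying average. A small arithmetic sketch of that weighting; the helper names and sample numbers are invented.

```c
#include <stdio.h>
#include <stdint.h>

/* Decaying average as used for the memory faults: old/2 + new sample. */
static long decay_add(long old, long sample)
{
        return old / 2 + sample;
}

/* Scale one node's CPU fault samples by the fraction of time the task ran. */
static long weighted_cpu_faults(uint64_t runtime, uint64_t period,
                                unsigned long node_samples,
                                unsigned long total_samples)
{
        uint64_t f_weight = (runtime << 16) / (period + 1);    /* 16-bit fixed point */

        return (long)((f_weight * node_samples) / (total_samples + 1));
}

int main(void)
{
        /* Task ran about 25% of the window; this node saw 30 of ~40 samples. */
        long w = weighted_cpu_faults(250, 999, 30, 39);
        long avg = decay_add(1000 /* previous value */, w);

        printf("weighted sample %ld, new average %ld\n", w, avg);
        return 0;
}
```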
| @@ -1416,6 +1555,7 @@ static void task_numa_placement(struct task_struct *p) | |||
| 1416 | update_task_scan_period(p, fault_types[0], fault_types[1]); | 1555 | update_task_scan_period(p, fault_types[0], fault_types[1]); |
| 1417 | 1556 | ||
| 1418 | if (p->numa_group) { | 1557 | if (p->numa_group) { |
| 1558 | update_numa_active_node_mask(p->numa_group); | ||
| 1419 | /* | 1559 | /* |
| 1420 | * If the preferred task and group nids are different, | 1560 | * If the preferred task and group nids are different, |
| 1421 | * iterate over the nodes again to find the best place. | 1561 | * iterate over the nodes again to find the best place. |
| @@ -1465,7 +1605,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, | |||
| 1465 | 1605 | ||
| 1466 | if (unlikely(!p->numa_group)) { | 1606 | if (unlikely(!p->numa_group)) { |
| 1467 | unsigned int size = sizeof(struct numa_group) + | 1607 | unsigned int size = sizeof(struct numa_group) + |
| 1468 | 2*nr_node_ids*sizeof(unsigned long); | 1608 | 4*nr_node_ids*sizeof(unsigned long); |
| 1469 | 1609 | ||
| 1470 | grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); | 1610 | grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); |
| 1471 | if (!grp) | 1611 | if (!grp) |
| @@ -1475,9 +1615,14 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, | |||
| 1475 | spin_lock_init(&grp->lock); | 1615 | spin_lock_init(&grp->lock); |
| 1476 | INIT_LIST_HEAD(&grp->task_list); | 1616 | INIT_LIST_HEAD(&grp->task_list); |
| 1477 | grp->gid = p->pid; | 1617 | grp->gid = p->pid; |
| 1618 | /* Second half of the array tracks nids where faults happen */ | ||
| 1619 | grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES * | ||
| 1620 | nr_node_ids; | ||
| 1621 | |||
| 1622 | node_set(task_node(current), grp->active_nodes); | ||
| 1478 | 1623 | ||
| 1479 | for (i = 0; i < 2*nr_node_ids; i++) | 1624 | for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) |
| 1480 | grp->faults[i] = p->numa_faults[i]; | 1625 | grp->faults[i] = p->numa_faults_memory[i]; |
| 1481 | 1626 | ||
| 1482 | grp->total_faults = p->total_numa_faults; | 1627 | grp->total_faults = p->total_numa_faults; |
| 1483 | 1628 | ||
| @@ -1534,9 +1679,9 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, | |||
| 1534 | 1679 | ||
| 1535 | double_lock(&my_grp->lock, &grp->lock); | 1680 | double_lock(&my_grp->lock, &grp->lock); |
| 1536 | 1681 | ||
| 1537 | for (i = 0; i < 2*nr_node_ids; i++) { | 1682 | for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { |
| 1538 | my_grp->faults[i] -= p->numa_faults[i]; | 1683 | my_grp->faults[i] -= p->numa_faults_memory[i]; |
| 1539 | grp->faults[i] += p->numa_faults[i]; | 1684 | grp->faults[i] += p->numa_faults_memory[i]; |
| 1540 | } | 1685 | } |
| 1541 | my_grp->total_faults -= p->total_numa_faults; | 1686 | my_grp->total_faults -= p->total_numa_faults; |
| 1542 | grp->total_faults += p->total_numa_faults; | 1687 | grp->total_faults += p->total_numa_faults; |
| @@ -1562,12 +1707,12 @@ void task_numa_free(struct task_struct *p) | |||
| 1562 | { | 1707 | { |
| 1563 | struct numa_group *grp = p->numa_group; | 1708 | struct numa_group *grp = p->numa_group; |
| 1564 | int i; | 1709 | int i; |
| 1565 | void *numa_faults = p->numa_faults; | 1710 | void *numa_faults = p->numa_faults_memory; |
| 1566 | 1711 | ||
| 1567 | if (grp) { | 1712 | if (grp) { |
| 1568 | spin_lock(&grp->lock); | 1713 | spin_lock(&grp->lock); |
| 1569 | for (i = 0; i < 2*nr_node_ids; i++) | 1714 | for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) |
| 1570 | grp->faults[i] -= p->numa_faults[i]; | 1715 | grp->faults[i] -= p->numa_faults_memory[i]; |
| 1571 | grp->total_faults -= p->total_numa_faults; | 1716 | grp->total_faults -= p->total_numa_faults; |
| 1572 | 1717 | ||
| 1573 | list_del(&p->numa_entry); | 1718 | list_del(&p->numa_entry); |
| @@ -1577,18 +1722,21 @@ void task_numa_free(struct task_struct *p) | |||
| 1577 | put_numa_group(grp); | 1722 | put_numa_group(grp); |
| 1578 | } | 1723 | } |
| 1579 | 1724 | ||
| 1580 | p->numa_faults = NULL; | 1725 | p->numa_faults_memory = NULL; |
| 1581 | p->numa_faults_buffer = NULL; | 1726 | p->numa_faults_buffer_memory = NULL; |
| 1727 | p->numa_faults_cpu = NULL; | ||
| 1728 | p->numa_faults_buffer_cpu = NULL; | ||
| 1582 | kfree(numa_faults); | 1729 | kfree(numa_faults); |
| 1583 | } | 1730 | } |
| 1584 | 1731 | ||
| 1585 | /* | 1732 | /* |
| 1586 | * Got a PROT_NONE fault for a page on @node. | 1733 | * Got a PROT_NONE fault for a page on @node. |
| 1587 | */ | 1734 | */ |
| 1588 | void task_numa_fault(int last_cpupid, int node, int pages, int flags) | 1735 | void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) |
| 1589 | { | 1736 | { |
| 1590 | struct task_struct *p = current; | 1737 | struct task_struct *p = current; |
| 1591 | bool migrated = flags & TNF_MIGRATED; | 1738 | bool migrated = flags & TNF_MIGRATED; |
| 1739 | int cpu_node = task_node(current); | ||
| 1592 | int priv; | 1740 | int priv; |
| 1593 | 1741 | ||
| 1594 | if (!numabalancing_enabled) | 1742 | if (!numabalancing_enabled) |
| @@ -1603,16 +1751,24 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags) | |||
| 1603 | return; | 1751 | return; |
| 1604 | 1752 | ||
| 1605 | /* Allocate buffer to track faults on a per-node basis */ | 1753 | /* Allocate buffer to track faults on a per-node basis */ |
| 1606 | if (unlikely(!p->numa_faults)) { | 1754 | if (unlikely(!p->numa_faults_memory)) { |
| 1607 | int size = sizeof(*p->numa_faults) * 2 * nr_node_ids; | 1755 | int size = sizeof(*p->numa_faults_memory) * |
| 1756 | NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; | ||
| 1608 | 1757 | ||
| 1609 | /* numa_faults and numa_faults_buffer share the allocation */ | 1758 | p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); |
| 1610 | p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN); | 1759 | if (!p->numa_faults_memory) |
| 1611 | if (!p->numa_faults) | ||
| 1612 | return; | 1760 | return; |
| 1613 | 1761 | ||
| 1614 | BUG_ON(p->numa_faults_buffer); | 1762 | BUG_ON(p->numa_faults_buffer_memory); |
| 1615 | p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids); | 1763 | /* |
| 1764 | * The averaged statistics, shared & private, memory & cpu, | ||
| 1765 | * occupy the first half of the array. The second half of the | ||
| 1766 | * array is for current counters, which are averaged into the | ||
| 1767 | * first set by task_numa_placement. | ||
| 1768 | */ | ||
| 1769 | p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids); | ||
| 1770 | p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids); | ||
| 1771 | p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids); | ||
| 1616 | p->total_numa_faults = 0; | 1772 | p->total_numa_faults = 0; |
| 1617 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); | 1773 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); |
| 1618 | } | 1774 | } |
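task_numa_fault() above now makes a single allocation that is carved into four equal regions of 2 * nr_node_ids counters each: the averaged memory stats, the averaged CPU stats, and the two buffer regions that collect raw samples until the next placement pass folds them in. A sketch of that carving with an assumed node count and invented variable names:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_NR_NODES 4
#define PER_REGION    (2 * FAKE_NR_NODES)       /* shared + private per node */

int main(void)
{
        /* One allocation, four views into it, as in task_numa_fault(). */
        unsigned long *faults_memory = calloc(4 * PER_REGION, sizeof(unsigned long));
        unsigned long *faults_cpu, *buffer_memory, *buffer_cpu;

        if (!faults_memory)
                return 1;

        faults_cpu    = faults_memory + 1 * PER_REGION;
        buffer_memory = faults_memory + 2 * PER_REGION;
        buffer_cpu    = faults_memory + 3 * PER_REGION;

        /* Raw samples land in the buffers ... */
        buffer_memory[2 * 1 + 1] += 7;          /* node 1, private, memory side */
        buffer_cpu[2 * 0 + 1]    += 7;          /* node 0, private, cpu side    */

        /* ... and a placement pass would fold them into the averaged halves. */
        faults_memory[2 * 1 + 1] += buffer_memory[2 * 1 + 1];
        faults_cpu[2 * 0 + 1]    += buffer_cpu[2 * 0 + 1];
        /* clear both buffers (they are contiguous) for the next window */
        memset(buffer_memory, 0, 2 * PER_REGION * sizeof(unsigned long));

        printf("node1 private memory faults: %lu\n", faults_memory[2 * 1 + 1]);
        free(faults_memory);
        return 0;
}
```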
| @@ -1641,7 +1797,8 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags) | |||
| 1641 | if (migrated) | 1797 | if (migrated) |
| 1642 | p->numa_pages_migrated += pages; | 1798 | p->numa_pages_migrated += pages; |
| 1643 | 1799 | ||
| 1644 | p->numa_faults_buffer[task_faults_idx(node, priv)] += pages; | 1800 | p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages; |
| 1801 | p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages; | ||
| 1645 | p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages; | 1802 | p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages; |
| 1646 | } | 1803 | } |
| 1647 | 1804 | ||
| @@ -2219,13 +2376,20 @@ static inline void __update_group_entity_contrib(struct sched_entity *se) | |||
| 2219 | se->avg.load_avg_contrib >>= NICE_0_SHIFT; | 2376 | se->avg.load_avg_contrib >>= NICE_0_SHIFT; |
| 2220 | } | 2377 | } |
| 2221 | } | 2378 | } |
| 2222 | #else | 2379 | |
| 2380 | static inline void update_rq_runnable_avg(struct rq *rq, int runnable) | ||
| 2381 | { | ||
| 2382 | __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable); | ||
| 2383 | __update_tg_runnable_avg(&rq->avg, &rq->cfs); | ||
| 2384 | } | ||
| 2385 | #else /* CONFIG_FAIR_GROUP_SCHED */ | ||
| 2223 | static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq, | 2386 | static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq, |
| 2224 | int force_update) {} | 2387 | int force_update) {} |
| 2225 | static inline void __update_tg_runnable_avg(struct sched_avg *sa, | 2388 | static inline void __update_tg_runnable_avg(struct sched_avg *sa, |
| 2226 | struct cfs_rq *cfs_rq) {} | 2389 | struct cfs_rq *cfs_rq) {} |
| 2227 | static inline void __update_group_entity_contrib(struct sched_entity *se) {} | 2390 | static inline void __update_group_entity_contrib(struct sched_entity *se) {} |
| 2228 | #endif | 2391 | static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} |
| 2392 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | ||
| 2229 | 2393 | ||
| 2230 | static inline void __update_task_entity_contrib(struct sched_entity *se) | 2394 | static inline void __update_task_entity_contrib(struct sched_entity *se) |
| 2231 | { | 2395 | { |
| @@ -2323,12 +2487,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) | |||
| 2323 | __update_cfs_rq_tg_load_contrib(cfs_rq, force_update); | 2487 | __update_cfs_rq_tg_load_contrib(cfs_rq, force_update); |
| 2324 | } | 2488 | } |
| 2325 | 2489 | ||
| 2326 | static inline void update_rq_runnable_avg(struct rq *rq, int runnable) | ||
| 2327 | { | ||
| 2328 | __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable); | ||
| 2329 | __update_tg_runnable_avg(&rq->avg, &rq->cfs); | ||
| 2330 | } | ||
| 2331 | |||
| 2332 | /* Add the load generated by se into cfs_rq's child load-average */ | 2490 | /* Add the load generated by se into cfs_rq's child load-average */ |
| 2333 | static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, | 2491 | static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, |
| 2334 | struct sched_entity *se, | 2492 | struct sched_entity *se, |
| @@ -2416,7 +2574,10 @@ void idle_exit_fair(struct rq *this_rq) | |||
| 2416 | update_rq_runnable_avg(this_rq, 0); | 2574 | update_rq_runnable_avg(this_rq, 0); |
| 2417 | } | 2575 | } |
| 2418 | 2576 | ||
| 2419 | #else | 2577 | static int idle_balance(struct rq *this_rq); |
| 2578 | |||
| 2579 | #else /* CONFIG_SMP */ | ||
| 2580 | |||
| 2420 | static inline void update_entity_load_avg(struct sched_entity *se, | 2581 | static inline void update_entity_load_avg(struct sched_entity *se, |
| 2421 | int update_cfs_rq) {} | 2582 | int update_cfs_rq) {} |
| 2422 | static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} | 2583 | static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {} |
| @@ -2428,7 +2589,13 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, | |||
| 2428 | int sleep) {} | 2589 | int sleep) {} |
| 2429 | static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, | 2590 | static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, |
| 2430 | int force_update) {} | 2591 | int force_update) {} |
| 2431 | #endif | 2592 | |
| 2593 | static inline int idle_balance(struct rq *rq) | ||
| 2594 | { | ||
| 2595 | return 0; | ||
| 2596 | } | ||
| 2597 | |||
| 2598 | #endif /* CONFIG_SMP */ | ||
| 2432 | 2599 | ||
| 2433 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | 2600 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 2434 | { | 2601 | { |
| @@ -2578,10 +2745,10 @@ static void __clear_buddies_last(struct sched_entity *se) | |||
| 2578 | { | 2745 | { |
| 2579 | for_each_sched_entity(se) { | 2746 | for_each_sched_entity(se) { |
| 2580 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 2747 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 2581 | if (cfs_rq->last == se) | 2748 | if (cfs_rq->last != se) |
| 2582 | cfs_rq->last = NULL; | ||
| 2583 | else | ||
| 2584 | break; | 2749 | break; |
| 2750 | |||
| 2751 | cfs_rq->last = NULL; | ||
| 2585 | } | 2752 | } |
| 2586 | } | 2753 | } |
| 2587 | 2754 | ||
| @@ -2589,10 +2756,10 @@ static void __clear_buddies_next(struct sched_entity *se) | |||
| 2589 | { | 2756 | { |
| 2590 | for_each_sched_entity(se) { | 2757 | for_each_sched_entity(se) { |
| 2591 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 2758 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 2592 | if (cfs_rq->next == se) | 2759 | if (cfs_rq->next != se) |
| 2593 | cfs_rq->next = NULL; | ||
| 2594 | else | ||
| 2595 | break; | 2760 | break; |
| 2761 | |||
| 2762 | cfs_rq->next = NULL; | ||
| 2596 | } | 2763 | } |
| 2597 | } | 2764 | } |
| 2598 | 2765 | ||
| @@ -2600,10 +2767,10 @@ static void __clear_buddies_skip(struct sched_entity *se) | |||
| 2600 | { | 2767 | { |
| 2601 | for_each_sched_entity(se) { | 2768 | for_each_sched_entity(se) { |
| 2602 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 2769 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 2603 | if (cfs_rq->skip == se) | 2770 | if (cfs_rq->skip != se) |
| 2604 | cfs_rq->skip = NULL; | ||
| 2605 | else | ||
| 2606 | break; | 2771 | break; |
| 2772 | |||
| 2773 | cfs_rq->skip = NULL; | ||
| 2607 | } | 2774 | } |
| 2608 | } | 2775 | } |
| 2609 | 2776 | ||
| @@ -2746,17 +2913,36 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); | |||
| 2746 | * 3) pick the "last" process, for cache locality | 2913 | * 3) pick the "last" process, for cache locality |
| 2747 | * 4) do not run the "skip" process, if something else is available | 2914 | * 4) do not run the "skip" process, if something else is available |
| 2748 | */ | 2915 | */ |
| 2749 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) | 2916 | static struct sched_entity * |
| 2917 | pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) | ||
| 2750 | { | 2918 | { |
| 2751 | struct sched_entity *se = __pick_first_entity(cfs_rq); | 2919 | struct sched_entity *left = __pick_first_entity(cfs_rq); |
| 2752 | struct sched_entity *left = se; | 2920 | struct sched_entity *se; |
| 2921 | |||
| 2922 | /* | ||
| 2923 | * If curr is set we have to see if its left of the leftmost entity | ||
| 2924 | * still in the tree, provided there was anything in the tree at all. | ||
| 2925 | */ | ||
| 2926 | if (!left || (curr && entity_before(curr, left))) | ||
| 2927 | left = curr; | ||
| 2928 | |||
| 2929 | se = left; /* ideally we run the leftmost entity */ | ||
| 2753 | 2930 | ||
| 2754 | /* | 2931 | /* |
| 2755 | * Avoid running the skip buddy, if running something else can | 2932 | * Avoid running the skip buddy, if running something else can |
| 2756 | * be done without getting too unfair. | 2933 | * be done without getting too unfair. |
| 2757 | */ | 2934 | */ |
| 2758 | if (cfs_rq->skip == se) { | 2935 | if (cfs_rq->skip == se) { |
| 2759 | struct sched_entity *second = __pick_next_entity(se); | 2936 | struct sched_entity *second; |
| 2937 | |||
| 2938 | if (se == curr) { | ||
| 2939 | second = __pick_first_entity(cfs_rq); | ||
| 2940 | } else { | ||
| 2941 | second = __pick_next_entity(se); | ||
| 2942 | if (!second || (curr && entity_before(curr, second))) | ||
| 2943 | second = curr; | ||
| 2944 | } | ||
| 2945 | |||
| 2760 | if (second && wakeup_preempt_entity(second, left) < 1) | 2946 | if (second && wakeup_preempt_entity(second, left) < 1) |
| 2761 | se = second; | 2947 | se = second; |
| 2762 | } | 2948 | } |
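pick_next_entity() now receives curr, which under the new pick_next_task_fair() path may still be running outside the rbtree; the leftmost queued entity therefore has to be compared against curr (and so does the skip buddy's successor) before the usual buddy heuristics run. A reduced sketch of that "leftmost of tree-or-curr" selection, ordering entities by a plain vruntime field; the types and names are stand-ins.

```c
#include <stdio.h>
#include <stddef.h>

struct fake_se { unsigned long vruntime; const char *name; };

static int before(const struct fake_se *a, const struct fake_se *b)
{
        return (long)(a->vruntime - b->vruntime) < 0;   /* wrap-safe compare */
}

/*
 * leftmost: smallest-vruntime entity still queued in the tree (may be NULL),
 * curr:     the running entity, not in the tree (may also be NULL).
 */
static struct fake_se *pick(struct fake_se *leftmost, struct fake_se *curr)
{
        struct fake_se *left = leftmost;

        if (!left || (curr && before(curr, left)))
                left = curr;            /* curr is still the fairest choice */

        return left;
}

int main(void)
{
        struct fake_se queued = { 205, "queued" }, running = { 200, "running" };

        printf("picked: %s\n", pick(&queued, &running)->name);  /* running */
        printf("picked: %s\n", pick(&queued, NULL)->name);      /* queued  */
        return 0;
}
```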
| @@ -2778,7 +2964,7 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) | |||
| 2778 | return se; | 2964 | return se; |
| 2779 | } | 2965 | } |
| 2780 | 2966 | ||
| 2781 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq); | 2967 | static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); |
| 2782 | 2968 | ||
| 2783 | static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) | 2969 | static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) |
| 2784 | { | 2970 | { |
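The entity_before() comparison leaned on in the hunk above is not part of this patch; for reference, a minimal sketch consistent with its long-standing definition in fair.c (a signed vruntime difference, so u64 wrap-around is handled) looks like this:

    static inline int entity_before(struct sched_entity *a,
                                    struct sched_entity *b)
    {
            /* signed difference copes with u64 vruntime wrap-around */
            return (s64)(a->vruntime - b->vruntime) < 0;
    }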
| @@ -3433,22 +3619,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) | |||
| 3433 | } | 3619 | } |
| 3434 | 3620 | ||
| 3435 | /* conditionally throttle active cfs_rq's from put_prev_entity() */ | 3621 | /* conditionally throttle active cfs_rq's from put_prev_entity() */ |
| 3436 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) | 3622 | static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 3437 | { | 3623 | { |
| 3438 | if (!cfs_bandwidth_used()) | 3624 | if (!cfs_bandwidth_used()) |
| 3439 | return; | 3625 | return false; |
| 3440 | 3626 | ||
| 3441 | if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) | 3627 | if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) |
| 3442 | return; | 3628 | return false; |
| 3443 | 3629 | ||
| 3444 | /* | 3630 | /* |
| 3445 | * it's possible for a throttled entity to be forced into a running | 3631 | * it's possible for a throttled entity to be forced into a running |
| 3446 | * state (e.g. set_curr_task), in this case we're finished. | 3632 | * state (e.g. set_curr_task), in this case we're finished. |
| 3447 | */ | 3633 | */ |
| 3448 | if (cfs_rq_throttled(cfs_rq)) | 3634 | if (cfs_rq_throttled(cfs_rq)) |
| 3449 | return; | 3635 | return true; |
| 3450 | 3636 | ||
| 3451 | throttle_cfs_rq(cfs_rq); | 3637 | throttle_cfs_rq(cfs_rq); |
| 3638 | return true; | ||
| 3452 | } | 3639 | } |
| 3453 | 3640 | ||
| 3454 | static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) | 3641 | static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) |
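Only the new group-scheduling fast path in pick_next_task_fair() consumes the bool result; the long-standing put_prev_entity() caller keeps ignoring it. An illustrative sketch of that caller, with the unchanged bookkeeping elided, is:

    static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
    {
            /* catch up on runtime if prev is still queued */
            if (prev->on_rq)
                    update_curr(cfs_rq);

            /* throttle cfs_rqs exceeding runtime; the return value is not needed here */
            check_cfs_rq_runtime(cfs_rq);

            /* (statistics and cfs_rq->curr bookkeeping elided) */
    }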
| @@ -3558,7 +3745,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) | |||
| 3558 | } | 3745 | } |
| 3559 | 3746 | ||
| 3560 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} | 3747 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} |
| 3561 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 3748 | static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } |
| 3562 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} | 3749 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} |
| 3563 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | 3750 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
| 3564 | 3751 | ||
| @@ -4213,13 +4400,14 @@ done: | |||
| 4213 | } | 4400 | } |
| 4214 | 4401 | ||
| 4215 | /* | 4402 | /* |
| 4216 | * sched_balance_self: balance the current task (running on cpu) in domains | 4403 | * select_task_rq_fair: Select target runqueue for the waking task in domains |
| 4217 | * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and | 4404 | * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, |
| 4218 | * SD_BALANCE_EXEC. | 4405 | * SD_BALANCE_FORK, or SD_BALANCE_EXEC. |
| 4219 | * | 4406 | * |
| 4220 | * Balance, ie. select the least loaded group. | 4407 | * Balances load by selecting the idlest cpu in the idlest group, or under |
| 4408 | * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set. | ||
| 4221 | * | 4409 | * |
| 4222 | * Returns the target CPU number, or the same CPU if no balancing is needed. | 4410 | * Returns the target cpu number. |
| 4223 | * | 4411 | * |
| 4224 | * preempt must be disabled. | 4412 | * preempt must be disabled. |
| 4225 | */ | 4413 | */ |
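For orientation, the sd_flag documented above arrives from the core wakeup/fork/exec paths; a hedged sketch of the generic dispatcher (patterned after kernel/sched/core.c, treated here as an assumption rather than part of this diff):

    static inline int
    select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
    {
            /* callers pass SD_BALANCE_WAKE, SD_BALANCE_FORK or SD_BALANCE_EXEC */
            if (p->nr_cpus_allowed > 1)
                    cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

            return cpu;
    }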
| @@ -4494,26 +4682,124 @@ preempt: | |||
| 4494 | set_last_buddy(se); | 4682 | set_last_buddy(se); |
| 4495 | } | 4683 | } |
| 4496 | 4684 | ||
| 4497 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 4685 | static struct task_struct * |
| 4686 | pick_next_task_fair(struct rq *rq, struct task_struct *prev) | ||
| 4498 | { | 4687 | { |
| 4499 | struct task_struct *p; | ||
| 4500 | struct cfs_rq *cfs_rq = &rq->cfs; | 4688 | struct cfs_rq *cfs_rq = &rq->cfs; |
| 4501 | struct sched_entity *se; | 4689 | struct sched_entity *se; |
| 4690 | struct task_struct *p; | ||
| 4691 | int new_tasks; | ||
| 4502 | 4692 | ||
| 4693 | again: | ||
| 4694 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
| 4503 | if (!cfs_rq->nr_running) | 4695 | if (!cfs_rq->nr_running) |
| 4504 | return NULL; | 4696 | goto idle; |
| 4697 | |||
| 4698 | if (prev->sched_class != &fair_sched_class) | ||
| 4699 | goto simple; | ||
| 4700 | |||
| 4701 | /* | ||
| 4702 | * Because of the set_next_buddy() in dequeue_task_fair() it is rather | ||
| 4703 | * likely that the next task is from the same cgroup as the current one. | ||
| 4704 | * | ||
| 4705 | * Therefore attempt to avoid putting and setting the entire cgroup | ||
| 4706 | * hierarchy, only change the part that actually changes. | ||
| 4707 | */ | ||
| 4708 | |||
| 4709 | do { | ||
| 4710 | struct sched_entity *curr = cfs_rq->curr; | ||
| 4711 | |||
| 4712 | /* | ||
| 4713 | * Since we got here without doing put_prev_entity() we also | ||
| 4714 | * have to consider cfs_rq->curr. If it is still a runnable | ||
| 4715 | * entity, update_curr() will update its vruntime, otherwise | ||
| 4716 | * forget we've ever seen it. | ||
| 4717 | */ | ||
| 4718 | if (curr && curr->on_rq) | ||
| 4719 | update_curr(cfs_rq); | ||
| 4720 | else | ||
| 4721 | curr = NULL; | ||
| 4722 | |||
| 4723 | /* | ||
| 4724 | * This call to check_cfs_rq_runtime() will do the throttle and | ||
| 4725 | * dequeue its entity in the parent(s). Therefore the 'simple' | ||
| 4726 | * nr_running test will indeed be correct. | ||
| 4727 | */ | ||
| 4728 | if (unlikely(check_cfs_rq_runtime(cfs_rq))) | ||
| 4729 | goto simple; | ||
| 4730 | |||
| 4731 | se = pick_next_entity(cfs_rq, curr); | ||
| 4732 | cfs_rq = group_cfs_rq(se); | ||
| 4733 | } while (cfs_rq); | ||
| 4734 | |||
| 4735 | p = task_of(se); | ||
| 4736 | |||
| 4737 | /* | ||
| 4738 | * Since we haven't yet done put_prev_entity() and the selected task | ||
| 4739 | * is a different task than the one we started out with, try to touch | ||
| 4740 | * the fewest cfs_rqs possible. | ||
| 4741 | */ | ||
| 4742 | if (prev != p) { | ||
| 4743 | struct sched_entity *pse = &prev->se; | ||
| 4744 | |||
| 4745 | while (!(cfs_rq = is_same_group(se, pse))) { | ||
| 4746 | int se_depth = se->depth; | ||
| 4747 | int pse_depth = pse->depth; | ||
| 4748 | |||
| 4749 | if (se_depth <= pse_depth) { | ||
| 4750 | put_prev_entity(cfs_rq_of(pse), pse); | ||
| 4751 | pse = parent_entity(pse); | ||
| 4752 | } | ||
| 4753 | if (se_depth >= pse_depth) { | ||
| 4754 | set_next_entity(cfs_rq_of(se), se); | ||
| 4755 | se = parent_entity(se); | ||
| 4756 | } | ||
| 4757 | } | ||
| 4758 | |||
| 4759 | put_prev_entity(cfs_rq, pse); | ||
| 4760 | set_next_entity(cfs_rq, se); | ||
| 4761 | } | ||
| 4762 | |||
| 4763 | if (hrtick_enabled(rq)) | ||
| 4764 | hrtick_start_fair(rq, p); | ||
| 4765 | |||
| 4766 | return p; | ||
| 4767 | simple: | ||
| 4768 | cfs_rq = &rq->cfs; | ||
| 4769 | #endif | ||
| 4770 | |||
| 4771 | if (!cfs_rq->nr_running) | ||
| 4772 | goto idle; | ||
| 4773 | |||
| 4774 | put_prev_task(rq, prev); | ||
| 4505 | 4775 | ||
| 4506 | do { | 4776 | do { |
| 4507 | se = pick_next_entity(cfs_rq); | 4777 | se = pick_next_entity(cfs_rq, NULL); |
| 4508 | set_next_entity(cfs_rq, se); | 4778 | set_next_entity(cfs_rq, se); |
| 4509 | cfs_rq = group_cfs_rq(se); | 4779 | cfs_rq = group_cfs_rq(se); |
| 4510 | } while (cfs_rq); | 4780 | } while (cfs_rq); |
| 4511 | 4781 | ||
| 4512 | p = task_of(se); | 4782 | p = task_of(se); |
| 4783 | |||
| 4513 | if (hrtick_enabled(rq)) | 4784 | if (hrtick_enabled(rq)) |
| 4514 | hrtick_start_fair(rq, p); | 4785 | hrtick_start_fair(rq, p); |
| 4515 | 4786 | ||
| 4516 | return p; | 4787 | return p; |
| 4788 | |||
| 4789 | idle: | ||
| 4790 | new_tasks = idle_balance(rq); | ||
| 4791 | /* | ||
| 4792 | * Because idle_balance() releases (and re-acquires) rq->lock, it is | ||
| 4793 | * possible for any higher priority task to appear. In that case we | ||
| 4794 | * must re-start the pick_next_entity() loop. | ||
| 4795 | */ | ||
| 4796 | if (new_tasks < 0) | ||
| 4797 | return RETRY_TASK; | ||
| 4798 | |||
| 4799 | if (new_tasks > 0) | ||
| 4800 | goto again; | ||
| 4801 | |||
| 4802 | return NULL; | ||
| 4517 | } | 4803 | } |
| 4518 | 4804 | ||
| 4519 | /* | 4805 | /* |
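The RETRY_TASK protocol used at the idle: label above is consumed by the class-iterating picker in the core scheduler; a simplified sketch of that consumer (names per kernel/sched/core.c, the fair-class fast path omitted, illustrative only):

    static struct task_struct *
    pick_next_task(struct rq *rq, struct task_struct *prev)
    {
            const struct sched_class *class;
            struct task_struct *p;

    again:
            for_each_class(class) {
                    p = class->pick_next_task(rq, prev);
                    if (p) {
                            /* a higher-priority class became runnable meanwhile */
                            if (unlikely(p == RETRY_TASK))
                                    goto again;
                            return p;
                    }
            }

            BUG();  /* the idle class always has a runnable task */
    }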
| @@ -4751,7 +5037,7 @@ static void move_task(struct task_struct *p, struct lb_env *env) | |||
| 4751 | * Is this task likely cache-hot: | 5037 | * Is this task likely cache-hot: |
| 4752 | */ | 5038 | */ |
| 4753 | static int | 5039 | static int |
| 4754 | task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | 5040 | task_hot(struct task_struct *p, u64 now) |
| 4755 | { | 5041 | { |
| 4756 | s64 delta; | 5042 | s64 delta; |
| 4757 | 5043 | ||
| @@ -4785,7 +5071,7 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env) | |||
| 4785 | { | 5071 | { |
| 4786 | int src_nid, dst_nid; | 5072 | int src_nid, dst_nid; |
| 4787 | 5073 | ||
| 4788 | if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults || | 5074 | if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory || |
| 4789 | !(env->sd->flags & SD_NUMA)) { | 5075 | !(env->sd->flags & SD_NUMA)) { |
| 4790 | return false; | 5076 | return false; |
| 4791 | } | 5077 | } |
| @@ -4816,7 +5102,7 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env) | |||
| 4816 | if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER)) | 5102 | if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER)) |
| 4817 | return false; | 5103 | return false; |
| 4818 | 5104 | ||
| 4819 | if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) | 5105 | if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA)) |
| 4820 | return false; | 5106 | return false; |
| 4821 | 5107 | ||
| 4822 | src_nid = cpu_to_node(env->src_cpu); | 5108 | src_nid = cpu_to_node(env->src_cpu); |
| @@ -4912,7 +5198,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) | |||
| 4912 | * 2) task is cache cold, or | 5198 | * 2) task is cache cold, or |
| 4913 | * 3) too many balance attempts have failed. | 5199 | * 3) too many balance attempts have failed. |
| 4914 | */ | 5200 | */ |
| 4915 | tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd); | 5201 | tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq)); |
| 4916 | if (!tsk_cache_hot) | 5202 | if (!tsk_cache_hot) |
| 4917 | tsk_cache_hot = migrate_degrades_locality(p, env); | 5203 | tsk_cache_hot = migrate_degrades_locality(p, env); |
| 4918 | 5204 | ||
| @@ -5775,12 +6061,10 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) | |||
| 5775 | pwr_now /= SCHED_POWER_SCALE; | 6061 | pwr_now /= SCHED_POWER_SCALE; |
| 5776 | 6062 | ||
| 5777 | /* Amount of load we'd subtract */ | 6063 | /* Amount of load we'd subtract */ |
| 5778 | tmp = (busiest->load_per_task * SCHED_POWER_SCALE) / | 6064 | if (busiest->avg_load > scaled_busy_load_per_task) { |
| 5779 | busiest->group_power; | ||
| 5780 | if (busiest->avg_load > tmp) { | ||
| 5781 | pwr_move += busiest->group_power * | 6065 | pwr_move += busiest->group_power * |
| 5782 | min(busiest->load_per_task, | 6066 | min(busiest->load_per_task, |
| 5783 | busiest->avg_load - tmp); | 6067 | busiest->avg_load - scaled_busy_load_per_task); |
| 5784 | } | 6068 | } |
| 5785 | 6069 | ||
| 5786 | /* Amount of load we'd add */ | 6070 | /* Amount of load we'd add */ |
| @@ -6359,17 +6643,23 @@ out: | |||
| 6359 | * idle_balance is called by schedule() if this_cpu is about to become | 6643 | * idle_balance is called by schedule() if this_cpu is about to become |
| 6360 | * idle. Attempts to pull tasks from other CPUs. | 6644 | * idle. Attempts to pull tasks from other CPUs. |
| 6361 | */ | 6645 | */ |
| 6362 | void idle_balance(int this_cpu, struct rq *this_rq) | 6646 | static int idle_balance(struct rq *this_rq) |
| 6363 | { | 6647 | { |
| 6364 | struct sched_domain *sd; | 6648 | struct sched_domain *sd; |
| 6365 | int pulled_task = 0; | 6649 | int pulled_task = 0; |
| 6366 | unsigned long next_balance = jiffies + HZ; | 6650 | unsigned long next_balance = jiffies + HZ; |
| 6367 | u64 curr_cost = 0; | 6651 | u64 curr_cost = 0; |
| 6652 | int this_cpu = this_rq->cpu; | ||
| 6368 | 6653 | ||
| 6654 | idle_enter_fair(this_rq); | ||
| 6655 | /* | ||
| 6656 | * We must set idle_stamp _before_ calling idle_balance(), such that we | ||
| 6657 | * measure the duration of idle_balance() as idle time. | ||
| 6658 | */ | ||
| 6369 | this_rq->idle_stamp = rq_clock(this_rq); | 6659 | this_rq->idle_stamp = rq_clock(this_rq); |
| 6370 | 6660 | ||
| 6371 | if (this_rq->avg_idle < sysctl_sched_migration_cost) | 6661 | if (this_rq->avg_idle < sysctl_sched_migration_cost) |
| 6372 | return; | 6662 | goto out; |
| 6373 | 6663 | ||
| 6374 | /* | 6664 | /* |
| 6375 | * Drop the rq->lock, but keep IRQ/preempt disabled. | 6665 | * Drop the rq->lock, but keep IRQ/preempt disabled. |
| @@ -6407,15 +6697,22 @@ void idle_balance(int this_cpu, struct rq *this_rq) | |||
| 6407 | interval = msecs_to_jiffies(sd->balance_interval); | 6697 | interval = msecs_to_jiffies(sd->balance_interval); |
| 6408 | if (time_after(next_balance, sd->last_balance + interval)) | 6698 | if (time_after(next_balance, sd->last_balance + interval)) |
| 6409 | next_balance = sd->last_balance + interval; | 6699 | next_balance = sd->last_balance + interval; |
| 6410 | if (pulled_task) { | 6700 | if (pulled_task) |
| 6411 | this_rq->idle_stamp = 0; | ||
| 6412 | break; | 6701 | break; |
| 6413 | } | ||
| 6414 | } | 6702 | } |
| 6415 | rcu_read_unlock(); | 6703 | rcu_read_unlock(); |
| 6416 | 6704 | ||
| 6417 | raw_spin_lock(&this_rq->lock); | 6705 | raw_spin_lock(&this_rq->lock); |
| 6418 | 6706 | ||
| 6707 | /* | ||
| 6708 | * While browsing the domains, we released the rq lock. | ||
| 6709 | * A task could have been enqueued in the meantime. | ||
| 6710 | */ | ||
| 6711 | if (this_rq->cfs.h_nr_running && !pulled_task) { | ||
| 6712 | pulled_task = 1; | ||
| 6713 | goto out; | ||
| 6714 | } | ||
| 6715 | |||
| 6419 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { | 6716 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { |
| 6420 | /* | 6717 | /* |
| 6421 | * We are going idle. next_balance may be set based on | 6718 | * We are going idle. next_balance may be set based on |
| @@ -6426,6 +6723,20 @@ void idle_balance(int this_cpu, struct rq *this_rq) | |||
| 6426 | 6723 | ||
| 6427 | if (curr_cost > this_rq->max_idle_balance_cost) | 6724 | if (curr_cost > this_rq->max_idle_balance_cost) |
| 6428 | this_rq->max_idle_balance_cost = curr_cost; | 6725 | this_rq->max_idle_balance_cost = curr_cost; |
| 6726 | |||
| 6727 | out: | ||
| 6728 | /* Is there a task of a high priority class? */ | ||
| 6729 | if (this_rq->nr_running != this_rq->cfs.h_nr_running && | ||
| 6730 | (this_rq->dl.dl_nr_running || | ||
| 6731 | (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt)))) | ||
| 6732 | pulled_task = -1; | ||
| 6733 | |||
| 6734 | if (pulled_task) { | ||
| 6735 | idle_exit_fair(this_rq); | ||
| 6736 | this_rq->idle_stamp = 0; | ||
| 6737 | } | ||
| 6738 | |||
| 6739 | return pulled_task; | ||
| 6429 | } | 6740 | } |
| 6430 | 6741 | ||
| 6431 | /* | 6742 | /* |
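Read together with the fair-class picker earlier in this diff, idle_balance() now encodes three outcomes in its return value; an annotated restatement of the caller at the idle: label (illustration only, the code itself appears above):

    new_tasks = idle_balance(rq);
    if (new_tasks < 0)              /* dl/rt work appeared while rq->lock was dropped */
            return RETRY_TASK;
    if (new_tasks > 0)              /* pulled fair tasks: redo the pick */
            goto again;
    return NULL;                    /* genuinely idle */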
| @@ -6496,6 +6807,11 @@ out_unlock: | |||
| 6496 | return 0; | 6807 | return 0; |
| 6497 | } | 6808 | } |
| 6498 | 6809 | ||
| 6810 | static inline int on_null_domain(struct rq *rq) | ||
| 6811 | { | ||
| 6812 | return unlikely(!rcu_dereference_sched(rq->sd)); | ||
| 6813 | } | ||
| 6814 | |||
| 6499 | #ifdef CONFIG_NO_HZ_COMMON | 6815 | #ifdef CONFIG_NO_HZ_COMMON |
| 6500 | /* | 6816 | /* |
| 6501 | * idle load balancing details | 6817 | * idle load balancing details |
| @@ -6550,8 +6866,13 @@ static void nohz_balancer_kick(void) | |||
| 6550 | static inline void nohz_balance_exit_idle(int cpu) | 6866 | static inline void nohz_balance_exit_idle(int cpu) |
| 6551 | { | 6867 | { |
| 6552 | if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { | 6868 | if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { |
| 6553 | cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); | 6869 | /* |
| 6554 | atomic_dec(&nohz.nr_cpus); | 6870 | * Completely isolated CPUs never set this bit, so we must test. |
| 6871 | */ | ||
| 6872 | if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) { | ||
| 6873 | cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); | ||
| 6874 | atomic_dec(&nohz.nr_cpus); | ||
| 6875 | } | ||
| 6555 | clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); | 6876 | clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); |
| 6556 | } | 6877 | } |
| 6557 | } | 6878 | } |
| @@ -6605,6 +6926,12 @@ void nohz_balance_enter_idle(int cpu) | |||
| 6605 | if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) | 6926 | if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) |
| 6606 | return; | 6927 | return; |
| 6607 | 6928 | ||
| 6929 | /* | ||
| 6930 | * If we're a completely isolated CPU, we don't play. | ||
| 6931 | */ | ||
| 6932 | if (on_null_domain(cpu_rq(cpu))) | ||
| 6933 | return; | ||
| 6934 | |||
| 6608 | cpumask_set_cpu(cpu, nohz.idle_cpus_mask); | 6935 | cpumask_set_cpu(cpu, nohz.idle_cpus_mask); |
| 6609 | atomic_inc(&nohz.nr_cpus); | 6936 | atomic_inc(&nohz.nr_cpus); |
| 6610 | set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); | 6937 | set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); |
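The other half of this isolation handling is the periodic-balance trigger, which skips CPUs attached to the NULL domain via the same on_null_domain() helper; a hedged sketch of the assumed shape of trigger_load_balance() (not shown in this hunk):

    void trigger_load_balance(struct rq *rq)
    {
            /* Don't need to rebalance while attached to NULL domain */
            if (unlikely(on_null_domain(rq)))
                    return;

            if (time_after_eq(jiffies, rq->next_balance))
                    raise_softirq(SCHED_SOFTIRQ);
            /* (nohz balancer kick elided) */
    }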
| @@ -6867,11 +7194,6 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
| 6867 | nohz_idle_balance(this_rq, idle); | 7194 | nohz_idle_balance(this_rq, idle); |
| 6868 | } | 7195 | } |
| 6869 | 7196 | ||
| 6870 | static inline int on_null_domain(struct rq *rq) | ||
| 6871 | { | ||
| 6872 | return !rcu_dereference_sched(rq->sd); | ||
| 6873 | } | ||
| 6874 | |||
| 6875 | /* | 7197 | /* |
| 6876 | * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. | 7198 | * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. |
| 6877 | */ | 7199 | */ |
| @@ -7001,15 +7323,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) | |||
| 7001 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 7323 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 7002 | 7324 | ||
| 7003 | /* | 7325 | /* |
| 7004 | * Ensure the task's vruntime is normalized, so that when its | 7326 | * Ensure the task's vruntime is normalized, so that when it's |
| 7005 | * switched back to the fair class the enqueue_entity(.flags=0) will | 7327 | * switched back to the fair class the enqueue_entity(.flags=0) will |
| 7006 | * do the right thing. | 7328 | * do the right thing. |
| 7007 | * | 7329 | * |
| 7008 | * If it was on_rq, then the dequeue_entity(.flags=0) will already | 7330 | * If it's on_rq, then the dequeue_entity(.flags=0) will already |
| 7009 | * have normalized the vruntime, if it was !on_rq, then only when | 7331 | * have normalized the vruntime, if it's !on_rq, then only when |
| 7010 | * the task is sleeping will it still have non-normalized vruntime. | 7332 | * the task is sleeping will it still have non-normalized vruntime. |
| 7011 | */ | 7333 | */ |
| 7012 | if (!se->on_rq && p->state != TASK_RUNNING) { | 7334 | if (!p->on_rq && p->state != TASK_RUNNING) { |
| 7013 | /* | 7335 | /* |
| 7014 | * Fix up our vruntime so that the current sleep doesn't | 7336 | * Fix up our vruntime so that the current sleep doesn't |
| 7015 | * cause 'unlimited' sleep bonus. | 7337 | * cause 'unlimited' sleep bonus. |
| @@ -7036,7 +7358,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) | |||
| 7036 | */ | 7358 | */ |
| 7037 | static void switched_to_fair(struct rq *rq, struct task_struct *p) | 7359 | static void switched_to_fair(struct rq *rq, struct task_struct *p) |
| 7038 | { | 7360 | { |
| 7039 | if (!p->se.on_rq) | 7361 | struct sched_entity *se = &p->se; |
| 7362 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
| 7363 | /* | ||
| 7364 | * Since the real-depth could have been changed (only FAIR | ||
| 7365 | * class maintain depth value), reset depth properly. | ||
| 7366 | */ | ||
| 7367 | se->depth = se->parent ? se->parent->depth + 1 : 0; | ||
| 7368 | #endif | ||
| 7369 | if (!se->on_rq) | ||
| 7040 | return; | 7370 | return; |
| 7041 | 7371 | ||
| 7042 | /* | 7372 | /* |
| @@ -7084,7 +7414,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq) | |||
| 7084 | #ifdef CONFIG_FAIR_GROUP_SCHED | 7414 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7085 | static void task_move_group_fair(struct task_struct *p, int on_rq) | 7415 | static void task_move_group_fair(struct task_struct *p, int on_rq) |
| 7086 | { | 7416 | { |
| 7417 | struct sched_entity *se = &p->se; | ||
| 7087 | struct cfs_rq *cfs_rq; | 7418 | struct cfs_rq *cfs_rq; |
| 7419 | |||
| 7088 | /* | 7420 | /* |
| 7089 | * If the task was not on the rq at the time of this cgroup movement | 7421 | * If the task was not on the rq at the time of this cgroup movement |
| 7090 | * it must have been asleep, sleeping tasks keep their ->vruntime | 7422 | * it must have been asleep, sleeping tasks keep their ->vruntime |
| @@ -7110,23 +7442,24 @@ static void task_move_group_fair(struct task_struct *p, int on_rq) | |||
| 7110 | * To prevent boost or penalty in the new cfs_rq caused by delta | 7442 | * To prevent boost or penalty in the new cfs_rq caused by delta |
| 7111 | * min_vruntime between the two cfs_rqs, we skip vruntime adjustment. | 7443 | * min_vruntime between the two cfs_rqs, we skip vruntime adjustment. |
| 7112 | */ | 7444 | */ |
| 7113 | if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING)) | 7445 | if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING)) |
| 7114 | on_rq = 1; | 7446 | on_rq = 1; |
| 7115 | 7447 | ||
| 7116 | if (!on_rq) | 7448 | if (!on_rq) |
| 7117 | p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; | 7449 | se->vruntime -= cfs_rq_of(se)->min_vruntime; |
| 7118 | set_task_rq(p, task_cpu(p)); | 7450 | set_task_rq(p, task_cpu(p)); |
| 7451 | se->depth = se->parent ? se->parent->depth + 1 : 0; | ||
| 7119 | if (!on_rq) { | 7452 | if (!on_rq) { |
| 7120 | cfs_rq = cfs_rq_of(&p->se); | 7453 | cfs_rq = cfs_rq_of(se); |
| 7121 | p->se.vruntime += cfs_rq->min_vruntime; | 7454 | se->vruntime += cfs_rq->min_vruntime; |
| 7122 | #ifdef CONFIG_SMP | 7455 | #ifdef CONFIG_SMP |
| 7123 | /* | 7456 | /* |
| 7124 | * migrate_task_rq_fair() will have removed our previous | 7457 | * migrate_task_rq_fair() will have removed our previous |
| 7125 | * contribution, but we must synchronize for ongoing future | 7458 | * contribution, but we must synchronize for ongoing future |
| 7126 | * decay. | 7459 | * decay. |
| 7127 | */ | 7460 | */ |
| 7128 | p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter); | 7461 | se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter); |
| 7129 | cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib; | 7462 | cfs_rq->blocked_load_avg += se->avg.load_avg_contrib; |
| 7130 | #endif | 7463 | #endif |
| 7131 | } | 7464 | } |
| 7132 | } | 7465 | } |
| @@ -7222,10 +7555,13 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, | |||
| 7222 | if (!se) | 7555 | if (!se) |
| 7223 | return; | 7556 | return; |
| 7224 | 7557 | ||
| 7225 | if (!parent) | 7558 | if (!parent) { |
| 7226 | se->cfs_rq = &rq->cfs; | 7559 | se->cfs_rq = &rq->cfs; |
| 7227 | else | 7560 | se->depth = 0; |
| 7561 | } else { | ||
| 7228 | se->cfs_rq = parent->my_q; | 7562 | se->cfs_rq = parent->my_q; |
| 7563 | se->depth = parent->depth + 1; | ||
| 7564 | } | ||
| 7229 | 7565 | ||
| 7230 | se->my_q = cfs_rq; | 7566 | se->my_q = cfs_rq; |
| 7231 | /* guarantee group entities always have weight */ | 7567 | /* guarantee group entities always have weight */ |
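The se->depth values initialized here keep group-hierarchy walks cheap: two entities can be lifted to a common level by equalizing depths first and then climbing together, the same pattern used in pick_next_task_fair() above. A helper along the lines of find_matching_se(), sketched for illustration:

    static void find_matching_se(struct sched_entity **se, struct sched_entity **pse)
    {
            int se_depth = (*se)->depth;
            int pse_depth = (*pse)->depth;

            /* first bring both entities to the same depth ... */
            while (se_depth > pse_depth) {
                    se_depth--;
                    *se = parent_entity(*se);
            }
            while (pse_depth > se_depth) {
                    pse_depth--;
                    *pse = parent_entity(*pse);
            }

            /* ... then walk up together until they share a cfs_rq */
            while (!is_same_group(*se, *pse)) {
                    *se = parent_entity(*se);
                    *pse = parent_entity(*pse);
            }
    }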
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c new file mode 100644 index 000000000000..8f4390a079c7 --- /dev/null +++ b/kernel/sched/idle.c | |||
| @@ -0,0 +1,265 @@ | |||
| 1 | /* | ||
| 2 | * Generic entry point for the idle threads | ||
| 3 | */ | ||
| 4 | #include <linux/sched.h> | ||
| 5 | #include <linux/cpu.h> | ||
| 6 | #include <linux/cpuidle.h> | ||
| 7 | #include <linux/tick.h> | ||
| 8 | #include <linux/mm.h> | ||
| 9 | #include <linux/stackprotector.h> | ||
| 10 | |||
| 11 | #include <asm/tlb.h> | ||
| 12 | |||
| 13 | #include <trace/events/power.h> | ||
| 14 | |||
| 15 | static int __read_mostly cpu_idle_force_poll; | ||
| 16 | |||
| 17 | void cpu_idle_poll_ctrl(bool enable) | ||
| 18 | { | ||
| 19 | if (enable) { | ||
| 20 | cpu_idle_force_poll++; | ||
| 21 | } else { | ||
| 22 | cpu_idle_force_poll--; | ||
| 23 | WARN_ON_ONCE(cpu_idle_force_poll < 0); | ||
| 24 | } | ||
| 25 | } | ||
| 26 | |||
| 27 | #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP | ||
| 28 | static int __init cpu_idle_poll_setup(char *__unused) | ||
| 29 | { | ||
| 30 | cpu_idle_force_poll = 1; | ||
| 31 | return 1; | ||
| 32 | } | ||
| 33 | __setup("nohlt", cpu_idle_poll_setup); | ||
| 34 | |||
| 35 | static int __init cpu_idle_nopoll_setup(char *__unused) | ||
| 36 | { | ||
| 37 | cpu_idle_force_poll = 0; | ||
| 38 | return 1; | ||
| 39 | } | ||
| 40 | __setup("hlt", cpu_idle_nopoll_setup); | ||
| 41 | #endif | ||
| 42 | |||
| 43 | static inline int cpu_idle_poll(void) | ||
| 44 | { | ||
| 45 | rcu_idle_enter(); | ||
| 46 | trace_cpu_idle_rcuidle(0, smp_processor_id()); | ||
| 47 | local_irq_enable(); | ||
| 48 | while (!tif_need_resched()) | ||
| 49 | cpu_relax(); | ||
| 50 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); | ||
| 51 | rcu_idle_exit(); | ||
| 52 | return 1; | ||
| 53 | } | ||
| 54 | |||
| 55 | /* Weak implementations for optional arch specific functions */ | ||
| 56 | void __weak arch_cpu_idle_prepare(void) { } | ||
| 57 | void __weak arch_cpu_idle_enter(void) { } | ||
| 58 | void __weak arch_cpu_idle_exit(void) { } | ||
| 59 | void __weak arch_cpu_idle_dead(void) { } | ||
| 60 | void __weak arch_cpu_idle(void) | ||
| 61 | { | ||
| 62 | cpu_idle_force_poll = 1; | ||
| 63 | local_irq_enable(); | ||
| 64 | } | ||
| 65 | |||
| 66 | /** | ||
| 67 | * cpuidle_idle_call - the main idle function | ||
| 68 | * | ||
| 69 | * NOTE: no locks or semaphores should be used here | ||
| 70 | * return non-zero on failure | ||
| 71 | */ | ||
| 72 | static int cpuidle_idle_call(void) | ||
| 73 | { | ||
| 74 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | ||
| 75 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
| 76 | int next_state, entered_state, ret; | ||
| 77 | bool broadcast; | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Check if the idle task must be rescheduled. If that is the | ||
| 81 | * case, exit the function after re-enabling the local irq and | ||
| 82 | * setting the polling flag again. | ||
| 83 | */ | ||
| 84 | if (current_clr_polling_and_test()) { | ||
| 85 | local_irq_enable(); | ||
| 86 | __current_set_polling(); | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | /* | ||
| 91 | * During the idle period, stop measuring the latencies of | ||
| 92 | * irqs-disabled critical sections. | ||
| 93 | */ | ||
| 94 | stop_critical_timings(); | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Tell the RCU framework we are entering an idle section, | ||
| 98 | * so no more rcu read side critical sections and one more | ||
| 99 | * step to the grace period | ||
| 100 | */ | ||
| 101 | rcu_idle_enter(); | ||
| 102 | |||
| 103 | /* | ||
| 104 | * Check if the cpuidle framework is ready, otherwise fallback | ||
| 105 | * to the default arch specific idle method | ||
| 106 | */ | ||
| 107 | ret = cpuidle_enabled(drv, dev); | ||
| 108 | |||
| 109 | if (!ret) { | ||
| 110 | /* | ||
| 111 | * Ask the governor to choose an idle state it thinks | ||
| 112 | * is suitable to enter. There is *always* a | ||
| 113 | * suitable idle state. | ||
| 114 | */ | ||
| 115 | next_state = cpuidle_select(drv, dev); | ||
| 116 | |||
| 117 | /* | ||
| 118 | * The idle task needs to be rescheduled, so it is pointless to | ||
| 119 | * go idle; just record a zero idle residency and get | ||
| 120 | * out of this function. | ||
| 121 | */ | ||
| 122 | if (current_clr_polling_and_test()) { | ||
| 123 | dev->last_residency = 0; | ||
| 124 | entered_state = next_state; | ||
| 125 | local_irq_enable(); | ||
| 126 | } else { | ||
| 127 | broadcast = !!(drv->states[next_state].flags & | ||
| 128 | CPUIDLE_FLAG_TIMER_STOP); | ||
| 129 | |||
| 130 | if (broadcast) | ||
| 131 | /* | ||
| 132 | * Tell the time framework to switch | ||
| 133 | * to a broadcast timer because our | ||
| 134 | * local timer will be shutdown. If a | ||
| 134 | * local timer will be shut down. If a | ||
| 136 | * cpu as a broadcast timer, this call | ||
| 137 | * may fail if it is not available | ||
| 138 | */ | ||
| 139 | ret = clockevents_notify( | ||
| 140 | CLOCK_EVT_NOTIFY_BROADCAST_ENTER, | ||
| 141 | &dev->cpu); | ||
| 142 | |||
| 143 | if (!ret) { | ||
| 144 | trace_cpu_idle_rcuidle(next_state, dev->cpu); | ||
| 145 | |||
| 146 | /* | ||
| 147 | * Enter the idle state previously | ||
| 148 | * returned by the governor | ||
| 149 | * decision. This function will block | ||
| 150 | * until an interrupt occurs and will | ||
| 151 | * take care of re-enabling the local | ||
| 152 | * interrupts | ||
| 153 | */ | ||
| 154 | entered_state = cpuidle_enter(drv, dev, | ||
| 155 | next_state); | ||
| 156 | |||
| 157 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, | ||
| 158 | dev->cpu); | ||
| 159 | |||
| 160 | if (broadcast) | ||
| 161 | clockevents_notify( | ||
| 162 | CLOCK_EVT_NOTIFY_BROADCAST_EXIT, | ||
| 163 | &dev->cpu); | ||
| 164 | |||
| 165 | /* | ||
| 166 | * Give the governor an opportunity to reflect on the | ||
| 167 | * outcome | ||
| 168 | */ | ||
| 169 | cpuidle_reflect(dev, entered_state); | ||
| 170 | } | ||
| 171 | } | ||
| 172 | } | ||
| 173 | |||
| 174 | /* | ||
| 175 | * We can't use the cpuidle framework, so fall back to the default | ||
| 176 | * idle routine. | ||
| 177 | */ | ||
| 178 | if (ret) | ||
| 179 | arch_cpu_idle(); | ||
| 180 | |||
| 181 | __current_set_polling(); | ||
| 182 | |||
| 183 | /* | ||
| 184 | * It is up to the idle functions to re-enable local | ||
| 185 | * interrupts. | ||
| 186 | */ | ||
| 187 | if (WARN_ON_ONCE(irqs_disabled())) | ||
| 188 | local_irq_enable(); | ||
| 189 | |||
| 190 | rcu_idle_exit(); | ||
| 191 | start_critical_timings(); | ||
| 192 | |||
| 193 | return 0; | ||
| 194 | } | ||
| 195 | |||
| 196 | /* | ||
| 197 | * Generic idle loop implementation | ||
| 198 | */ | ||
| 199 | static void cpu_idle_loop(void) | ||
| 200 | { | ||
| 201 | while (1) { | ||
| 202 | tick_nohz_idle_enter(); | ||
| 203 | |||
| 204 | while (!need_resched()) { | ||
| 205 | check_pgt_cache(); | ||
| 206 | rmb(); | ||
| 207 | |||
| 208 | if (cpu_is_offline(smp_processor_id())) | ||
| 209 | arch_cpu_idle_dead(); | ||
| 210 | |||
| 211 | local_irq_disable(); | ||
| 212 | arch_cpu_idle_enter(); | ||
| 213 | |||
| 214 | /* | ||
| 215 | * In poll mode we reenable interrupts and spin. | ||
| 216 | * | ||
| 217 | * Also, if we detected in the wakeup-from-idle | ||
| 218 | * path that the tick broadcast device expired | ||
| 219 | * for us, we don't want to go into deep idle as we | ||
| 220 | * know that the IPI is going to arrive right | ||
| 221 | * away. | ||
| 222 | */ | ||
| 223 | if (cpu_idle_force_poll || tick_check_broadcast_expired()) | ||
| 224 | cpu_idle_poll(); | ||
| 225 | else | ||
| 226 | cpuidle_idle_call(); | ||
| 227 | |||
| 228 | arch_cpu_idle_exit(); | ||
| 229 | } | ||
| 230 | |||
| 231 | /* | ||
| 232 | * Since we fell out of the loop above, we know | ||
| 233 | * TIF_NEED_RESCHED must be set, propagate it into | ||
| 234 | * PREEMPT_NEED_RESCHED. | ||
| 235 | * | ||
| 236 | * This is required because for polling idle loops we will | ||
| 237 | * not have had an IPI to fold the state for us. | ||
| 238 | */ | ||
| 239 | preempt_set_need_resched(); | ||
| 240 | tick_nohz_idle_exit(); | ||
| 241 | schedule_preempt_disabled(); | ||
| 242 | } | ||
| 243 | } | ||
| 244 | |||
| 245 | void cpu_startup_entry(enum cpuhp_state state) | ||
| 246 | { | ||
| 247 | /* | ||
| 248 | * This #ifdef needs to die, but it's too late in the cycle to | ||
| 249 | * make this generic (arm and sh have never invoked the canary | ||
| 250 | * init for the non-boot CPUs!). Will be fixed in 3.11. | ||
| 251 | */ | ||
| 252 | #ifdef CONFIG_X86 | ||
| 253 | /* | ||
| 254 | * If we're the non-boot CPU, nothing set the stack canary up | ||
| 255 | * for us. The boot CPU already has it initialized but no harm | ||
| 256 | * in doing it again. This is a good place for updating it, as | ||
| 257 | * we won't ever return from this function (so the invalid | ||
| 258 | * canaries already on the stack won't ever trigger). | ||
| 259 | */ | ||
| 260 | boot_init_stack_canary(); | ||
| 261 | #endif | ||
| 262 | __current_set_polling(); | ||
| 263 | arch_cpu_idle_prepare(); | ||
| 264 | cpu_idle_loop(); | ||
| 265 | } | ||
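cpu_idle_poll_ctrl() above is the knob other code uses to temporarily force polling idle; a hypothetical usage sketch (the surrounding wait is invented for illustration):

    /* keep wakeup latency minimal while waiting for a time-critical IPI */
    cpu_idle_poll_ctrl(true);
    wait_for_completion(&done);     /* hypothetical latency-critical wait */
    cpu_idle_poll_ctrl(false);      /* allow cpuidle states again */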
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index 516c3d9ceea1..879f2b75266a 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c | |||
| @@ -13,18 +13,8 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags) | |||
| 13 | { | 13 | { |
| 14 | return task_cpu(p); /* IDLE tasks are never migrated */ | 14 | return task_cpu(p); /* IDLE tasks are never migrated */ |
| 15 | } | 15 | } |
| 16 | |||
| 17 | static void pre_schedule_idle(struct rq *rq, struct task_struct *prev) | ||
| 18 | { | ||
| 19 | idle_exit_fair(rq); | ||
| 20 | rq_last_tick_reset(rq); | ||
| 21 | } | ||
| 22 | |||
| 23 | static void post_schedule_idle(struct rq *rq) | ||
| 24 | { | ||
| 25 | idle_enter_fair(rq); | ||
| 26 | } | ||
| 27 | #endif /* CONFIG_SMP */ | 16 | #endif /* CONFIG_SMP */ |
| 17 | |||
| 28 | /* | 18 | /* |
| 29 | * Idle tasks are unconditionally rescheduled: | 19 | * Idle tasks are unconditionally rescheduled: |
| 30 | */ | 20 | */ |
| @@ -33,13 +23,12 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl | |||
| 33 | resched_task(rq->idle); | 23 | resched_task(rq->idle); |
| 34 | } | 24 | } |
| 35 | 25 | ||
| 36 | static struct task_struct *pick_next_task_idle(struct rq *rq) | 26 | static struct task_struct * |
| 27 | pick_next_task_idle(struct rq *rq, struct task_struct *prev) | ||
| 37 | { | 28 | { |
| 29 | put_prev_task(rq, prev); | ||
| 30 | |||
| 38 | schedstat_inc(rq, sched_goidle); | 31 | schedstat_inc(rq, sched_goidle); |
| 39 | #ifdef CONFIG_SMP | ||
| 40 | /* Trigger the post schedule to do an idle_enter for CFS */ | ||
| 41 | rq->post_schedule = 1; | ||
| 42 | #endif | ||
| 43 | return rq->idle; | 32 | return rq->idle; |
| 44 | } | 33 | } |
| 45 | 34 | ||
| @@ -58,6 +47,8 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) | |||
| 58 | 47 | ||
| 59 | static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) | 48 | static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) |
| 60 | { | 49 | { |
| 50 | idle_exit_fair(rq); | ||
| 51 | rq_last_tick_reset(rq); | ||
| 61 | } | 52 | } |
| 62 | 53 | ||
| 63 | static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) | 54 | static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) |
| @@ -101,8 +92,6 @@ const struct sched_class idle_sched_class = { | |||
| 101 | 92 | ||
| 102 | #ifdef CONFIG_SMP | 93 | #ifdef CONFIG_SMP |
| 103 | .select_task_rq = select_task_rq_idle, | 94 | .select_task_rq = select_task_rq_idle, |
| 104 | .pre_schedule = pre_schedule_idle, | ||
| 105 | .post_schedule = post_schedule_idle, | ||
| 106 | #endif | 95 | #endif |
| 107 | 96 | ||
| 108 | .set_curr_task = set_curr_task_idle, | 97 | .set_curr_task = set_curr_task_idle, |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a2740b775b45..d8cdf1618551 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
| @@ -229,6 +229,14 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
| 229 | 229 | ||
| 230 | #ifdef CONFIG_SMP | 230 | #ifdef CONFIG_SMP |
| 231 | 231 | ||
| 232 | static int pull_rt_task(struct rq *this_rq); | ||
| 233 | |||
| 234 | static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) | ||
| 235 | { | ||
| 236 | /* Try to pull RT tasks here if we lower this rq's prio */ | ||
| 237 | return rq->rt.highest_prio.curr > prev->prio; | ||
| 238 | } | ||
| 239 | |||
| 232 | static inline int rt_overloaded(struct rq *rq) | 240 | static inline int rt_overloaded(struct rq *rq) |
| 233 | { | 241 | { |
| 234 | return atomic_read(&rq->rd->rto_count); | 242 | return atomic_read(&rq->rd->rto_count); |
| @@ -315,6 +323,15 @@ static inline int has_pushable_tasks(struct rq *rq) | |||
| 315 | return !plist_head_empty(&rq->rt.pushable_tasks); | 323 | return !plist_head_empty(&rq->rt.pushable_tasks); |
| 316 | } | 324 | } |
| 317 | 325 | ||
| 326 | static inline void set_post_schedule(struct rq *rq) | ||
| 327 | { | ||
| 328 | /* | ||
| 329 | * We detect this state here so that we can avoid taking the RQ | ||
| 330 | * lock again later if there is no need to push | ||
| 331 | */ | ||
| 332 | rq->post_schedule = has_pushable_tasks(rq); | ||
| 333 | } | ||
| 334 | |||
| 318 | static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) | 335 | static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) |
| 319 | { | 336 | { |
| 320 | plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); | 337 | plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); |
| @@ -359,6 +376,19 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | |||
| 359 | { | 376 | { |
| 360 | } | 377 | } |
| 361 | 378 | ||
| 379 | static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) | ||
| 380 | { | ||
| 381 | return false; | ||
| 382 | } | ||
| 383 | |||
| 384 | static inline int pull_rt_task(struct rq *this_rq) | ||
| 385 | { | ||
| 386 | return 0; | ||
| 387 | } | ||
| 388 | |||
| 389 | static inline void set_post_schedule(struct rq *rq) | ||
| 390 | { | ||
| 391 | } | ||
| 362 | #endif /* CONFIG_SMP */ | 392 | #endif /* CONFIG_SMP */ |
| 363 | 393 | ||
| 364 | static inline int on_rt_rq(struct sched_rt_entity *rt_se) | 394 | static inline int on_rt_rq(struct sched_rt_entity *rt_se) |
| @@ -440,11 +470,6 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) | |||
| 440 | dequeue_rt_entity(rt_se); | 470 | dequeue_rt_entity(rt_se); |
| 441 | } | 471 | } |
| 442 | 472 | ||
| 443 | static inline int rt_rq_throttled(struct rt_rq *rt_rq) | ||
| 444 | { | ||
| 445 | return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; | ||
| 446 | } | ||
| 447 | |||
| 448 | static int rt_se_boosted(struct sched_rt_entity *rt_se) | 473 | static int rt_se_boosted(struct sched_rt_entity *rt_se) |
| 449 | { | 474 | { |
| 450 | struct rt_rq *rt_rq = group_rt_rq(rt_se); | 475 | struct rt_rq *rt_rq = group_rt_rq(rt_se); |
| @@ -515,11 +540,6 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) | |||
| 515 | { | 540 | { |
| 516 | } | 541 | } |
| 517 | 542 | ||
| 518 | static inline int rt_rq_throttled(struct rt_rq *rt_rq) | ||
| 519 | { | ||
| 520 | return rt_rq->rt_throttled; | ||
| 521 | } | ||
| 522 | |||
| 523 | static inline const struct cpumask *sched_rt_period_mask(void) | 543 | static inline const struct cpumask *sched_rt_period_mask(void) |
| 524 | { | 544 | { |
| 525 | return cpu_online_mask; | 545 | return cpu_online_mask; |
| @@ -538,6 +558,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) | |||
| 538 | 558 | ||
| 539 | #endif /* CONFIG_RT_GROUP_SCHED */ | 559 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 540 | 560 | ||
| 561 | bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) | ||
| 562 | { | ||
| 563 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | ||
| 564 | |||
| 565 | return (hrtimer_active(&rt_b->rt_period_timer) || | ||
| 566 | rt_rq->rt_time < rt_b->rt_runtime); | ||
| 567 | } | ||
| 568 | |||
| 541 | #ifdef CONFIG_SMP | 569 | #ifdef CONFIG_SMP |
| 542 | /* | 570 | /* |
| 543 | * We ran out of runtime, see if we can borrow some from our neighbours. | 571 | * We ran out of runtime, see if we can borrow some from our neighbours. |
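sched_rt_bandwidth_account() is presumably consumed by the rt runtime accounting in update_curr_rt(); a sketch of the expected caller, with delta_exec, curr and rt_se taken from that surrounding context (the exact placement is an assumption, not shown in this diff):

    if (!rt_bandwidth_enabled())
            return;

    for_each_sched_rt_entity(rt_se) {
            struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

            /* only account while the period timer can still refill runtime */
            if (sched_rt_bandwidth_account(rt_rq)) {
                    raw_spin_lock(&rt_rq->rt_runtime_lock);
                    rt_rq->rt_time += delta_exec;
                    if (sched_rt_runtime_exceeded(rt_rq))
                            resched_task(curr);
                    raw_spin_unlock(&rt_rq->rt_runtime_lock);
            }
    }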
| @@ -1310,15 +1338,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) | |||
| 1310 | { | 1338 | { |
| 1311 | struct sched_rt_entity *rt_se; | 1339 | struct sched_rt_entity *rt_se; |
| 1312 | struct task_struct *p; | 1340 | struct task_struct *p; |
| 1313 | struct rt_rq *rt_rq; | 1341 | struct rt_rq *rt_rq = &rq->rt; |
| 1314 | |||
| 1315 | rt_rq = &rq->rt; | ||
| 1316 | |||
| 1317 | if (!rt_rq->rt_nr_running) | ||
| 1318 | return NULL; | ||
| 1319 | |||
| 1320 | if (rt_rq_throttled(rt_rq)) | ||
| 1321 | return NULL; | ||
| 1322 | 1342 | ||
| 1323 | do { | 1343 | do { |
| 1324 | rt_se = pick_next_rt_entity(rq, rt_rq); | 1344 | rt_se = pick_next_rt_entity(rq, rt_rq); |
| @@ -1332,21 +1352,45 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) | |||
| 1332 | return p; | 1352 | return p; |
| 1333 | } | 1353 | } |
| 1334 | 1354 | ||
| 1335 | static struct task_struct *pick_next_task_rt(struct rq *rq) | 1355 | static struct task_struct * |
| 1356 | pick_next_task_rt(struct rq *rq, struct task_struct *prev) | ||
| 1336 | { | 1357 | { |
| 1337 | struct task_struct *p = _pick_next_task_rt(rq); | 1358 | struct task_struct *p; |
| 1359 | struct rt_rq *rt_rq = &rq->rt; | ||
| 1360 | |||
| 1361 | if (need_pull_rt_task(rq, prev)) { | ||
| 1362 | pull_rt_task(rq); | ||
| 1363 | /* | ||
| 1364 | * pull_rt_task() can drop (and re-acquire) rq->lock; this | ||
| 1365 | * means a dl task can slip in, in which case we need to | ||
| 1366 | * re-start task selection. | ||
| 1367 | */ | ||
| 1368 | if (unlikely(rq->dl.dl_nr_running)) | ||
| 1369 | return RETRY_TASK; | ||
| 1370 | } | ||
| 1371 | |||
| 1372 | /* | ||
| 1373 | * We may dequeue prev's rt_rq in put_prev_task(). | ||
| 1374 | * So, we update the time before the rt_nr_running check. | ||
| 1375 | */ | ||
| 1376 | if (prev->sched_class == &rt_sched_class) | ||
| 1377 | update_curr_rt(rq); | ||
| 1378 | |||
| 1379 | if (!rt_rq->rt_nr_running) | ||
| 1380 | return NULL; | ||
| 1381 | |||
| 1382 | if (rt_rq_throttled(rt_rq)) | ||
| 1383 | return NULL; | ||
| 1384 | |||
| 1385 | put_prev_task(rq, prev); | ||
| 1386 | |||
| 1387 | p = _pick_next_task_rt(rq); | ||
| 1338 | 1388 | ||
| 1339 | /* The running task is never eligible for pushing */ | 1389 | /* The running task is never eligible for pushing */ |
| 1340 | if (p) | 1390 | if (p) |
| 1341 | dequeue_pushable_task(rq, p); | 1391 | dequeue_pushable_task(rq, p); |
| 1342 | 1392 | ||
| 1343 | #ifdef CONFIG_SMP | 1393 | set_post_schedule(rq); |
| 1344 | /* | ||
| 1345 | * We detect this state here so that we can avoid taking the RQ | ||
| 1346 | * lock again later if there is no need to push | ||
| 1347 | */ | ||
| 1348 | rq->post_schedule = has_pushable_tasks(rq); | ||
| 1349 | #endif | ||
| 1350 | 1394 | ||
| 1351 | return p; | 1395 | return p; |
| 1352 | } | 1396 | } |
| @@ -1716,13 +1760,6 @@ skip: | |||
| 1716 | return ret; | 1760 | return ret; |
| 1717 | } | 1761 | } |
| 1718 | 1762 | ||
| 1719 | static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) | ||
| 1720 | { | ||
| 1721 | /* Try to pull RT tasks here if we lower this rq's prio */ | ||
| 1722 | if (rq->rt.highest_prio.curr > prev->prio) | ||
| 1723 | pull_rt_task(rq); | ||
| 1724 | } | ||
| 1725 | |||
| 1726 | static void post_schedule_rt(struct rq *rq) | 1763 | static void post_schedule_rt(struct rq *rq) |
| 1727 | { | 1764 | { |
| 1728 | push_rt_tasks(rq); | 1765 | push_rt_tasks(rq); |
| @@ -1825,7 +1862,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p) | |||
| 1825 | resched_task(rq->curr); | 1862 | resched_task(rq->curr); |
| 1826 | } | 1863 | } |
| 1827 | 1864 | ||
| 1828 | void init_sched_rt_class(void) | 1865 | void __init init_sched_rt_class(void) |
| 1829 | { | 1866 | { |
| 1830 | unsigned int i; | 1867 | unsigned int i; |
| 1831 | 1868 | ||
| @@ -1999,7 +2036,6 @@ const struct sched_class rt_sched_class = { | |||
| 1999 | .set_cpus_allowed = set_cpus_allowed_rt, | 2036 | .set_cpus_allowed = set_cpus_allowed_rt, |
| 2000 | .rq_online = rq_online_rt, | 2037 | .rq_online = rq_online_rt, |
| 2001 | .rq_offline = rq_offline_rt, | 2038 | .rq_offline = rq_offline_rt, |
| 2002 | .pre_schedule = pre_schedule_rt, | ||
| 2003 | .post_schedule = post_schedule_rt, | 2039 | .post_schedule = post_schedule_rt, |
| 2004 | .task_woken = task_woken_rt, | 2040 | .task_woken = task_woken_rt, |
| 2005 | .switched_from = switched_from_rt, | 2041 | .switched_from = switched_from_rt, |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index f964add50f38..c9007f28d3a2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
| @@ -24,24 +24,6 @@ extern long calc_load_fold_active(struct rq *this_rq); | |||
| 24 | extern void update_cpu_load_active(struct rq *this_rq); | 24 | extern void update_cpu_load_active(struct rq *this_rq); |
| 25 | 25 | ||
| 26 | /* | 26 | /* |
| 27 | * Convert user-nice values [ -20 ... 0 ... 19 ] | ||
| 28 | * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], | ||
| 29 | * and back. | ||
| 30 | */ | ||
| 31 | #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) | ||
| 32 | #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) | ||
| 33 | #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) | ||
| 34 | |||
| 35 | /* | ||
| 36 | * 'User priority' is the nice value converted to something we | ||
| 37 | * can work with better when scaling various scheduler parameters, | ||
| 38 | * it's a [ 0 ... 39 ] range. | ||
| 39 | */ | ||
| 40 | #define USER_PRIO(p) ((p)-MAX_RT_PRIO) | ||
| 41 | #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) | ||
| 42 | #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Helpers for converting nanosecond timing to jiffy resolution | 27 | * Helpers for converting nanosecond timing to jiffy resolution |
| 46 | */ | 28 | */ |
| 47 | #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) | 29 | #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) |
| @@ -441,6 +423,18 @@ struct rt_rq { | |||
| 441 | #endif | 423 | #endif |
| 442 | }; | 424 | }; |
| 443 | 425 | ||
| 426 | #ifdef CONFIG_RT_GROUP_SCHED | ||
| 427 | static inline int rt_rq_throttled(struct rt_rq *rt_rq) | ||
| 428 | { | ||
| 429 | return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; | ||
| 430 | } | ||
| 431 | #else | ||
| 432 | static inline int rt_rq_throttled(struct rt_rq *rt_rq) | ||
| 433 | { | ||
| 434 | return rt_rq->rt_throttled; | ||
| 435 | } | ||
| 436 | #endif | ||
| 437 | |||
| 444 | /* Deadline class' related fields in a runqueue */ | 438 | /* Deadline class' related fields in a runqueue */ |
| 445 | struct dl_rq { | 439 | struct dl_rq { |
| 446 | /* runqueue is an rbtree, ordered by deadline */ | 440 | /* runqueue is an rbtree, ordered by deadline */ |
| @@ -558,11 +552,9 @@ struct rq { | |||
| 558 | #ifdef CONFIG_FAIR_GROUP_SCHED | 552 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 559 | /* list of leaf cfs_rq on this cpu: */ | 553 | /* list of leaf cfs_rq on this cpu: */ |
| 560 | struct list_head leaf_cfs_rq_list; | 554 | struct list_head leaf_cfs_rq_list; |
| 561 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | ||
| 562 | 555 | ||
| 563 | #ifdef CONFIG_RT_GROUP_SCHED | 556 | struct sched_avg avg; |
| 564 | struct list_head leaf_rt_rq_list; | 557 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 565 | #endif | ||
| 566 | 558 | ||
| 567 | /* | 559 | /* |
| 568 | * This is part of a global counter where only the total sum | 560 | * This is part of a global counter where only the total sum |
| @@ -651,8 +643,6 @@ struct rq { | |||
| 651 | #ifdef CONFIG_SMP | 643 | #ifdef CONFIG_SMP |
| 652 | struct llist_head wake_list; | 644 | struct llist_head wake_list; |
| 653 | #endif | 645 | #endif |
| 654 | |||
| 655 | struct sched_avg avg; | ||
| 656 | }; | 646 | }; |
| 657 | 647 | ||
| 658 | static inline int cpu_of(struct rq *rq) | 648 | static inline int cpu_of(struct rq *rq) |
| @@ -1112,6 +1102,8 @@ static const u32 prio_to_wmult[40] = { | |||
| 1112 | 1102 | ||
| 1113 | #define DEQUEUE_SLEEP 1 | 1103 | #define DEQUEUE_SLEEP 1 |
| 1114 | 1104 | ||
| 1105 | #define RETRY_TASK ((void *)-1UL) | ||
| 1106 | |||
| 1115 | struct sched_class { | 1107 | struct sched_class { |
| 1116 | const struct sched_class *next; | 1108 | const struct sched_class *next; |
| 1117 | 1109 | ||
| @@ -1122,14 +1114,22 @@ struct sched_class { | |||
| 1122 | 1114 | ||
| 1123 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); | 1115 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
| 1124 | 1116 | ||
| 1125 | struct task_struct * (*pick_next_task) (struct rq *rq); | 1117 | /* |
| 1118 | * It is the responsibility of the pick_next_task() method that | ||
| 1119 | * returns the next task to call put_prev_task() on the @prev task, | ||
| 1120 | * or something equivalent, before returning. | ||
| 1121 | * | ||
| 1122 | * May return RETRY_TASK when it finds a higher prio class has runnable | ||
| 1123 | * tasks. | ||
| 1124 | */ | ||
| 1125 | struct task_struct * (*pick_next_task) (struct rq *rq, | ||
| 1126 | struct task_struct *prev); | ||
| 1126 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 1127 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
| 1127 | 1128 | ||
| 1128 | #ifdef CONFIG_SMP | 1129 | #ifdef CONFIG_SMP |
| 1129 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); | 1130 | int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); |
| 1130 | void (*migrate_task_rq)(struct task_struct *p, int next_cpu); | 1131 | void (*migrate_task_rq)(struct task_struct *p, int next_cpu); |
| 1131 | 1132 | ||
| 1132 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | ||
| 1133 | void (*post_schedule) (struct rq *this_rq); | 1133 | void (*post_schedule) (struct rq *this_rq); |
| 1134 | void (*task_waking) (struct task_struct *task); | 1134 | void (*task_waking) (struct task_struct *task); |
| 1135 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); | 1135 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
| @@ -1159,6 +1159,11 @@ struct sched_class { | |||
| 1159 | #endif | 1159 | #endif |
| 1160 | }; | 1160 | }; |
| 1161 | 1161 | ||
| 1162 | static inline void put_prev_task(struct rq *rq, struct task_struct *prev) | ||
| 1163 | { | ||
| 1164 | prev->sched_class->put_prev_task(rq, prev); | ||
| 1165 | } | ||
| 1166 | |||
| 1162 | #define sched_class_highest (&stop_sched_class) | 1167 | #define sched_class_highest (&stop_sched_class) |
| 1163 | #define for_each_class(class) \ | 1168 | #define for_each_class(class) \ |
| 1164 | for (class = sched_class_highest; class; class = class->next) | 1169 | for (class = sched_class_highest; class; class = class->next) |
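Taken together, the new contract every class must honor is: retire @prev with put_prev_task() exactly once before handing back a task, and optionally bail out with RETRY_TASK when a higher-priority class became runnable. Schematically, with the class name and helpers invented purely for illustration:

    static struct task_struct *
    pick_next_task_example(struct rq *rq, struct task_struct *prev)
    {
            if (!example_has_runnable(rq))          /* hypothetical emptiness check */
                    return NULL;                    /* let the next lower class try */

            if (example_higher_class_appeared(rq))  /* hypothetical; see RETRY_TASK */
                    return RETRY_TASK;

            put_prev_task(rq, prev);                /* retire @prev exactly once */

            return example_pick(rq);                /* hypothetical selection */
    }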
| @@ -1175,16 +1180,14 @@ extern const struct sched_class idle_sched_class; | |||
| 1175 | extern void update_group_power(struct sched_domain *sd, int cpu); | 1180 | extern void update_group_power(struct sched_domain *sd, int cpu); |
| 1176 | 1181 | ||
| 1177 | extern void trigger_load_balance(struct rq *rq); | 1182 | extern void trigger_load_balance(struct rq *rq); |
| 1178 | extern void idle_balance(int this_cpu, struct rq *this_rq); | ||
| 1179 | 1183 | ||
| 1180 | extern void idle_enter_fair(struct rq *this_rq); | 1184 | extern void idle_enter_fair(struct rq *this_rq); |
| 1181 | extern void idle_exit_fair(struct rq *this_rq); | 1185 | extern void idle_exit_fair(struct rq *this_rq); |
| 1182 | 1186 | ||
| 1183 | #else /* CONFIG_SMP */ | 1187 | #else |
| 1184 | 1188 | ||
| 1185 | static inline void idle_balance(int cpu, struct rq *rq) | 1189 | static inline void idle_enter_fair(struct rq *rq) { } |
| 1186 | { | 1190 | static inline void idle_exit_fair(struct rq *rq) { } |
| 1187 | } | ||
| 1188 | 1191 | ||
| 1189 | #endif | 1192 | #endif |
| 1190 | 1193 | ||
| @@ -1213,16 +1216,6 @@ extern void update_idle_cpu_load(struct rq *this_rq); | |||
| 1213 | 1216 | ||
| 1214 | extern void init_task_runnable_average(struct task_struct *p); | 1217 | extern void init_task_runnable_average(struct task_struct *p); |
| 1215 | 1218 | ||
| 1216 | #ifdef CONFIG_PARAVIRT | ||
| 1217 | static inline u64 steal_ticks(u64 steal) | ||
| 1218 | { | ||
| 1219 | if (unlikely(steal > NSEC_PER_SEC)) | ||
| 1220 | return div_u64(steal, TICK_NSEC); | ||
| 1221 | |||
| 1222 | return __iter_div_u64_rem(steal, TICK_NSEC, &steal); | ||
| 1223 | } | ||
| 1224 | #endif | ||
| 1225 | |||
| 1226 | static inline void inc_nr_running(struct rq *rq) | 1219 | static inline void inc_nr_running(struct rq *rq) |
| 1227 | { | 1220 | { |
| 1228 | rq->nr_running++; | 1221 | rq->nr_running++; |
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index da98af347e8b..a476bea17fbc 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c | |||
| @@ -142,4 +142,4 @@ static int __init proc_schedstat_init(void) | |||
| 142 | proc_create("schedstat", 0, NULL, &proc_schedstat_operations); | 142 | proc_create("schedstat", 0, NULL, &proc_schedstat_operations); |
| 143 | return 0; | 143 | return 0; |
| 144 | } | 144 | } |
| 145 | module_init(proc_schedstat_init); | 145 | subsys_initcall(proc_schedstat_init); |
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index fdb6bb0b3356..d6ce65dde541 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c | |||
| @@ -23,16 +23,19 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) | |||
| 23 | /* we're never preempted */ | 23 | /* we're never preempted */ |
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | static struct task_struct *pick_next_task_stop(struct rq *rq) | 26 | static struct task_struct * |
| 27 | pick_next_task_stop(struct rq *rq, struct task_struct *prev) | ||
| 27 | { | 28 | { |
| 28 | struct task_struct *stop = rq->stop; | 29 | struct task_struct *stop = rq->stop; |
| 29 | 30 | ||
| 30 | if (stop && stop->on_rq) { | 31 | if (!stop || !stop->on_rq) |
| 31 | stop->se.exec_start = rq_clock_task(rq); | 32 | return NULL; |
| 32 | return stop; | ||
| 33 | } | ||
| 34 | 33 | ||
| 35 | return NULL; | 34 | put_prev_task(rq, prev); |
| 35 | |||
| 36 | stop->se.exec_start = rq_clock_task(rq); | ||
| 37 | |||
| 38 | return stop; | ||
| 36 | } | 39 | } |
| 37 | 40 | ||
| 38 | static void | 41 | static void |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index b7a10048a32c..d8d046c0726a 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -55,60 +55,33 @@ struct seccomp_filter { | |||
| 55 | atomic_t usage; | 55 | atomic_t usage; |
| 56 | struct seccomp_filter *prev; | 56 | struct seccomp_filter *prev; |
| 57 | unsigned short len; /* Instruction count */ | 57 | unsigned short len; /* Instruction count */ |
| 58 | struct sock_filter insns[]; | 58 | struct sock_filter_int insnsi[]; |
| 59 | }; | 59 | }; |
| 60 | 60 | ||
| 61 | /* Limit any path through the tree to 256KB worth of instructions. */ | 61 | /* Limit any path through the tree to 256KB worth of instructions. */ |
| 62 | #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter)) | 62 | #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter)) |
| 63 | 63 | ||
| 64 | /** | 64 | /* |
| 65 | * get_u32 - returns a u32 offset into data | ||
| 66 | * @data: a unsigned 64 bit value | ||
| 67 | * @index: 0 or 1 to return the first or second 32-bits | ||
| 68 | * | ||
| 69 | * This inline exists to hide the length of unsigned long. If a 32-bit | ||
| 70 | * unsigned long is passed in, it will be extended and the top 32-bits will be | ||
| 71 | * 0. If it is a 64-bit unsigned long, then whatever data is resident will be | ||
| 72 | * properly returned. | ||
| 73 | * | ||
| 74 | * Endianness is explicitly ignored and left for BPF program authors to manage | 65 | * Endianness is explicitly ignored and left for BPF program authors to manage |
| 75 | * as per the specific architecture. | 66 | * as per the specific architecture. |
| 76 | */ | 67 | */ |
| 77 | static inline u32 get_u32(u64 data, int index) | 68 | static void populate_seccomp_data(struct seccomp_data *sd) |
| 78 | { | 69 | { |
| 79 | return ((u32 *)&data)[index]; | 70 | struct task_struct *task = current; |
| 80 | } | 71 | struct pt_regs *regs = task_pt_regs(task); |
| 81 | 72 | ||
| 82 | /* Helper for bpf_load below. */ | 73 | sd->nr = syscall_get_nr(task, regs); |
| 83 | #define BPF_DATA(_name) offsetof(struct seccomp_data, _name) | 74 | sd->arch = syscall_get_arch(); |
| 84 | /** | 75 | |
| 85 | * bpf_load: checks and returns a pointer to the requested offset | 76 | /* Unroll syscall_get_args to help gcc on arm. */ |
| 86 | * @off: offset into struct seccomp_data to load from | 77 | syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]); |
| 87 | * | 78 | syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]); |
| 88 | * Returns the requested 32-bits of data. | 79 | syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]); |
| 89 | * seccomp_check_filter() should assure that @off is 32-bit aligned | 80 | syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]); |
| 90 | * and not out of bounds. Failure to do so is a BUG. | 81 | syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]); |
| 91 | */ | 82 | syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]); |
| 92 | u32 seccomp_bpf_load(int off) | 83 | |
| 93 | { | 84 | sd->instruction_pointer = KSTK_EIP(task); |
| 94 | struct pt_regs *regs = task_pt_regs(current); | ||
| 95 | if (off == BPF_DATA(nr)) | ||
| 96 | return syscall_get_nr(current, regs); | ||
| 97 | if (off == BPF_DATA(arch)) | ||
| 98 | return syscall_get_arch(current, regs); | ||
| 99 | if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) { | ||
| 100 | unsigned long value; | ||
| 101 | int arg = (off - BPF_DATA(args[0])) / sizeof(u64); | ||
| 102 | int index = !!(off % sizeof(u64)); | ||
| 103 | syscall_get_arguments(current, regs, arg, 1, &value); | ||
| 104 | return get_u32(value, index); | ||
| 105 | } | ||
| 106 | if (off == BPF_DATA(instruction_pointer)) | ||
| 107 | return get_u32(KSTK_EIP(current), 0); | ||
| 108 | if (off == BPF_DATA(instruction_pointer) + sizeof(u32)) | ||
| 109 | return get_u32(KSTK_EIP(current), 1); | ||
| 110 | /* seccomp_check_filter should make this impossible. */ | ||
| 111 | BUG(); | ||
| 112 | } | 85 | } |
| 113 | 86 | ||
| 114 | /** | 87 | /** |
| @@ -133,17 +106,17 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
| 133 | 106 | ||
| 134 | switch (code) { | 107 | switch (code) { |
| 135 | case BPF_S_LD_W_ABS: | 108 | case BPF_S_LD_W_ABS: |
| 136 | ftest->code = BPF_S_ANC_SECCOMP_LD_W; | 109 | ftest->code = BPF_LDX | BPF_W | BPF_ABS; |
| 137 | /* 32-bit aligned and not out of bounds. */ | 110 | /* 32-bit aligned and not out of bounds. */ |
| 138 | if (k >= sizeof(struct seccomp_data) || k & 3) | 111 | if (k >= sizeof(struct seccomp_data) || k & 3) |
| 139 | return -EINVAL; | 112 | return -EINVAL; |
| 140 | continue; | 113 | continue; |
| 141 | case BPF_S_LD_W_LEN: | 114 | case BPF_S_LD_W_LEN: |
| 142 | ftest->code = BPF_S_LD_IMM; | 115 | ftest->code = BPF_LD | BPF_IMM; |
| 143 | ftest->k = sizeof(struct seccomp_data); | 116 | ftest->k = sizeof(struct seccomp_data); |
| 144 | continue; | 117 | continue; |
| 145 | case BPF_S_LDX_W_LEN: | 118 | case BPF_S_LDX_W_LEN: |
| 146 | ftest->code = BPF_S_LDX_IMM; | 119 | ftest->code = BPF_LDX | BPF_IMM; |
| 147 | ftest->k = sizeof(struct seccomp_data); | 120 | ftest->k = sizeof(struct seccomp_data); |
| 148 | continue; | 121 | continue; |
| 149 | /* Explicitly include allowed calls. */ | 122 | /* Explicitly include allowed calls. */ |
| @@ -185,6 +158,7 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
| 185 | case BPF_S_JMP_JGT_X: | 158 | case BPF_S_JMP_JGT_X: |
| 186 | case BPF_S_JMP_JSET_K: | 159 | case BPF_S_JMP_JSET_K: |
| 187 | case BPF_S_JMP_JSET_X: | 160 | case BPF_S_JMP_JSET_X: |
| 161 | sk_decode_filter(ftest, ftest); | ||
| 188 | continue; | 162 | continue; |
| 189 | default: | 163 | default: |
| 190 | return -EINVAL; | 164 | return -EINVAL; |
| @@ -202,18 +176,21 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
| 202 | static u32 seccomp_run_filters(int syscall) | 176 | static u32 seccomp_run_filters(int syscall) |
| 203 | { | 177 | { |
| 204 | struct seccomp_filter *f; | 178 | struct seccomp_filter *f; |
| 179 | struct seccomp_data sd; | ||
| 205 | u32 ret = SECCOMP_RET_ALLOW; | 180 | u32 ret = SECCOMP_RET_ALLOW; |
| 206 | 181 | ||
| 207 | /* Ensure unexpected behavior doesn't result in failing open. */ | 182 | /* Ensure unexpected behavior doesn't result in failing open. */ |
| 208 | if (WARN_ON(current->seccomp.filter == NULL)) | 183 | if (WARN_ON(current->seccomp.filter == NULL)) |
| 209 | return SECCOMP_RET_KILL; | 184 | return SECCOMP_RET_KILL; |
| 210 | 185 | ||
| 186 | populate_seccomp_data(&sd); | ||
| 187 | |||
| 211 | /* | 188 | /* |
| 212 | * All filters in the list are evaluated and the lowest BPF return | 189 | * All filters in the list are evaluated and the lowest BPF return |
| 213 | * value always takes priority (ignoring the DATA). | 190 | * value always takes priority (ignoring the DATA). |
| 214 | */ | 191 | */ |
| 215 | for (f = current->seccomp.filter; f; f = f->prev) { | 192 | for (f = current->seccomp.filter; f; f = f->prev) { |
| 216 | u32 cur_ret = sk_run_filter(NULL, f->insns); | 193 | u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi); |
| 217 | if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) | 194 | if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) |
| 218 | ret = cur_ret; | 195 | ret = cur_ret; |
| 219 | } | 196 | } |
| @@ -231,6 +208,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) | |||
| 231 | struct seccomp_filter *filter; | 208 | struct seccomp_filter *filter; |
| 232 | unsigned long fp_size = fprog->len * sizeof(struct sock_filter); | 209 | unsigned long fp_size = fprog->len * sizeof(struct sock_filter); |
| 233 | unsigned long total_insns = fprog->len; | 210 | unsigned long total_insns = fprog->len; |
| 211 | struct sock_filter *fp; | ||
| 212 | int new_len; | ||
| 234 | long ret; | 213 | long ret; |
| 235 | 214 | ||
| 236 | if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) | 215 | if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) |
| @@ -252,28 +231,43 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) | |||
| 252 | CAP_SYS_ADMIN) != 0) | 231 | CAP_SYS_ADMIN) != 0) |
| 253 | return -EACCES; | 232 | return -EACCES; |
| 254 | 233 | ||
| 255 | /* Allocate a new seccomp_filter */ | 234 | fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN); |
| 256 | filter = kzalloc(sizeof(struct seccomp_filter) + fp_size, | 235 | if (!fp) |
| 257 | GFP_KERNEL|__GFP_NOWARN); | ||
| 258 | if (!filter) | ||
| 259 | return -ENOMEM; | 236 | return -ENOMEM; |
| 260 | atomic_set(&filter->usage, 1); | ||
| 261 | filter->len = fprog->len; | ||
| 262 | 237 | ||
| 263 | /* Copy the instructions from fprog. */ | 238 | /* Copy the instructions from fprog. */ |
| 264 | ret = -EFAULT; | 239 | ret = -EFAULT; |
| 265 | if (copy_from_user(filter->insns, fprog->filter, fp_size)) | 240 | if (copy_from_user(fp, fprog->filter, fp_size)) |
| 266 | goto fail; | 241 | goto free_prog; |
| 267 | 242 | ||
| 268 | /* Check and rewrite the fprog via the skb checker */ | 243 | /* Check and rewrite the fprog via the skb checker */ |
| 269 | ret = sk_chk_filter(filter->insns, filter->len); | 244 | ret = sk_chk_filter(fp, fprog->len); |
| 270 | if (ret) | 245 | if (ret) |
| 271 | goto fail; | 246 | goto free_prog; |
| 272 | 247 | ||
| 273 | /* Check and rewrite the fprog for seccomp use */ | 248 | /* Check and rewrite the fprog for seccomp use */ |
| 274 | ret = seccomp_check_filter(filter->insns, filter->len); | 249 | ret = seccomp_check_filter(fp, fprog->len); |
| 250 | if (ret) | ||
| 251 | goto free_prog; | ||
| 252 | |||
| 253 | /* Convert 'sock_filter' insns to 'sock_filter_int' insns */ | ||
| 254 | ret = sk_convert_filter(fp, fprog->len, NULL, &new_len); | ||
| 255 | if (ret) | ||
| 256 | goto free_prog; | ||
| 257 | |||
| 258 | /* Allocate a new seccomp_filter */ | ||
| 259 | filter = kzalloc(sizeof(struct seccomp_filter) + | ||
| 260 | sizeof(struct sock_filter_int) * new_len, | ||
| 261 | GFP_KERNEL|__GFP_NOWARN); | ||
| 262 | if (!filter) | ||
| 263 | goto free_prog; | ||
| 264 | |||
| 265 | ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len); | ||
| 275 | if (ret) | 266 | if (ret) |
| 276 | goto fail; | 267 | goto free_filter; |
| 268 | |||
| 269 | atomic_set(&filter->usage, 1); | ||
| 270 | filter->len = new_len; | ||
| 277 | 271 | ||
| 278 | /* | 272 | /* |
| 279 | * If there is an existing filter, make it the prev and don't drop its | 273 | * If there is an existing filter, make it the prev and don't drop its |
| @@ -282,8 +276,11 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) | |||
| 282 | filter->prev = current->seccomp.filter; | 276 | filter->prev = current->seccomp.filter; |
| 283 | current->seccomp.filter = filter; | 277 | current->seccomp.filter = filter; |
| 284 | return 0; | 278 | return 0; |
| 285 | fail: | 279 | |
| 280 | free_filter: | ||
| 286 | kfree(filter); | 281 | kfree(filter); |
| 282 | free_prog: | ||
| 283 | kfree(fp); | ||
| 287 | return ret; | 284 | return ret; |
| 288 | } | 285 | } |
| 289 | 286 | ||
| @@ -293,7 +290,7 @@ fail: | |||
| 293 | * | 290 | * |
| 294 | * Returns 0 on success and non-zero otherwise. | 291 | * Returns 0 on success and non-zero otherwise. |
| 295 | */ | 292 | */ |
| 296 | long seccomp_attach_user_filter(char __user *user_filter) | 293 | static long seccomp_attach_user_filter(char __user *user_filter) |
| 297 | { | 294 | { |
| 298 | struct sock_fprog fprog; | 295 | struct sock_fprog fprog; |
| 299 | long ret = -EFAULT; | 296 | long ret = -EFAULT; |
| @@ -351,7 +348,7 @@ static void seccomp_send_sigsys(int syscall, int reason) | |||
| 351 | info.si_code = SYS_SECCOMP; | 348 | info.si_code = SYS_SECCOMP; |
| 352 | info.si_call_addr = (void __user *)KSTK_EIP(current); | 349 | info.si_call_addr = (void __user *)KSTK_EIP(current); |
| 353 | info.si_errno = reason; | 350 | info.si_errno = reason; |
| 354 | info.si_arch = syscall_get_arch(current, task_pt_regs(current)); | 351 | info.si_arch = syscall_get_arch(); |
| 355 | info.si_syscall = syscall; | 352 | info.si_syscall = syscall; |
| 356 | force_sig_info(SIGSYS, &info, current); | 353 | force_sig_info(SIGSYS, &info, current); |
| 357 | } | 354 | } |
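
The rework above changes only the in-kernel representation: user-supplied classic BPF (struct sock_fprog / struct sock_filter) is still validated with sk_chk_filter() and seccomp_check_filter(), then converted to sock_filter_int for the new interpreter, so the userspace ABI is unchanged. A minimal, hedged sketch of installing an allow-everything filter through that path (error handling trimmed; assumes 3.5+ uapi headers):

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    int main(void)
    {
            /* classic BPF: load the syscall number, then return ALLOW */
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                             offsetof(struct seccomp_data, nr)),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = {
                    .len    = sizeof(insns) / sizeof(insns[0]),
                    .filter = insns,
            };

            /* without CAP_SYS_ADMIN the kernel requires no_new_privs first */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    perror("PR_SET_NO_NEW_PRIVS");
            if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
                    perror("PR_SET_SECCOMP");
            return 0;
    }
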
diff --git a/kernel/signal.c b/kernel/signal.c index 52f881db1ca0..6ea13c09ae56 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | #include <linux/uprobes.h> | 33 | #include <linux/uprobes.h> |
| 34 | #include <linux/compat.h> | 34 | #include <linux/compat.h> |
| 35 | #include <linux/cn_proc.h> | 35 | #include <linux/cn_proc.h> |
| 36 | #include <linux/compiler.h> | ||
| 37 | |||
| 36 | #define CREATE_TRACE_POINTS | 38 | #define CREATE_TRACE_POINTS |
| 37 | #include <trace/events/signal.h> | 39 | #include <trace/events/signal.h> |
| 38 | 40 | ||
| @@ -2382,7 +2384,7 @@ relock: | |||
| 2382 | * @regs: user register state | 2384 | * @regs: user register state |
| 2383 | * @stepping: nonzero if debugger single-step or block-step in use | 2385 | * @stepping: nonzero if debugger single-step or block-step in use |
| 2384 | * | 2386 | * |
| 2385 | * This function should be called when a signal has succesfully been | 2387 | * This function should be called when a signal has successfully been |
| 2386 | * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask | 2388 | * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask |
| 2387 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER | 2389 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER |
| 2388 | * is set in @ka->sa.sa_flags. Tracing is notified. | 2390 | * is set in @ka->sa.sa_flags. Tracing is notified. |
| @@ -3618,7 +3620,7 @@ SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) | |||
| 3618 | } | 3620 | } |
| 3619 | #endif | 3621 | #endif |
| 3620 | 3622 | ||
| 3621 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) | 3623 | __weak const char *arch_vma_name(struct vm_area_struct *vma) |
| 3622 | { | 3624 | { |
| 3623 | return NULL; | 3625 | return NULL; |
| 3624 | } | 3626 | } |
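
__weak (hence the new <linux/compiler.h> include) is just the kernel's spelling of the GCC weak attribute: the generic stub is emitted as a weak symbol and loses at link time to any strong definition an architecture supplies. A hedged sketch of the pattern; the address check in the override is purely illustrative, not real arch code:

    #include <linux/compiler.h>
    #include <linux/mm_types.h>

    /* generic fallback (one translation unit): weak, names nothing */
    __weak const char *arch_vma_name(struct vm_area_struct *vma)
    {
            return NULL;
    }

    /* an arch file (a separate translation unit) overrides it with a
     * strong definition; the check below is illustrative only */
    const char *arch_vma_name(struct vm_area_struct *vma)
    {
            return vma->vm_start == 0xffffffffff600000UL ? "[vsyscall]" : NULL;
    }
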
diff --git a/kernel/smp.c b/kernel/smp.c index ffee35bef179..06d574e42c72 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd) | |||
| 117 | csd->flags &= ~CSD_FLAG_LOCK; | 117 | csd->flags &= ~CSD_FLAG_LOCK; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | ||
| 121 | |||
| 120 | /* | 122 | /* |
| 121 | * Insert a previously allocated call_single_data element | 123 | * Insert a previously allocated call_single_data element |
| 122 | * for execution on the given CPU. data must already have | 124 | * for execution on the given CPU. data must already have |
| 123 | * ->func, ->info, and ->flags set. | 125 | * ->func, ->info, and ->flags set. |
| 124 | */ | 126 | */ |
| 125 | static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | 127 | static int generic_exec_single(int cpu, struct call_single_data *csd, |
| 128 | smp_call_func_t func, void *info, int wait) | ||
| 126 | { | 129 | { |
| 130 | struct call_single_data csd_stack = { .flags = 0 }; | ||
| 131 | unsigned long flags; | ||
| 132 | |||
| 133 | |||
| 134 | if (cpu == smp_processor_id()) { | ||
| 135 | local_irq_save(flags); | ||
| 136 | func(info); | ||
| 137 | local_irq_restore(flags); | ||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | |||
| 141 | |||
| 142 | if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) | ||
| 143 | return -ENXIO; | ||
| 144 | |||
| 145 | |||
| 146 | if (!csd) { | ||
| 147 | csd = &csd_stack; | ||
| 148 | if (!wait) | ||
| 149 | csd = &__get_cpu_var(csd_data); | ||
| 150 | } | ||
| 151 | |||
| 152 | csd_lock(csd); | ||
| 153 | |||
| 154 | csd->func = func; | ||
| 155 | csd->info = info; | ||
| 156 | |||
| 127 | if (wait) | 157 | if (wait) |
| 128 | csd->flags |= CSD_FLAG_WAIT; | 158 | csd->flags |= CSD_FLAG_WAIT; |
| 129 | 159 | ||
| @@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | |||
| 143 | 173 | ||
| 144 | if (wait) | 174 | if (wait) |
| 145 | csd_lock_wait(csd); | 175 | csd_lock_wait(csd); |
| 176 | |||
| 177 | return 0; | ||
| 146 | } | 178 | } |
| 147 | 179 | ||
| 148 | /* | 180 | /* |
| @@ -151,7 +183,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | |||
| 151 | */ | 183 | */ |
| 152 | void generic_smp_call_function_single_interrupt(void) | 184 | void generic_smp_call_function_single_interrupt(void) |
| 153 | { | 185 | { |
| 154 | struct llist_node *entry, *next; | 186 | struct llist_node *entry; |
| 187 | struct call_single_data *csd, *csd_next; | ||
| 155 | 188 | ||
| 156 | /* | 189 | /* |
| 157 | * Shouldn't receive this interrupt on a cpu that is not yet online. | 190 | * Shouldn't receive this interrupt on a cpu that is not yet online. |
| @@ -161,21 +194,12 @@ void generic_smp_call_function_single_interrupt(void) | |||
| 161 | entry = llist_del_all(&__get_cpu_var(call_single_queue)); | 194 | entry = llist_del_all(&__get_cpu_var(call_single_queue)); |
| 162 | entry = llist_reverse_order(entry); | 195 | entry = llist_reverse_order(entry); |
| 163 | 196 | ||
| 164 | while (entry) { | 197 | llist_for_each_entry_safe(csd, csd_next, entry, llist) { |
| 165 | struct call_single_data *csd; | ||
| 166 | |||
| 167 | next = entry->next; | ||
| 168 | |||
| 169 | csd = llist_entry(entry, struct call_single_data, llist); | ||
| 170 | csd->func(csd->info); | 198 | csd->func(csd->info); |
| 171 | csd_unlock(csd); | 199 | csd_unlock(csd); |
| 172 | |||
| 173 | entry = next; | ||
| 174 | } | 200 | } |
| 175 | } | 201 | } |
| 176 | 202 | ||
| 177 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | ||
| 178 | |||
| 179 | /* | 203 | /* |
| 180 | * smp_call_function_single - Run a function on a specific CPU | 204 | * smp_call_function_single - Run a function on a specific CPU |
| 181 | * @func: The function to run. This must be fast and non-blocking. | 205 | * @func: The function to run. This must be fast and non-blocking. |
| @@ -187,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | |||
| 187 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | 211 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, |
| 188 | int wait) | 212 | int wait) |
| 189 | { | 213 | { |
| 190 | struct call_single_data d = { | ||
| 191 | .flags = 0, | ||
| 192 | }; | ||
| 193 | unsigned long flags; | ||
| 194 | int this_cpu; | 214 | int this_cpu; |
| 195 | int err = 0; | 215 | int err; |
| 196 | 216 | ||
| 197 | /* | 217 | /* |
| 198 | * prevent preemption and reschedule on another processor, | 218 | * prevent preemption and reschedule on another processor, |
| @@ -209,32 +229,41 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | |||
| 209 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() | 229 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() |
| 210 | && !oops_in_progress); | 230 | && !oops_in_progress); |
| 211 | 231 | ||
| 212 | if (cpu == this_cpu) { | 232 | err = generic_exec_single(cpu, NULL, func, info, wait); |
| 213 | local_irq_save(flags); | ||
| 214 | func(info); | ||
| 215 | local_irq_restore(flags); | ||
| 216 | } else { | ||
| 217 | if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { | ||
| 218 | struct call_single_data *csd = &d; | ||
| 219 | 233 | ||
| 220 | if (!wait) | 234 | put_cpu(); |
| 221 | csd = &__get_cpu_var(csd_data); | ||
| 222 | 235 | ||
| 223 | csd_lock(csd); | 236 | return err; |
| 237 | } | ||
| 238 | EXPORT_SYMBOL(smp_call_function_single); | ||
| 224 | 239 | ||
| 225 | csd->func = func; | 240 | /** |
| 226 | csd->info = info; | 241 | * smp_call_function_single_async(): Run an asynchronous function on a |
| 227 | generic_exec_single(cpu, csd, wait); | 242 | * specific CPU. |
| 228 | } else { | 243 | * @cpu: The CPU to run on. |
| 229 | err = -ENXIO; /* CPU not online */ | 244 | * @csd: Pre-allocated and setup data structure |
| 230 | } | 245 | * |
| 231 | } | 246 | * Like smp_call_function_single(), but the call is asynchronous and |
| 247 | * can thus be done from contexts with disabled interrupts. | ||
| 248 | * | ||
| 249 | * The caller passes his own pre-allocated data structure | ||
| 250 | * (ie: embedded in an object) and is responsible for synchronizing it | ||
| 251 | * such that the IPIs performed on the @csd are strictly serialized. | ||
| 252 | * | ||
| 253 | * NOTE: Be careful, there is unfortunately no current debugging facility to | ||
| 254 | * validate the correctness of this serialization. | ||
| 255 | */ | ||
| 256 | int smp_call_function_single_async(int cpu, struct call_single_data *csd) | ||
| 257 | { | ||
| 258 | int err = 0; | ||
| 232 | 259 | ||
| 233 | put_cpu(); | 260 | preempt_disable(); |
| 261 | err = generic_exec_single(cpu, csd, csd->func, csd->info, 0); | ||
| 262 | preempt_enable(); | ||
| 234 | 263 | ||
| 235 | return err; | 264 | return err; |
| 236 | } | 265 | } |
| 237 | EXPORT_SYMBOL(smp_call_function_single); | 266 | EXPORT_SYMBOL_GPL(smp_call_function_single_async); |
| 238 | 267 | ||
| 239 | /* | 268 | /* |
| 240 | * smp_call_function_any - Run a function on any of the given cpus | 269 | * smp_call_function_any - Run a function on any of the given cpus |
| @@ -280,44 +309,6 @@ call: | |||
| 280 | EXPORT_SYMBOL_GPL(smp_call_function_any); | 309 | EXPORT_SYMBOL_GPL(smp_call_function_any); |
| 281 | 310 | ||
| 282 | /** | 311 | /** |
| 283 | * __smp_call_function_single(): Run a function on a specific CPU | ||
| 284 | * @cpu: The CPU to run on. | ||
| 285 | * @data: Pre-allocated and setup data structure | ||
| 286 | * @wait: If true, wait until function has completed on specified CPU. | ||
| 287 | * | ||
| 288 | * Like smp_call_function_single(), but allow caller to pass in a | ||
| 289 | * pre-allocated data structure. Useful for embedding @data inside | ||
| 290 | * other structures, for instance. | ||
| 291 | */ | ||
| 292 | void __smp_call_function_single(int cpu, struct call_single_data *csd, | ||
| 293 | int wait) | ||
| 294 | { | ||
| 295 | unsigned int this_cpu; | ||
| 296 | unsigned long flags; | ||
| 297 | |||
| 298 | this_cpu = get_cpu(); | ||
| 299 | /* | ||
| 300 | * Can deadlock when called with interrupts disabled. | ||
| 301 | * We allow cpu's that are not yet online though, as no one else can | ||
| 302 | * send smp call function interrupt to this cpu and as such deadlocks | ||
| 303 | * can't happen. | ||
| 304 | */ | ||
| 305 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() | ||
| 306 | && !oops_in_progress); | ||
| 307 | |||
| 308 | if (cpu == this_cpu) { | ||
| 309 | local_irq_save(flags); | ||
| 310 | csd->func(csd->info); | ||
| 311 | local_irq_restore(flags); | ||
| 312 | } else { | ||
| 313 | csd_lock(csd); | ||
| 314 | generic_exec_single(cpu, csd, wait); | ||
| 315 | } | ||
| 316 | put_cpu(); | ||
| 317 | } | ||
| 318 | EXPORT_SYMBOL_GPL(__smp_call_function_single); | ||
| 319 | |||
| 320 | /** | ||
| 321 | * smp_call_function_many(): Run a function on a set of other CPUs. | 312 | * smp_call_function_many(): Run a function on a set of other CPUs. |
| 322 | * @mask: The set of cpus to run on (only runs on online subset). | 313 | * @mask: The set of cpus to run on (only runs on online subset). |
| 323 | * @func: The function to run. This must be fast and non-blocking. | 314 | * @func: The function to run. This must be fast and non-blocking. |
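
smp_call_function_single_async() replaces __smp_call_function_single(): the caller owns the call_single_data, typically embeds it in a longer-lived object, and must not refire it until the previous IPI has completed. A hedged usage sketch; struct my_dev and the helpers around the csd are made up, only the smp API is real:

    #include <linux/smp.h>

    struct my_dev {                         /* hypothetical per-device state */
            struct call_single_data csd;
            int poked;
    };

    static void my_remote_poke(void *info)
    {
            struct my_dev *d = info;

            /* runs from the IPI handler on the target CPU: short and atomic */
            d->poked++;
    }

    static int my_kick_cpu(struct my_dev *d, int cpu)
    {
            d->csd.func = my_remote_poke;
            d->csd.info = d;

            /* asynchronous: returns at once and is usable with IRQs disabled;
             * reuse of d->csd must be serialized by the caller */
            return smp_call_function_single_async(cpu, &d->csd);
    }
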
diff --git a/kernel/softirq.c b/kernel/softirq.c index 490fcbb1dc5b..b50990a5bea0 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
| 26 | #include <linux/smpboot.h> | 26 | #include <linux/smpboot.h> |
| 27 | #include <linux/tick.h> | 27 | #include <linux/tick.h> |
| 28 | #include <linux/irq.h> | ||
| 28 | 29 | ||
| 29 | #define CREATE_TRACE_POINTS | 30 | #define CREATE_TRACE_POINTS |
| 30 | #include <trace/events/irq.h> | 31 | #include <trace/events/irq.h> |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 84571e09c907..01fbae5b97b7 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
| @@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * | |||
| 293 | */ | 293 | */ |
| 294 | smp_call_function_single(min(cpu1, cpu2), | 294 | smp_call_function_single(min(cpu1, cpu2), |
| 295 | &irq_cpu_stop_queue_work, | 295 | &irq_cpu_stop_queue_work, |
| 296 | &call_args, 0); | 296 | &call_args, 1); |
| 297 | lg_local_unlock(&stop_cpus_lock); | 297 | lg_local_unlock(&stop_cpus_lock); |
| 298 | preempt_enable(); | 298 | preempt_enable(); |
| 299 | 299 | ||
diff --git a/kernel/sys.c b/kernel/sys.c index c0a58be780a4..fba0f29401ea 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -174,10 +174,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) | |||
| 174 | 174 | ||
| 175 | /* normalize: avoid signed division (rounding problems) */ | 175 | /* normalize: avoid signed division (rounding problems) */ |
| 176 | error = -ESRCH; | 176 | error = -ESRCH; |
| 177 | if (niceval < -20) | 177 | if (niceval < MIN_NICE) |
| 178 | niceval = -20; | 178 | niceval = MIN_NICE; |
| 179 | if (niceval > 19) | 179 | if (niceval > MAX_NICE) |
| 180 | niceval = 19; | 180 | niceval = MAX_NICE; |
| 181 | 181 | ||
| 182 | rcu_read_lock(); | 182 | rcu_read_lock(); |
| 183 | read_lock(&tasklist_lock); | 183 | read_lock(&tasklist_lock); |
| @@ -1996,6 +1996,21 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
| 1996 | if (arg2 || arg3 || arg4 || arg5) | 1996 | if (arg2 || arg3 || arg4 || arg5) |
| 1997 | return -EINVAL; | 1997 | return -EINVAL; |
| 1998 | return current->no_new_privs ? 1 : 0; | 1998 | return current->no_new_privs ? 1 : 0; |
| 1999 | case PR_GET_THP_DISABLE: | ||
| 2000 | if (arg2 || arg3 || arg4 || arg5) | ||
| 2001 | return -EINVAL; | ||
| 2002 | error = !!(me->mm->def_flags & VM_NOHUGEPAGE); | ||
| 2003 | break; | ||
| 2004 | case PR_SET_THP_DISABLE: | ||
| 2005 | if (arg3 || arg4 || arg5) | ||
| 2006 | return -EINVAL; | ||
| 2007 | down_write(&me->mm->mmap_sem); | ||
| 2008 | if (arg2) | ||
| 2009 | me->mm->def_flags |= VM_NOHUGEPAGE; | ||
| 2010 | else | ||
| 2011 | me->mm->def_flags &= ~VM_NOHUGEPAGE; | ||
| 2012 | up_write(&me->mm->mmap_sem); | ||
| 2013 | break; | ||
| 1999 | default: | 2014 | default: |
| 2000 | error = -EINVAL; | 2015 | error = -EINVAL; |
| 2001 | break; | 2016 | break; |
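
The two new prctl() options toggle VM_NOHUGEPAGE in mm->def_flags, so they affect mappings created after the call rather than splitting existing huge pages. A userspace sketch, assuming a <linux/prctl.h> new enough to define PR_SET_THP_DISABLE and PR_GET_THP_DISABLE:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>        /* PR_SET_THP_DISABLE, PR_GET_THP_DISABLE */

    int main(void)
    {
            /* disable THP for this process: new VMAs inherit VM_NOHUGEPAGE */
            if (prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0))
                    perror("PR_SET_THP_DISABLE");

            /* the getter reports the state as the prctl() return value */
            printf("THP disabled: %d\n", prctl(PR_GET_THP_DISABLE, 0, 0, 0, 0));
            return 0;
    }
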
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 7078052284fd..bc8d1b74a6b9 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
| @@ -146,11 +146,13 @@ cond_syscall(sys_io_destroy); | |||
| 146 | cond_syscall(sys_io_submit); | 146 | cond_syscall(sys_io_submit); |
| 147 | cond_syscall(sys_io_cancel); | 147 | cond_syscall(sys_io_cancel); |
| 148 | cond_syscall(sys_io_getevents); | 148 | cond_syscall(sys_io_getevents); |
| 149 | cond_syscall(sys_sysfs); | ||
| 149 | cond_syscall(sys_syslog); | 150 | cond_syscall(sys_syslog); |
| 150 | cond_syscall(sys_process_vm_readv); | 151 | cond_syscall(sys_process_vm_readv); |
| 151 | cond_syscall(sys_process_vm_writev); | 152 | cond_syscall(sys_process_vm_writev); |
| 152 | cond_syscall(compat_sys_process_vm_readv); | 153 | cond_syscall(compat_sys_process_vm_readv); |
| 153 | cond_syscall(compat_sys_process_vm_writev); | 154 | cond_syscall(compat_sys_process_vm_writev); |
| 155 | cond_syscall(sys_uselib); | ||
| 154 | 156 | ||
| 155 | /* arch-specific weak syscall entries */ | 157 | /* arch-specific weak syscall entries */ |
| 156 | cond_syscall(sys_pciconfig_read); | 158 | cond_syscall(sys_pciconfig_read); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 49e13e1f8fe6..74f5b580fe34 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -112,9 +112,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max; | |||
| 112 | #ifndef CONFIG_MMU | 112 | #ifndef CONFIG_MMU |
| 113 | extern int sysctl_nr_trim_pages; | 113 | extern int sysctl_nr_trim_pages; |
| 114 | #endif | 114 | #endif |
| 115 | #ifdef CONFIG_BLOCK | ||
| 116 | extern int blk_iopoll_enabled; | ||
| 117 | #endif | ||
| 118 | 115 | ||
| 119 | /* Constants used for minimum and maximum */ | 116 | /* Constants used for minimum and maximum */ |
| 120 | #ifdef CONFIG_LOCKUP_DETECTOR | 117 | #ifdef CONFIG_LOCKUP_DETECTOR |
| @@ -126,7 +123,7 @@ static int __maybe_unused neg_one = -1; | |||
| 126 | static int zero; | 123 | static int zero; |
| 127 | static int __maybe_unused one = 1; | 124 | static int __maybe_unused one = 1; |
| 128 | static int __maybe_unused two = 2; | 125 | static int __maybe_unused two = 2; |
| 129 | static int __maybe_unused three = 3; | 126 | static int __maybe_unused four = 4; |
| 130 | static unsigned long one_ul = 1; | 127 | static unsigned long one_ul = 1; |
| 131 | static int one_hundred = 100; | 128 | static int one_hundred = 100; |
| 132 | #ifdef CONFIG_PRINTK | 129 | #ifdef CONFIG_PRINTK |
| @@ -144,6 +141,11 @@ static int min_percpu_pagelist_fract = 8; | |||
| 144 | static int ngroups_max = NGROUPS_MAX; | 141 | static int ngroups_max = NGROUPS_MAX; |
| 145 | static const int cap_last_cap = CAP_LAST_CAP; | 142 | static const int cap_last_cap = CAP_LAST_CAP; |
| 146 | 143 | ||
| 144 | /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */ | ||
| 145 | #ifdef CONFIG_DETECT_HUNG_TASK | ||
| 146 | static unsigned long hung_task_timeout_max = (LONG_MAX/HZ); | ||
| 147 | #endif | ||
| 148 | |||
| 147 | #ifdef CONFIG_INOTIFY_USER | 149 | #ifdef CONFIG_INOTIFY_USER |
| 148 | #include <linux/inotify.h> | 150 | #include <linux/inotify.h> |
| 149 | #endif | 151 | #endif |
| @@ -386,13 +388,6 @@ static struct ctl_table kern_table[] = { | |||
| 386 | .proc_handler = proc_dointvec, | 388 | .proc_handler = proc_dointvec, |
| 387 | }, | 389 | }, |
| 388 | { | 390 | { |
| 389 | .procname = "numa_balancing_migrate_deferred", | ||
| 390 | .data = &sysctl_numa_balancing_migrate_deferred, | ||
| 391 | .maxlen = sizeof(unsigned int), | ||
| 392 | .mode = 0644, | ||
| 393 | .proc_handler = proc_dointvec, | ||
| 394 | }, | ||
| 395 | { | ||
| 396 | .procname = "numa_balancing", | 391 | .procname = "numa_balancing", |
| 397 | .data = NULL, /* filled in by handler */ | 392 | .data = NULL, /* filled in by handler */ |
| 398 | .maxlen = sizeof(unsigned int), | 393 | .maxlen = sizeof(unsigned int), |
| @@ -995,6 +990,7 @@ static struct ctl_table kern_table[] = { | |||
| 995 | .maxlen = sizeof(unsigned long), | 990 | .maxlen = sizeof(unsigned long), |
| 996 | .mode = 0644, | 991 | .mode = 0644, |
| 997 | .proc_handler = proc_dohung_task_timeout_secs, | 992 | .proc_handler = proc_dohung_task_timeout_secs, |
| 993 | .extra2 = &hung_task_timeout_max, | ||
| 998 | }, | 994 | }, |
| 999 | { | 995 | { |
| 1000 | .procname = "hung_task_warnings", | 996 | .procname = "hung_task_warnings", |
| @@ -1094,15 +1090,6 @@ static struct ctl_table kern_table[] = { | |||
| 1094 | .proc_handler = proc_dointvec, | 1090 | .proc_handler = proc_dointvec, |
| 1095 | }, | 1091 | }, |
| 1096 | #endif | 1092 | #endif |
| 1097 | #ifdef CONFIG_BLOCK | ||
| 1098 | { | ||
| 1099 | .procname = "blk_iopoll", | ||
| 1100 | .data = &blk_iopoll_enabled, | ||
| 1101 | .maxlen = sizeof(int), | ||
| 1102 | .mode = 0644, | ||
| 1103 | .proc_handler = proc_dointvec, | ||
| 1104 | }, | ||
| 1105 | #endif | ||
| 1106 | { } | 1093 | { } |
| 1107 | }; | 1094 | }; |
| 1108 | 1095 | ||
| @@ -1283,7 +1270,7 @@ static struct ctl_table vm_table[] = { | |||
| 1283 | .mode = 0644, | 1270 | .mode = 0644, |
| 1284 | .proc_handler = drop_caches_sysctl_handler, | 1271 | .proc_handler = drop_caches_sysctl_handler, |
| 1285 | .extra1 = &one, | 1272 | .extra1 = &one, |
| 1286 | .extra2 = &three, | 1273 | .extra2 = &four, |
| 1287 | }, | 1274 | }, |
| 1288 | #ifdef CONFIG_COMPACTION | 1275 | #ifdef CONFIG_COMPACTION |
| 1289 | { | 1276 | { |
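
Both sysctl tweaks above work through the .extra1/.extra2 bounds that the generic proc handlers honour: drop_caches now accepts values up to 4 and hung_task_timeout_secs is clamped so the jiffies conversion cannot overflow. A hedged sketch of declaring a bounded integer sysctl with the same machinery (names are made up; registration would go through register_sysctl() or an existing table):

    #include <linux/sysctl.h>

    static int my_knob = 1;                 /* hypothetical tunable */
    static int my_knob_min;                 /* 0 */
    static int my_knob_max = 4;

    static struct ctl_table my_table[] = {
            {
                    .procname     = "my_knob",
                    .data         = &my_knob,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec_minmax,
                    .extra1       = &my_knob_min,   /* lower bound */
                    .extra2       = &my_knob_max,   /* upper bound */
            },
            { }
    };
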
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index 3ce6e8c5f3fc..f448513a45ed 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig | |||
| @@ -124,7 +124,7 @@ config NO_HZ_FULL | |||
| 124 | endchoice | 124 | endchoice |
| 125 | 125 | ||
| 126 | config NO_HZ_FULL_ALL | 126 | config NO_HZ_FULL_ALL |
| 127 | bool "Full dynticks system on all CPUs by default" | 127 | bool "Full dynticks system on all CPUs by default (except CPU 0)" |
| 128 | depends on NO_HZ_FULL | 128 | depends on NO_HZ_FULL |
| 129 | help | 129 | help |
| 130 | If the user doesn't pass the nohz_full boot option to | 130 | If the user doesn't pass the nohz_full boot option to |
diff --git a/kernel/time/Makefile b/kernel/time/Makefile index 9250130646f5..57a413fd0ebf 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile | |||
| @@ -3,7 +3,10 @@ obj-y += timeconv.o posix-clock.o alarmtimer.o | |||
| 3 | 3 | ||
| 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o | 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o |
| 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o | 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o |
| 6 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o | 6 | ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y) |
| 7 | obj-y += tick-broadcast.o | ||
| 8 | obj-$(CONFIG_TICK_ONESHOT) += tick-broadcast-hrtimer.o | ||
| 9 | endif | ||
| 7 | obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o | 10 | obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o |
| 8 | obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o | 11 | obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o |
| 9 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o | 12 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 086ad6043bcb..ad362c260ef4 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
| @@ -439,6 +439,19 @@ void clockevents_config_and_register(struct clock_event_device *dev, | |||
| 439 | } | 439 | } |
| 440 | EXPORT_SYMBOL_GPL(clockevents_config_and_register); | 440 | EXPORT_SYMBOL_GPL(clockevents_config_and_register); |
| 441 | 441 | ||
| 442 | int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) | ||
| 443 | { | ||
| 444 | clockevents_config(dev, freq); | ||
| 445 | |||
| 446 | if (dev->mode == CLOCK_EVT_MODE_ONESHOT) | ||
| 447 | return clockevents_program_event(dev, dev->next_event, false); | ||
| 448 | |||
| 449 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) | ||
| 450 | dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev); | ||
| 451 | |||
| 452 | return 0; | ||
| 453 | } | ||
| 454 | |||
| 442 | /** | 455 | /** |
| 443 | * clockevents_update_freq - Update frequency and reprogram a clock event device. | 456 | * clockevents_update_freq - Update frequency and reprogram a clock event device. |
| 444 | * @dev: device to modify | 457 | * @dev: device to modify |
| @@ -446,17 +459,22 @@ EXPORT_SYMBOL_GPL(clockevents_config_and_register); | |||
| 446 | * | 459 | * |
| 447 | * Reconfigure and reprogram a clock event device in oneshot | 460 | * Reconfigure and reprogram a clock event device in oneshot |
| 448 | * mode. Must be called on the cpu for which the device delivers per | 461 | * mode. Must be called on the cpu for which the device delivers per |
| 449 | * cpu timer events with interrupts disabled! Returns 0 on success, | 462 | * cpu timer events. If called for the broadcast device the core takes |
| 450 | * -ETIME when the event is in the past. | 463 | * care of serialization. |
| 464 | * | ||
| 465 | * Returns 0 on success, -ETIME when the event is in the past. | ||
| 451 | */ | 466 | */ |
| 452 | int clockevents_update_freq(struct clock_event_device *dev, u32 freq) | 467 | int clockevents_update_freq(struct clock_event_device *dev, u32 freq) |
| 453 | { | 468 | { |
| 454 | clockevents_config(dev, freq); | 469 | unsigned long flags; |
| 455 | 470 | int ret; | |
| 456 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) | ||
| 457 | return 0; | ||
| 458 | 471 | ||
| 459 | return clockevents_program_event(dev, dev->next_event, false); | 472 | local_irq_save(flags); |
| 473 | ret = tick_broadcast_update_freq(dev, freq); | ||
| 474 | if (ret == -ENODEV) | ||
| 475 | ret = __clockevents_update_freq(dev, freq); | ||
| 476 | local_irq_restore(flags); | ||
| 477 | return ret; | ||
| 460 | } | 478 | } |
| 461 | 479 | ||
| 462 | /* | 480 | /* |
| @@ -524,12 +542,13 @@ void clockevents_resume(void) | |||
| 524 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 542 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 525 | /** | 543 | /** |
| 526 | * clockevents_notify - notification about relevant events | 544 | * clockevents_notify - notification about relevant events |
| 545 | * Returns 0 on success, any other value on error | ||
| 527 | */ | 546 | */ |
| 528 | void clockevents_notify(unsigned long reason, void *arg) | 547 | int clockevents_notify(unsigned long reason, void *arg) |
| 529 | { | 548 | { |
| 530 | struct clock_event_device *dev, *tmp; | 549 | struct clock_event_device *dev, *tmp; |
| 531 | unsigned long flags; | 550 | unsigned long flags; |
| 532 | int cpu; | 551 | int cpu, ret = 0; |
| 533 | 552 | ||
| 534 | raw_spin_lock_irqsave(&clockevents_lock, flags); | 553 | raw_spin_lock_irqsave(&clockevents_lock, flags); |
| 535 | 554 | ||
| @@ -542,7 +561,7 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
| 542 | 561 | ||
| 543 | case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: | 562 | case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: |
| 544 | case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: | 563 | case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: |
| 545 | tick_broadcast_oneshot_control(reason); | 564 | ret = tick_broadcast_oneshot_control(reason); |
| 546 | break; | 565 | break; |
| 547 | 566 | ||
| 548 | case CLOCK_EVT_NOTIFY_CPU_DYING: | 567 | case CLOCK_EVT_NOTIFY_CPU_DYING: |
| @@ -585,6 +604,7 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
| 585 | break; | 604 | break; |
| 586 | } | 605 | } |
| 587 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); | 606 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
| 607 | return ret; | ||
| 588 | } | 608 | } |
| 589 | EXPORT_SYMBOL_GPL(clockevents_notify); | 609 | EXPORT_SYMBOL_GPL(clockevents_notify); |
| 590 | 610 | ||
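
clockevents_update_freq() now disables interrupts itself and routes the broadcast device through tick_broadcast_update_freq(), falling back to __clockevents_update_freq() for ordinary per-cpu devices, so a clock-event driver can call it straight from a clk rate-change notifier. A hedged sketch; struct my_timer and the notifier wiring are made up, only the clk/clockevents calls are real API:

    #include <linux/clk.h>
    #include <linux/clockchips.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>

    struct my_timer {                       /* hypothetical driver state */
            struct clock_event_device evt;
            struct notifier_block clk_nb;
    };

    static int my_timer_clk_notify(struct notifier_block *nb,
                                   unsigned long event, void *data)
    {
            struct clk_notifier_data *ndata = data;
            struct my_timer *t = container_of(nb, struct my_timer, clk_nb);

            /* recompute mult/shift and reprogram any pending oneshot event */
            if (event == POST_RATE_CHANGE)
                    clockevents_update_freq(&t->evt, ndata->new_rate);

            return NOTIFY_OK;
    }
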
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index af8d1d4f3d55..419a52cecd20 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
| @@ -514,12 +514,13 @@ static void sync_cmos_clock(struct work_struct *work) | |||
| 514 | next.tv_sec++; | 514 | next.tv_sec++; |
| 515 | next.tv_nsec -= NSEC_PER_SEC; | 515 | next.tv_nsec -= NSEC_PER_SEC; |
| 516 | } | 516 | } |
| 517 | schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); | 517 | queue_delayed_work(system_power_efficient_wq, |
| 518 | &sync_cmos_work, timespec_to_jiffies(&next)); | ||
| 518 | } | 519 | } |
| 519 | 520 | ||
| 520 | void ntp_notify_cmos_timer(void) | 521 | void ntp_notify_cmos_timer(void) |
| 521 | { | 522 | { |
| 522 | schedule_delayed_work(&sync_cmos_work, 0); | 523 | queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); |
| 523 | } | 524 | } |
| 524 | 525 | ||
| 525 | #else | 526 | #else |
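
Queueing the CMOS sync on system_power_efficient_wq stops it from pinning work to the CPU that scheduled it: with CONFIG_WQ_POWER_EFFICIENT the queue behaves as unbound, and without it the call degrades to the old schedule_delayed_work() behaviour. The same two-line conversion applies to any non-urgent delayed work; a hedged sketch with made-up names:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static void my_housekeeping(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_housekeeping_work, my_housekeeping);

    static void my_housekeeping(struct work_struct *work)
    {
            /* ... periodic, non-urgent work ... */

            /* re-arm on the power-efficient queue instead of the per-CPU one */
            queue_delayed_work(system_power_efficient_wq, &my_housekeeping_work, HZ);
    }

    static void my_housekeeping_start(void)
    {
            /* was: schedule_delayed_work(&my_housekeeping_work, 0); */
            queue_delayed_work(system_power_efficient_wq, &my_housekeeping_work, 0);
    }
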
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c new file mode 100644 index 000000000000..eb682d5c697c --- /dev/null +++ b/kernel/time/tick-broadcast-hrtimer.c | |||
| @@ -0,0 +1,106 @@ | |||
| 1 | /* | ||
| 2 | * linux/kernel/time/tick-broadcast-hrtimer.c | ||
| 3 | * This file emulates a local clock event device | ||
| 4 | * via a pseudo clock device. | ||
| 5 | */ | ||
| 6 | #include <linux/cpu.h> | ||
| 7 | #include <linux/err.h> | ||
| 8 | #include <linux/hrtimer.h> | ||
| 9 | #include <linux/interrupt.h> | ||
| 10 | #include <linux/percpu.h> | ||
| 11 | #include <linux/profile.h> | ||
| 12 | #include <linux/clockchips.h> | ||
| 13 | #include <linux/sched.h> | ||
| 14 | #include <linux/smp.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | |||
| 17 | #include "tick-internal.h" | ||
| 18 | |||
| 19 | static struct hrtimer bctimer; | ||
| 20 | |||
| 21 | static void bc_set_mode(enum clock_event_mode mode, | ||
| 22 | struct clock_event_device *bc) | ||
| 23 | { | ||
| 24 | switch (mode) { | ||
| 25 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
| 26 | /* | ||
| 27 | * Note, we cannot cancel the timer here as we might | ||
| 28 | * run into the following live lock scenario: | ||
| 29 | * | ||
| 30 | * cpu 0 cpu1 | ||
| 31 | * lock(broadcast_lock); | ||
| 32 | * hrtimer_interrupt() | ||
| 33 | * bc_handler() | ||
| 34 | * tick_handle_oneshot_broadcast(); | ||
| 35 | * lock(broadcast_lock); | ||
| 36 | * hrtimer_cancel() | ||
| 37 | * wait_for_callback() | ||
| 38 | */ | ||
| 39 | hrtimer_try_to_cancel(&bctimer); | ||
| 40 | break; | ||
| 41 | default: | ||
| 42 | break; | ||
| 43 | } | ||
| 44 | } | ||
| 45 | |||
| 46 | /* | ||
| 47 | * This is called from the guts of the broadcast code when the cpu | ||
| 48 | * which is about to enter idle has the earliest broadcast timer event. | ||
| 49 | */ | ||
| 50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | ||
| 51 | { | ||
| 52 | /* | ||
| 53 | * We try to cancel the timer first. If the callback is on | ||
| 54 | * flight on some other cpu then we let it handle it. If we | ||
| 55 | * were able to cancel the timer nothing can rearm it as we | ||
| 56 | * own broadcast_lock. | ||
| 57 | * | ||
| 58 | * However we can also be called from the event handler of | ||
| 59 | * ce_broadcast_hrtimer itself when it expires. We cannot | ||
| 60 | * restart the timer because we are in the callback, but we | ||
| 61 | * can set the expiry time and let the callback return | ||
| 62 | * HRTIMER_RESTART. | ||
| 63 | */ | ||
| 64 | if (hrtimer_try_to_cancel(&bctimer) >= 0) { | ||
| 65 | hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); | ||
| 66 | /* Bind the "device" to the cpu */ | ||
| 67 | bc->bound_on = smp_processor_id(); | ||
| 68 | } else if (bc->bound_on == smp_processor_id()) { | ||
| 69 | hrtimer_set_expires(&bctimer, expires); | ||
| 70 | } | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
| 74 | static struct clock_event_device ce_broadcast_hrtimer = { | ||
| 75 | .set_mode = bc_set_mode, | ||
| 76 | .set_next_ktime = bc_set_next, | ||
| 77 | .features = CLOCK_EVT_FEAT_ONESHOT | | ||
| 78 | CLOCK_EVT_FEAT_KTIME | | ||
| 79 | CLOCK_EVT_FEAT_HRTIMER, | ||
| 80 | .rating = 0, | ||
| 81 | .bound_on = -1, | ||
| 82 | .min_delta_ns = 1, | ||
| 83 | .max_delta_ns = KTIME_MAX, | ||
| 84 | .min_delta_ticks = 1, | ||
| 85 | .max_delta_ticks = ULONG_MAX, | ||
| 86 | .mult = 1, | ||
| 87 | .shift = 0, | ||
| 88 | .cpumask = cpu_all_mask, | ||
| 89 | }; | ||
| 90 | |||
| 91 | static enum hrtimer_restart bc_handler(struct hrtimer *t) | ||
| 92 | { | ||
| 93 | ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); | ||
| 94 | |||
| 95 | if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX) | ||
| 96 | return HRTIMER_NORESTART; | ||
| 97 | |||
| 98 | return HRTIMER_RESTART; | ||
| 99 | } | ||
| 100 | |||
| 101 | void tick_setup_hrtimer_broadcast(void) | ||
| 102 | { | ||
| 103 | hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
| 104 | bctimer.function = bc_handler; | ||
| 105 | clockevents_register_device(&ce_broadcast_hrtimer); | ||
| 106 | } | ||
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 98977a57ac72..64c5990fd500 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev) | |||
| 120 | return (dev && tick_broadcast_device.evtdev == dev); | 120 | return (dev && tick_broadcast_device.evtdev == dev); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq) | ||
| 124 | { | ||
| 125 | int ret = -ENODEV; | ||
| 126 | |||
| 127 | if (tick_is_broadcast_device(dev)) { | ||
| 128 | raw_spin_lock(&tick_broadcast_lock); | ||
| 129 | ret = __clockevents_update_freq(dev, freq); | ||
| 130 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 131 | } | ||
| 132 | return ret; | ||
| 133 | } | ||
| 134 | |||
| 135 | |||
| 123 | static void err_broadcast(const struct cpumask *mask) | 136 | static void err_broadcast(const struct cpumask *mask) |
| 124 | { | 137 | { |
| 125 | pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n"); | 138 | pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n"); |
| @@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask) | |||
| 272 | */ | 285 | */ |
| 273 | static void tick_do_periodic_broadcast(void) | 286 | static void tick_do_periodic_broadcast(void) |
| 274 | { | 287 | { |
| 275 | raw_spin_lock(&tick_broadcast_lock); | ||
| 276 | |||
| 277 | cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask); | 288 | cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask); |
| 278 | tick_do_broadcast(tmpmask); | 289 | tick_do_broadcast(tmpmask); |
| 279 | |||
| 280 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 281 | } | 290 | } |
| 282 | 291 | ||
| 283 | /* | 292 | /* |
| @@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 287 | { | 296 | { |
| 288 | ktime_t next; | 297 | ktime_t next; |
| 289 | 298 | ||
| 299 | raw_spin_lock(&tick_broadcast_lock); | ||
| 300 | |||
| 290 | tick_do_periodic_broadcast(); | 301 | tick_do_periodic_broadcast(); |
| 291 | 302 | ||
| 292 | /* | 303 | /* |
| 293 | * The device is in periodic mode. No reprogramming necessary: | 304 | * The device is in periodic mode. No reprogramming necessary: |
| 294 | */ | 305 | */ |
| 295 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) | 306 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) |
| 296 | return; | 307 | goto unlock; |
| 297 | 308 | ||
| 298 | /* | 309 | /* |
| 299 | * Setup the next period for devices, which do not have | 310 | * Setup the next period for devices, which do not have |
| @@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 306 | next = ktime_add(next, tick_period); | 317 | next = ktime_add(next, tick_period); |
| 307 | 318 | ||
| 308 | if (!clockevents_program_event(dev, next, false)) | 319 | if (!clockevents_program_event(dev, next, false)) |
| 309 | return; | 320 | goto unlock; |
| 310 | tick_do_periodic_broadcast(); | 321 | tick_do_periodic_broadcast(); |
| 311 | } | 322 | } |
| 323 | unlock: | ||
| 324 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 312 | } | 325 | } |
| 313 | 326 | ||
| 314 | /* | 327 | /* |
| @@ -630,24 +643,61 @@ again: | |||
| 630 | raw_spin_unlock(&tick_broadcast_lock); | 643 | raw_spin_unlock(&tick_broadcast_lock); |
| 631 | } | 644 | } |
| 632 | 645 | ||
| 646 | static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu) | ||
| 647 | { | ||
| 648 | if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER)) | ||
| 649 | return 0; | ||
| 650 | if (bc->next_event.tv64 == KTIME_MAX) | ||
| 651 | return 0; | ||
| 652 | return bc->bound_on == cpu ? -EBUSY : 0; | ||
| 653 | } | ||
| 654 | |||
| 655 | static void broadcast_shutdown_local(struct clock_event_device *bc, | ||
| 656 | struct clock_event_device *dev) | ||
| 657 | { | ||
| 658 | /* | ||
| 659 | * For hrtimer based broadcasting we cannot shutdown the cpu | ||
| 660 | * local device if our own event is the first one to expire or | ||
| 661 | * if we own the broadcast timer. | ||
| 662 | */ | ||
| 663 | if (bc->features & CLOCK_EVT_FEAT_HRTIMER) { | ||
| 664 | if (broadcast_needs_cpu(bc, smp_processor_id())) | ||
| 665 | return; | ||
| 666 | if (dev->next_event.tv64 < bc->next_event.tv64) | ||
| 667 | return; | ||
| 668 | } | ||
| 669 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | ||
| 670 | } | ||
| 671 | |||
| 672 | static void broadcast_move_bc(int deadcpu) | ||
| 673 | { | ||
| 674 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | ||
| 675 | |||
| 676 | if (!bc || !broadcast_needs_cpu(bc, deadcpu)) | ||
| 677 | return; | ||
| 678 | /* This moves the broadcast assignment to this cpu */ | ||
| 679 | clockevents_program_event(bc, bc->next_event, 1); | ||
| 680 | } | ||
| 681 | |||
| 633 | /* | 682 | /* |
| 634 | * Powerstate information: The system enters/leaves a state, where | 683 | * Powerstate information: The system enters/leaves a state, where |
| 635 | * affected devices might stop | 684 | * affected devices might stop |
| 685 | * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups. | ||
| 636 | */ | 686 | */ |
| 637 | void tick_broadcast_oneshot_control(unsigned long reason) | 687 | int tick_broadcast_oneshot_control(unsigned long reason) |
| 638 | { | 688 | { |
| 639 | struct clock_event_device *bc, *dev; | 689 | struct clock_event_device *bc, *dev; |
| 640 | struct tick_device *td; | 690 | struct tick_device *td; |
| 641 | unsigned long flags; | 691 | unsigned long flags; |
| 642 | ktime_t now; | 692 | ktime_t now; |
| 643 | int cpu; | 693 | int cpu, ret = 0; |
| 644 | 694 | ||
| 645 | /* | 695 | /* |
| 646 | * Periodic mode does not care about the enter/exit of power | 696 | * Periodic mode does not care about the enter/exit of power |
| 647 | * states | 697 | * states |
| 648 | */ | 698 | */ |
| 649 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | 699 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) |
| 650 | return; | 700 | return 0; |
| 651 | 701 | ||
| 652 | /* | 702 | /* |
| 653 | * We are called with preemption disabled from the depth of the | 703 | * We are called with preemption disabled from the depth of the |
| @@ -658,7 +708,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 658 | dev = td->evtdev; | 708 | dev = td->evtdev; |
| 659 | 709 | ||
| 660 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) | 710 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) |
| 661 | return; | 711 | return 0; |
| 662 | 712 | ||
| 663 | bc = tick_broadcast_device.evtdev; | 713 | bc = tick_broadcast_device.evtdev; |
| 664 | 714 | ||
| @@ -666,7 +716,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 666 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { | 716 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { |
| 667 | if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { | 717 | if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { |
| 668 | WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); | 718 | WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); |
| 669 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | 719 | broadcast_shutdown_local(bc, dev); |
| 670 | /* | 720 | /* |
| 671 | * We only reprogram the broadcast timer if we | 721 | * We only reprogram the broadcast timer if we |
| 672 | * did not mark ourself in the force mask and | 722 | * did not mark ourself in the force mask and |
| @@ -679,6 +729,16 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 679 | dev->next_event.tv64 < bc->next_event.tv64) | 729 | dev->next_event.tv64 < bc->next_event.tv64) |
| 680 | tick_broadcast_set_event(bc, cpu, dev->next_event, 1); | 730 | tick_broadcast_set_event(bc, cpu, dev->next_event, 1); |
| 681 | } | 731 | } |
| 732 | /* | ||
| 733 | * If the current CPU owns the hrtimer broadcast | ||
| 734 | * mechanism, it cannot go deep idle and we remove the | ||
| 735 | * CPU from the broadcast mask. We don't have to go | ||
| 736 | * through the EXIT path as the local timer is not | ||
| 737 | * shutdown. | ||
| 738 | */ | ||
| 739 | ret = broadcast_needs_cpu(bc, cpu); | ||
| 740 | if (ret) | ||
| 741 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); | ||
| 682 | } else { | 742 | } else { |
| 683 | if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { | 743 | if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { |
| 684 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); | 744 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); |
| @@ -746,6 +806,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 746 | } | 806 | } |
| 747 | out: | 807 | out: |
| 748 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 808 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 809 | return ret; | ||
| 749 | } | 810 | } |
| 750 | 811 | ||
| 751 | /* | 812 | /* |
| @@ -852,6 +913,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
| 852 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); | 913 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); |
| 853 | cpumask_clear_cpu(cpu, tick_broadcast_force_mask); | 914 | cpumask_clear_cpu(cpu, tick_broadcast_force_mask); |
| 854 | 915 | ||
| 916 | broadcast_move_bc(cpu); | ||
| 917 | |||
| 855 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 918 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 856 | } | 919 | } |
| 857 | 920 | ||
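
The int return value exists for the idle path: a CPU that currently backs the hrtimer-based broadcast device gets -EBUSY when it asks to enter broadcast mode and has to pick a shallower idle state instead. A hedged sketch of a consumer; my_enter_deep_idle() and its policy are invented, only the clockevents notification calls are real:

    #include <linux/clockchips.h>
    #include <linux/errno.h>
    #include <linux/smp.h>
    #include <linux/types.h>

    static int my_enter_deep_idle(bool timer_stops)  /* hypothetical helper */
    {
            int cpu = smp_processor_id();

            if (timer_stops &&
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu))
                    return -EBUSY;  /* this CPU owns the broadcast hrtimer */

            /* ... architecture-specific low-power entry ... */

            if (timer_stops)
                    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

            return 0;
    }
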
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 20b2fe37d105..015661279b68 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -98,18 +98,19 @@ static void tick_periodic(int cpu) | |||
| 98 | void tick_handle_periodic(struct clock_event_device *dev) | 98 | void tick_handle_periodic(struct clock_event_device *dev) |
| 99 | { | 99 | { |
| 100 | int cpu = smp_processor_id(); | 100 | int cpu = smp_processor_id(); |
| 101 | ktime_t next; | 101 | ktime_t next = dev->next_event; |
| 102 | 102 | ||
| 103 | tick_periodic(cpu); | 103 | tick_periodic(cpu); |
| 104 | 104 | ||
| 105 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) | 105 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) |
| 106 | return; | 106 | return; |
| 107 | /* | ||
| 108 | * Setup the next period for devices, which do not have | ||
| 109 | * periodic mode: | ||
| 110 | */ | ||
| 111 | next = ktime_add(dev->next_event, tick_period); | ||
| 112 | for (;;) { | 107 | for (;;) { |
| 108 | /* | ||
| 109 | * Setup the next period for devices, which do not have | ||
| 110 | * periodic mode: | ||
| 111 | */ | ||
| 112 | next = ktime_add(next, tick_period); | ||
| 113 | |||
| 113 | if (!clockevents_program_event(dev, next, false)) | 114 | if (!clockevents_program_event(dev, next, false)) |
| 114 | return; | 115 | return; |
| 115 | /* | 116 | /* |
| @@ -118,12 +119,11 @@ void tick_handle_periodic(struct clock_event_device *dev) | |||
| 118 | * to be sure we're using a real hardware clocksource. | 119 | * to be sure we're using a real hardware clocksource. |
| 119 | * Otherwise we could get trapped in an infinite | 120 | * Otherwise we could get trapped in an infinite |
| 120 | * loop, as the tick_periodic() increments jiffies, | 121 | * loop, as the tick_periodic() increments jiffies, |
| 121 | * when then will increment time, posibly causing | 122 | * which then will increment time, possibly causing |
| 122 | * the loop to trigger again and again. | 123 | * the loop to trigger again and again. |
| 123 | */ | 124 | */ |
| 124 | if (timekeeping_valid_for_hres()) | 125 | if (timekeeping_valid_for_hres()) |
| 125 | tick_periodic(cpu); | 126 | tick_periodic(cpu); |
| 126 | next = ktime_add(next, tick_period); | ||
| 127 | } | 127 | } |
| 128 | } | 128 | } |
| 129 | 129 | ||
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 8329669b51ec..7ab92b19965a 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
| @@ -46,7 +46,7 @@ extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); | |||
| 46 | extern void tick_resume_oneshot(void); | 46 | extern void tick_resume_oneshot(void); |
| 47 | # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 47 | # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
| 48 | extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); | 48 | extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); |
| 49 | extern void tick_broadcast_oneshot_control(unsigned long reason); | 49 | extern int tick_broadcast_oneshot_control(unsigned long reason); |
| 50 | extern void tick_broadcast_switch_to_oneshot(void); | 50 | extern void tick_broadcast_switch_to_oneshot(void); |
| 51 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); | 51 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); |
| 52 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 52 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
| @@ -58,7 +58,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 58 | { | 58 | { |
| 59 | BUG(); | 59 | BUG(); |
| 60 | } | 60 | } |
| 61 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 61 | static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; } |
| 62 | static inline void tick_broadcast_switch_to_oneshot(void) { } | 62 | static inline void tick_broadcast_switch_to_oneshot(void) { } |
| 63 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 63 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
| 64 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | 64 | static inline int tick_broadcast_oneshot_active(void) { return 0; } |
| @@ -87,7 +87,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 87 | { | 87 | { |
| 88 | BUG(); | 88 | BUG(); |
| 89 | } | 89 | } |
| 90 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 90 | static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; } |
| 91 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 91 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
| 92 | static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | 92 | static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
| 93 | { | 93 | { |
| @@ -111,6 +111,7 @@ extern int tick_resume_broadcast(void); | |||
| 111 | extern void tick_broadcast_init(void); | 111 | extern void tick_broadcast_init(void); |
| 112 | extern void | 112 | extern void |
| 113 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); | 113 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); |
| 114 | int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq); | ||
| 114 | 115 | ||
| 115 | #else /* !BROADCAST */ | 116 | #else /* !BROADCAST */ |
| 116 | 117 | ||
| @@ -133,6 +134,8 @@ static inline void tick_shutdown_broadcast(unsigned int *cpup) { } | |||
| 133 | static inline void tick_suspend_broadcast(void) { } | 134 | static inline void tick_suspend_broadcast(void) { } |
| 134 | static inline int tick_resume_broadcast(void) { return 0; } | 135 | static inline int tick_resume_broadcast(void) { return 0; } |
| 135 | static inline void tick_broadcast_init(void) { } | 136 | static inline void tick_broadcast_init(void) { } |
| 137 | static inline int tick_broadcast_update_freq(struct clock_event_device *dev, | ||
| 138 | u32 freq) { return -ENODEV; } | ||
| 136 | 139 | ||
| 137 | /* | 140 | /* |
| 138 | * Set the periodic handler in non broadcast mode | 141 | * Set the periodic handler in non broadcast mode |
| @@ -152,6 +155,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev) | |||
| 152 | return !(dev->features & CLOCK_EVT_FEAT_DUMMY); | 155 | return !(dev->features & CLOCK_EVT_FEAT_DUMMY); |
| 153 | } | 156 | } |
| 154 | 157 | ||
| 158 | int __clockevents_update_freq(struct clock_event_device *dev, u32 freq); | ||
| 159 | |||
| 155 | #endif | 160 | #endif |
| 156 | 161 | ||
| 157 | extern void do_timer(unsigned long ticks); | 162 | extern void do_timer(unsigned long ticks); |
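The tick-internal.h hunks above show a common kernel pattern: when CONFIG_GENERIC_CLOCKEVENTS_BROADCAST is disabled, the new tick_broadcast_update_freq() prototype is replaced by an inline stub returning -ENODEV, and tick_broadcast_oneshot_control() now returns an int so its stubs can trivially report success. Below is a minimal stand-alone sketch of that stub pattern; HAVE_BROADCAST and broadcast_update_freq() are illustrative names, not kernel symbols.

```c
#include <errno.h>
#include <stdio.h>

#ifdef HAVE_BROADCAST   /* stand-in for CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
int broadcast_update_freq(unsigned int freq)
{
	printf("reprogramming broadcast device to %u Hz\n", freq);
	return 0;
}
#else
/* Feature configured out: an inline stub keeps callers compiling. */
static inline int broadcast_update_freq(unsigned int freq)
{
	return -ENODEV;
}
#endif

int main(void)
{
	int err = broadcast_update_freq(1000);

	if (err == -ENODEV)
		printf("no broadcast device, staying on the per-CPU path\n");
	return 0;
}
```

Callers can stay identical in both configurations and simply react to the error code, which is exactly what the stubbed -ENODEV return enables.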
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 0aa4ce81bc16..f7df8ea21707 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/tick.h> | 22 | #include <linux/tick.h> |
| 23 | #include <linux/stop_machine.h> | 23 | #include <linux/stop_machine.h> |
| 24 | #include <linux/pvclock_gtod.h> | 24 | #include <linux/pvclock_gtod.h> |
| 25 | #include <linux/compiler.h> | ||
| 25 | 26 | ||
| 26 | #include "tick-internal.h" | 27 | #include "tick-internal.h" |
| 27 | #include "ntp_internal.h" | 28 | #include "ntp_internal.h" |
| @@ -760,7 +761,7 @@ u64 timekeeping_max_deferment(void) | |||
| 760 | * | 761 | * |
| 761 | * XXX - Do be sure to remove it once all arches implement it. | 762 | * XXX - Do be sure to remove it once all arches implement it. |
| 762 | */ | 763 | */ |
| 763 | void __attribute__((weak)) read_persistent_clock(struct timespec *ts) | 764 | void __weak read_persistent_clock(struct timespec *ts) |
| 764 | { | 765 | { |
| 765 | ts->tv_sec = 0; | 766 | ts->tv_sec = 0; |
| 766 | ts->tv_nsec = 0; | 767 | ts->tv_nsec = 0; |
| @@ -775,7 +776,7 @@ void __attribute__((weak)) read_persistent_clock(struct timespec *ts) | |||
| 775 | * | 776 | * |
| 776 | * XXX - Do be sure to remove it once all arches implement it. | 777 | * XXX - Do be sure to remove it once all arches implement it. |
| 777 | */ | 778 | */ |
| 778 | void __attribute__((weak)) read_boot_clock(struct timespec *ts) | 779 | void __weak read_boot_clock(struct timespec *ts) |
| 779 | { | 780 | { |
| 780 | ts->tv_sec = 0; | 781 | ts->tv_sec = 0; |
| 781 | ts->tv_nsec = 0; | 782 | ts->tv_nsec = 0; |
| @@ -1435,7 +1436,8 @@ void update_wall_time(void) | |||
| 1435 | out: | 1436 | out: |
| 1436 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1437 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
| 1437 | if (clock_set) | 1438 | if (clock_set) |
| 1438 | clock_was_set(); | 1439 | /* Have to call _delayed version, since in irq context */ |
| 1440 | clock_was_set_delayed(); | ||
| 1439 | } | 1441 | } |
| 1440 | 1442 | ||
| 1441 | /** | 1443 | /** |
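The timekeeping hunks swap the open-coded __attribute__((weak)) for the kernel's __weak shorthand, which is why <linux/compiler.h> is now included. The stand-alone sketch below shows how a weak default gets overridden at link time; the file split and read_persistent_clock_demo() are invented for illustration, and the local __weak define expands the same way the kernel macro does.

```c
/* default.c -- generic fallback, marked weak */
#include <stdio.h>
#include <time.h>

#define __weak __attribute__((weak))

/*
 * Weak default: used only when no other object file supplies a strong
 * definition of read_persistent_clock_demo().
 */
__weak void read_persistent_clock_demo(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

int main(void)
{
	struct timespec ts;

	read_persistent_clock_demo(&ts);
	printf("persistent clock: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

/*
 * arch.c (separate file, illustrative): linking this in replaces the
 * weak default above without touching the generic code.
 *
 * void read_persistent_clock_demo(struct timespec *ts)
 * {
 *         clock_gettime(CLOCK_REALTIME, ts);
 * }
 */
```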
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c index 802433a4f5eb..4d54f97558df 100644 --- a/kernel/time/timekeeping_debug.c +++ b/kernel/time/timekeeping_debug.c | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
| 22 | #include <linux/time.h> | 22 | #include <linux/time.h> |
| 23 | 23 | ||
| 24 | #include "timekeeping_internal.h" | ||
| 25 | |||
| 24 | static unsigned int sleep_time_bin[32] = {0}; | 26 | static unsigned int sleep_time_bin[32] = {0}; |
| 25 | 27 | ||
| 26 | static int tk_debug_show_sleep_time(struct seq_file *s, void *data) | 28 | static int tk_debug_show_sleep_time(struct seq_file *s, void *data) |
diff --git a/kernel/timer.c b/kernel/timer.c index accfd241b9e5..87bd529879c2 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -52,7 +52,7 @@ | |||
| 52 | #define CREATE_TRACE_POINTS | 52 | #define CREATE_TRACE_POINTS |
| 53 | #include <trace/events/timer.h> | 53 | #include <trace/events/timer.h> |
| 54 | 54 | ||
| 55 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; | 55 | __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; |
| 56 | 56 | ||
| 57 | EXPORT_SYMBOL(jiffies_64); | 57 | EXPORT_SYMBOL(jiffies_64); |
| 58 | 58 | ||
| @@ -81,6 +81,7 @@ struct tvec_base { | |||
| 81 | unsigned long timer_jiffies; | 81 | unsigned long timer_jiffies; |
| 82 | unsigned long next_timer; | 82 | unsigned long next_timer; |
| 83 | unsigned long active_timers; | 83 | unsigned long active_timers; |
| 84 | unsigned long all_timers; | ||
| 84 | struct tvec_root tv1; | 85 | struct tvec_root tv1; |
| 85 | struct tvec tv2; | 86 | struct tvec tv2; |
| 86 | struct tvec tv3; | 87 | struct tvec tv3; |
| @@ -337,6 +338,20 @@ void set_timer_slack(struct timer_list *timer, int slack_hz) | |||
| 337 | } | 338 | } |
| 338 | EXPORT_SYMBOL_GPL(set_timer_slack); | 339 | EXPORT_SYMBOL_GPL(set_timer_slack); |
| 339 | 340 | ||
| 341 | /* | ||
| 342 | * If the list is empty, catch up ->timer_jiffies to the current time. | ||
| 343 | * The caller must hold the tvec_base lock. Returns true if the list | ||
| 344 | * was empty and therefore ->timer_jiffies was updated. | ||
| 345 | */ | ||
| 346 | static bool catchup_timer_jiffies(struct tvec_base *base) | ||
| 347 | { | ||
| 348 | if (!base->all_timers) { | ||
| 349 | base->timer_jiffies = jiffies; | ||
| 350 | return true; | ||
| 351 | } | ||
| 352 | return false; | ||
| 353 | } | ||
| 354 | |||
| 340 | static void | 355 | static void |
| 341 | __internal_add_timer(struct tvec_base *base, struct timer_list *timer) | 356 | __internal_add_timer(struct tvec_base *base, struct timer_list *timer) |
| 342 | { | 357 | { |
| @@ -383,15 +398,17 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer) | |||
| 383 | 398 | ||
| 384 | static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) | 399 | static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) |
| 385 | { | 400 | { |
| 401 | (void)catchup_timer_jiffies(base); | ||
| 386 | __internal_add_timer(base, timer); | 402 | __internal_add_timer(base, timer); |
| 387 | /* | 403 | /* |
| 388 | * Update base->active_timers and base->next_timer | 404 | * Update base->active_timers and base->next_timer |
| 389 | */ | 405 | */ |
| 390 | if (!tbase_get_deferrable(timer->base)) { | 406 | if (!tbase_get_deferrable(timer->base)) { |
| 391 | if (time_before(timer->expires, base->next_timer)) | 407 | if (!base->active_timers++ || |
| 408 | time_before(timer->expires, base->next_timer)) | ||
| 392 | base->next_timer = timer->expires; | 409 | base->next_timer = timer->expires; |
| 393 | base->active_timers++; | ||
| 394 | } | 410 | } |
| 411 | base->all_timers++; | ||
| 395 | } | 412 | } |
| 396 | 413 | ||
| 397 | #ifdef CONFIG_TIMER_STATS | 414 | #ifdef CONFIG_TIMER_STATS |
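The new all_timers count and catchup_timer_jiffies() above let an idle timer base snap timer_jiffies forward when nothing is queued, instead of forcing __run_timers() to grind through every elapsed jiffy one bucket at a time. A user-space simulation of that idea (all names are local to this sketch):

```c
#include <stdbool.h>
#include <stdio.h>

struct demo_base {
	unsigned long timer_jiffies;   /* last tick the wheel processed */
	unsigned long all_timers;      /* pending timers, deferrable included */
};

static bool catchup(struct demo_base *base, unsigned long now)
{
	if (!base->all_timers) {
		base->timer_jiffies = now;   /* nothing queued: skip ahead */
		return true;
	}
	return false;
}

int main(void)
{
	unsigned long now = 1000000;   /* the CPU was idle for a long time */
	struct demo_base old_way = { .timer_jiffies = 1000, .all_timers = 0 };
	struct demo_base new_way = { .timer_jiffies = 1000, .all_timers = 0 };
	unsigned long iterations = 0;

	/* old behaviour: advance one jiffy per pass even though nothing expires */
	while (old_way.timer_jiffies < now) {
		old_way.timer_jiffies++;
		iterations++;
	}
	printf("without catch-up: %lu empty passes\n", iterations);

	/* new behaviour: one check, then jump straight to now */
	if (catchup(&new_way, now))
		printf("with catch-up: timer_jiffies=%lu after a single check\n",
		       new_way.timer_jiffies);
	return 0;
}
```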
| @@ -671,6 +688,8 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base) | |||
| 671 | detach_timer(timer, true); | 688 | detach_timer(timer, true); |
| 672 | if (!tbase_get_deferrable(timer->base)) | 689 | if (!tbase_get_deferrable(timer->base)) |
| 673 | base->active_timers--; | 690 | base->active_timers--; |
| 691 | base->all_timers--; | ||
| 692 | (void)catchup_timer_jiffies(base); | ||
| 674 | } | 693 | } |
| 675 | 694 | ||
| 676 | static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, | 695 | static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, |
| @@ -685,6 +704,8 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, | |||
| 685 | if (timer->expires == base->next_timer) | 704 | if (timer->expires == base->next_timer) |
| 686 | base->next_timer = base->timer_jiffies; | 705 | base->next_timer = base->timer_jiffies; |
| 687 | } | 706 | } |
| 707 | base->all_timers--; | ||
| 708 | (void)catchup_timer_jiffies(base); | ||
| 688 | return 1; | 709 | return 1; |
| 689 | } | 710 | } |
| 690 | 711 | ||
| @@ -739,12 +760,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, | |||
| 739 | 760 | ||
| 740 | debug_activate(timer, expires); | 761 | debug_activate(timer, expires); |
| 741 | 762 | ||
| 742 | cpu = smp_processor_id(); | 763 | cpu = get_nohz_timer_target(pinned); |
| 743 | |||
| 744 | #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) | ||
| 745 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) | ||
| 746 | cpu = get_nohz_timer_target(); | ||
| 747 | #endif | ||
| 748 | new_base = per_cpu(tvec_bases, cpu); | 764 | new_base = per_cpu(tvec_bases, cpu); |
| 749 | 765 | ||
| 750 | if (base != new_base) { | 766 | if (base != new_base) { |
| @@ -939,8 +955,15 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
| 939 | * with the timer by holding the timer base lock. This also | 955 | * with the timer by holding the timer base lock. This also |
| 940 | * makes sure that a CPU on the way to stop its tick can not | 956 | * makes sure that a CPU on the way to stop its tick can not |
| 941 | * evaluate the timer wheel. | 957 | * evaluate the timer wheel. |
| 958 | * | ||
| 959 | * Spare the IPI for deferrable timers on idle targets though. | ||
| 960 | * The next busy ticks will take care of it. Except full dynticks | ||
| 961 | require special care against races with idle_cpu(), let's deal | ||
| 962 | * with that later. | ||
| 942 | */ | 963 | */ |
| 943 | wake_up_nohz_cpu(cpu); | 964 | if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu)) |
| 965 | wake_up_nohz_cpu(cpu); | ||
| 966 | |||
| 944 | spin_unlock_irqrestore(&base->lock, flags); | 967 | spin_unlock_irqrestore(&base->lock, flags); |
| 945 | } | 968 | } |
| 946 | EXPORT_SYMBOL_GPL(add_timer_on); | 969 | EXPORT_SYMBOL_GPL(add_timer_on); |
| @@ -1146,6 +1169,10 @@ static inline void __run_timers(struct tvec_base *base) | |||
| 1146 | struct timer_list *timer; | 1169 | struct timer_list *timer; |
| 1147 | 1170 | ||
| 1148 | spin_lock_irq(&base->lock); | 1171 | spin_lock_irq(&base->lock); |
| 1172 | if (catchup_timer_jiffies(base)) { | ||
| 1173 | spin_unlock_irq(&base->lock); | ||
| 1174 | return; | ||
| 1175 | } | ||
| 1149 | while (time_after_eq(jiffies, base->timer_jiffies)) { | 1176 | while (time_after_eq(jiffies, base->timer_jiffies)) { |
| 1150 | struct list_head work_list; | 1177 | struct list_head work_list; |
| 1151 | struct list_head *head = &work_list; | 1178 | struct list_head *head = &work_list; |
| @@ -1160,7 +1187,7 @@ static inline void __run_timers(struct tvec_base *base) | |||
| 1160 | !cascade(base, &base->tv4, INDEX(2))) | 1187 | !cascade(base, &base->tv4, INDEX(2))) |
| 1161 | cascade(base, &base->tv5, INDEX(3)); | 1188 | cascade(base, &base->tv5, INDEX(3)); |
| 1162 | ++base->timer_jiffies; | 1189 | ++base->timer_jiffies; |
| 1163 | list_replace_init(base->tv1.vec + index, &work_list); | 1190 | list_replace_init(base->tv1.vec + index, head); |
| 1164 | while (!list_empty(head)) { | 1191 | while (!list_empty(head)) { |
| 1165 | void (*fn)(unsigned long); | 1192 | void (*fn)(unsigned long); |
| 1166 | unsigned long data; | 1193 | unsigned long data; |
| @@ -1523,9 +1550,8 @@ static int init_timers_cpu(int cpu) | |||
| 1523 | if (!base) | 1550 | if (!base) |
| 1524 | return -ENOMEM; | 1551 | return -ENOMEM; |
| 1525 | 1552 | ||
| 1526 | /* Make sure that tvec_base is 2 byte aligned */ | 1553 | /* Make sure tvec_base has TIMER_FLAG_MASK bits free */ |
| 1527 | if (tbase_get_deferrable(base)) { | 1554 | if (WARN_ON(base != tbase_get_base(base))) { |
| 1528 | WARN_ON(1); | ||
| 1529 | kfree(base); | 1555 | kfree(base); |
| 1530 | return -ENOMEM; | 1556 | return -ENOMEM; |
| 1531 | } | 1557 | } |
| @@ -1559,6 +1585,7 @@ static int init_timers_cpu(int cpu) | |||
| 1559 | base->timer_jiffies = jiffies; | 1585 | base->timer_jiffies = jiffies; |
| 1560 | base->next_timer = base->timer_jiffies; | 1586 | base->next_timer = base->timer_jiffies; |
| 1561 | base->active_timers = 0; | 1587 | base->active_timers = 0; |
| 1588 | base->all_timers = 0; | ||
| 1562 | return 0; | 1589 | return 0; |
| 1563 | } | 1590 | } |
| 1564 | 1591 | ||
| @@ -1648,9 +1675,9 @@ void __init init_timers(void) | |||
| 1648 | 1675 | ||
| 1649 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | 1676 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
| 1650 | (void *)(long)smp_processor_id()); | 1677 | (void *)(long)smp_processor_id()); |
| 1651 | init_timer_stats(); | ||
| 1652 | |||
| 1653 | BUG_ON(err != NOTIFY_OK); | 1678 | BUG_ON(err != NOTIFY_OK); |
| 1679 | |||
| 1680 | init_timer_stats(); | ||
| 1654 | register_cpu_notifier(&timers_nb); | 1681 | register_cpu_notifier(&timers_nb); |
| 1655 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); | 1682 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
| 1656 | } | 1683 | } |
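The reworded WARN_ON in init_timers_cpu() checks that the allocated tvec_base leaves its low TIMER_FLAG_MASK bits clear, because those bits are reused to tag timer->base with per-timer flags (tbase_get_deferrable() in the hunks above reads one of them). A minimal sketch of that pointer-tagging trick follows; the mask value and helper names are illustrative, not the kernel's definitions.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_MASK       0x3UL   /* two low bits reserved for flags */
#define FLAG_DEFERRABLE 0x1UL

static void *base_of(uintptr_t tagged)
{
	return (void *)(tagged & ~FLAG_MASK);
}

static int is_deferrable(uintptr_t tagged)
{
	return tagged & FLAG_DEFERRABLE;
}

int main(void)
{
	/* aligned_alloc guarantees the low bits of the pointer are clear */
	void *base = aligned_alloc(4, 64);
	uintptr_t tagged;

	if (!base)
		return 1;
	assert(((uintptr_t)base & FLAG_MASK) == 0);   /* the WARN_ON condition */

	tagged = (uintptr_t)base | FLAG_DEFERRABLE;
	printf("base=%p deferrable=%d\n", base_of(tagged), is_deferrable(tagged));
	free(base);
	return 0;
}
```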
diff --git a/kernel/torture.c b/kernel/torture.c new file mode 100644 index 000000000000..acc9afc2f26e --- /dev/null +++ b/kernel/torture.c | |||
| @@ -0,0 +1,719 @@ | |||
| 1 | /* | ||
| 2 | * Common functions for in-kernel torture tests. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, you can access it online at | ||
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 17 | * | ||
| 18 | * Copyright (C) IBM Corporation, 2014 | ||
| 19 | * | ||
| 20 | * Author: Paul E. McKenney <paulmck@us.ibm.com> | ||
| 21 | * Based on kernel/rcu/torture.c. | ||
| 22 | */ | ||
| 23 | #include <linux/types.h> | ||
| 24 | #include <linux/kernel.h> | ||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/kthread.h> | ||
| 28 | #include <linux/err.h> | ||
| 29 | #include <linux/spinlock.h> | ||
| 30 | #include <linux/smp.h> | ||
| 31 | #include <linux/interrupt.h> | ||
| 32 | #include <linux/sched.h> | ||
| 33 | #include <linux/atomic.h> | ||
| 34 | #include <linux/bitops.h> | ||
| 35 | #include <linux/completion.h> | ||
| 36 | #include <linux/moduleparam.h> | ||
| 37 | #include <linux/percpu.h> | ||
| 38 | #include <linux/notifier.h> | ||
| 39 | #include <linux/reboot.h> | ||
| 40 | #include <linux/freezer.h> | ||
| 41 | #include <linux/cpu.h> | ||
| 42 | #include <linux/delay.h> | ||
| 43 | #include <linux/stat.h> | ||
| 44 | #include <linux/slab.h> | ||
| 45 | #include <linux/trace_clock.h> | ||
| 46 | #include <asm/byteorder.h> | ||
| 47 | #include <linux/torture.h> | ||
| 48 | |||
| 49 | MODULE_LICENSE("GPL"); | ||
| 50 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); | ||
| 51 | |||
| 52 | static char *torture_type; | ||
| 53 | static bool verbose; | ||
| 54 | |||
| 55 | /* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ | ||
| 56 | #define FULLSTOP_DONTSTOP 0 /* Normal operation. */ | ||
| 57 | #define FULLSTOP_SHUTDOWN 1 /* System shutdown with torture running. */ | ||
| 58 | #define FULLSTOP_RMMOD 2 /* Normal rmmod of torture. */ | ||
| 59 | static int fullstop = FULLSTOP_RMMOD; | ||
| 60 | static DEFINE_MUTEX(fullstop_mutex); | ||
| 61 | static int *torture_runnable; | ||
| 62 | |||
| 63 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 64 | |||
| 65 | /* | ||
| 66 | * Variables for online-offline handling. Only present if CPU hotplug | ||
| 67 | * is enabled, otherwise does nothing. | ||
| 68 | */ | ||
| 69 | |||
| 70 | static struct task_struct *onoff_task; | ||
| 71 | static long onoff_holdoff; | ||
| 72 | static long onoff_interval; | ||
| 73 | static long n_offline_attempts; | ||
| 74 | static long n_offline_successes; | ||
| 75 | static unsigned long sum_offline; | ||
| 76 | static int min_offline = -1; | ||
| 77 | static int max_offline; | ||
| 78 | static long n_online_attempts; | ||
| 79 | static long n_online_successes; | ||
| 80 | static unsigned long sum_online; | ||
| 81 | static int min_online = -1; | ||
| 82 | static int max_online; | ||
| 83 | |||
| 84 | /* | ||
| 85 | * Execute random CPU-hotplug operations at the interval specified | ||
| 86 | * by the onoff_interval. | ||
| 87 | */ | ||
| 88 | static int | ||
| 89 | torture_onoff(void *arg) | ||
| 90 | { | ||
| 91 | int cpu; | ||
| 92 | unsigned long delta; | ||
| 93 | int maxcpu = -1; | ||
| 94 | DEFINE_TORTURE_RANDOM(rand); | ||
| 95 | int ret; | ||
| 96 | unsigned long starttime; | ||
| 97 | |||
| 98 | VERBOSE_TOROUT_STRING("torture_onoff task started"); | ||
| 99 | for_each_online_cpu(cpu) | ||
| 100 | maxcpu = cpu; | ||
| 101 | WARN_ON(maxcpu < 0); | ||
| 102 | if (onoff_holdoff > 0) { | ||
| 103 | VERBOSE_TOROUT_STRING("torture_onoff begin holdoff"); | ||
| 104 | schedule_timeout_interruptible(onoff_holdoff); | ||
| 105 | VERBOSE_TOROUT_STRING("torture_onoff end holdoff"); | ||
| 106 | } | ||
| 107 | while (!torture_must_stop()) { | ||
| 108 | cpu = (torture_random(&rand) >> 4) % (maxcpu + 1); | ||
| 109 | if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { | ||
| 110 | if (verbose) | ||
| 111 | pr_alert("%s" TORTURE_FLAG | ||
| 112 | "torture_onoff task: offlining %d\n", | ||
| 113 | torture_type, cpu); | ||
| 114 | starttime = jiffies; | ||
| 115 | n_offline_attempts++; | ||
| 116 | ret = cpu_down(cpu); | ||
| 117 | if (ret) { | ||
| 118 | if (verbose) | ||
| 119 | pr_alert("%s" TORTURE_FLAG | ||
| 120 | "torture_onoff task: offline %d failed: errno %d\n", | ||
| 121 | torture_type, cpu, ret); | ||
| 122 | } else { | ||
| 123 | if (verbose) | ||
| 124 | pr_alert("%s" TORTURE_FLAG | ||
| 125 | "torture_onoff task: offlined %d\n", | ||
| 126 | torture_type, cpu); | ||
| 127 | n_offline_successes++; | ||
| 128 | delta = jiffies - starttime; | ||
| 129 | sum_offline += delta; | ||
| 130 | if (min_offline < 0) { | ||
| 131 | min_offline = delta; | ||
| 132 | max_offline = delta; | ||
| 133 | } | ||
| 134 | if (min_offline > delta) | ||
| 135 | min_offline = delta; | ||
| 136 | if (max_offline < delta) | ||
| 137 | max_offline = delta; | ||
| 138 | } | ||
| 139 | } else if (cpu_is_hotpluggable(cpu)) { | ||
| 140 | if (verbose) | ||
| 141 | pr_alert("%s" TORTURE_FLAG | ||
| 142 | "torture_onoff task: onlining %d\n", | ||
| 143 | torture_type, cpu); | ||
| 144 | starttime = jiffies; | ||
| 145 | n_online_attempts++; | ||
| 146 | ret = cpu_up(cpu); | ||
| 147 | if (ret) { | ||
| 148 | if (verbose) | ||
| 149 | pr_alert("%s" TORTURE_FLAG | ||
| 150 | "torture_onoff task: online %d failed: errno %d\n", | ||
| 151 | torture_type, cpu, ret); | ||
| 152 | } else { | ||
| 153 | if (verbose) | ||
| 154 | pr_alert("%s" TORTURE_FLAG | ||
| 155 | "torture_onoff task: onlined %d\n", | ||
| 156 | torture_type, cpu); | ||
| 157 | n_online_successes++; | ||
| 158 | delta = jiffies - starttime; | ||
| 159 | sum_online += delta; | ||
| 160 | if (min_online < 0) { | ||
| 161 | min_online = delta; | ||
| 162 | max_online = delta; | ||
| 163 | } | ||
| 164 | if (min_online > delta) | ||
| 165 | min_online = delta; | ||
| 166 | if (max_online < delta) | ||
| 167 | max_online = delta; | ||
| 168 | } | ||
| 169 | } | ||
| 170 | schedule_timeout_interruptible(onoff_interval); | ||
| 171 | } | ||
| 172 | torture_kthread_stopping("torture_onoff"); | ||
| 173 | return 0; | ||
| 174 | } | ||
| 175 | |||
| 176 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 177 | |||
| 178 | /* | ||
| 179 | * Initiate online-offline handling. | ||
| 180 | */ | ||
| 181 | int torture_onoff_init(long ooholdoff, long oointerval) | ||
| 182 | { | ||
| 183 | int ret = 0; | ||
| 184 | |||
| 185 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 186 | onoff_holdoff = ooholdoff; | ||
| 187 | onoff_interval = oointerval; | ||
| 188 | if (onoff_interval <= 0) | ||
| 189 | return 0; | ||
| 190 | ret = torture_create_kthread(torture_onoff, NULL, onoff_task); | ||
| 191 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 192 | return ret; | ||
| 193 | } | ||
| 194 | EXPORT_SYMBOL_GPL(torture_onoff_init); | ||
| 195 | |||
| 196 | /* | ||
| 197 | * Clean up after online/offline testing. | ||
| 198 | */ | ||
| 199 | static void torture_onoff_cleanup(void) | ||
| 200 | { | ||
| 201 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 202 | if (onoff_task == NULL) | ||
| 203 | return; | ||
| 204 | VERBOSE_TOROUT_STRING("Stopping torture_onoff task"); | ||
| 205 | kthread_stop(onoff_task); | ||
| 206 | onoff_task = NULL; | ||
| 207 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 208 | } | ||
| 209 | EXPORT_SYMBOL_GPL(torture_onoff_cleanup); | ||
| 210 | |||
| 211 | /* | ||
| 212 | * Print online/offline testing statistics. | ||
| 213 | */ | ||
| 214 | char *torture_onoff_stats(char *page) | ||
| 215 | { | ||
| 216 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 217 | page += sprintf(page, | ||
| 218 | "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ", | ||
| 219 | n_online_successes, n_online_attempts, | ||
| 220 | n_offline_successes, n_offline_attempts, | ||
| 221 | min_online, max_online, | ||
| 222 | min_offline, max_offline, | ||
| 223 | sum_online, sum_offline, HZ); | ||
| 224 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 225 | return page; | ||
| 226 | } | ||
| 227 | EXPORT_SYMBOL_GPL(torture_onoff_stats); | ||
| 228 | |||
| 229 | /* | ||
| 230 | * Were all the online/offline operations successful? | ||
| 231 | */ | ||
| 232 | bool torture_onoff_failures(void) | ||
| 233 | { | ||
| 234 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 235 | return n_online_successes != n_online_attempts || | ||
| 236 | n_offline_successes != n_offline_attempts; | ||
| 237 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 238 | return false; | ||
| 239 | #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ | ||
| 240 | } | ||
| 241 | EXPORT_SYMBOL_GPL(torture_onoff_failures); | ||
| 242 | |||
| 243 | #define TORTURE_RANDOM_MULT 39916801 /* prime */ | ||
| 244 | #define TORTURE_RANDOM_ADD 479001701 /* prime */ | ||
| 245 | #define TORTURE_RANDOM_REFRESH 10000 | ||
| 246 | |||
| 247 | /* | ||
| 248 | * Crude but fast random-number generator. Uses a linear congruential | ||
| 249 | * generator, with occasional help from cpu_clock(). | ||
| 250 | */ | ||
| 251 | unsigned long | ||
| 252 | torture_random(struct torture_random_state *trsp) | ||
| 253 | { | ||
| 254 | if (--trsp->trs_count < 0) { | ||
| 255 | trsp->trs_state += (unsigned long)local_clock(); | ||
| 256 | trsp->trs_count = TORTURE_RANDOM_REFRESH; | ||
| 257 | } | ||
| 258 | trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT + | ||
| 259 | TORTURE_RANDOM_ADD; | ||
| 260 | return swahw32(trsp->trs_state); | ||
| 261 | } | ||
| 262 | EXPORT_SYMBOL_GPL(torture_random); | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Variables for shuffling. The idea is to ensure that each CPU stays | ||
| 266 | * idle for an extended period to test interactions with dyntick idle, | ||
| 267 | * as well as interactions with any per-CPU variables. | ||
| 268 | */ | ||
| 269 | struct shuffle_task { | ||
| 270 | struct list_head st_l; | ||
| 271 | struct task_struct *st_t; | ||
| 272 | }; | ||
| 273 | |||
| 274 | static long shuffle_interval; /* In jiffies. */ | ||
| 275 | static struct task_struct *shuffler_task; | ||
| 276 | static cpumask_var_t shuffle_tmp_mask; | ||
| 277 | static int shuffle_idle_cpu; /* Force all torture tasks off this CPU */ | ||
| 278 | static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list); | ||
| 279 | static DEFINE_MUTEX(shuffle_task_mutex); | ||
| 280 | |||
| 281 | /* | ||
| 282 | * Register a task to be shuffled. If there is no memory, just splat | ||
| 283 | * and don't bother registering. | ||
| 284 | */ | ||
| 285 | void torture_shuffle_task_register(struct task_struct *tp) | ||
| 286 | { | ||
| 287 | struct shuffle_task *stp; | ||
| 288 | |||
| 289 | if (WARN_ON_ONCE(tp == NULL)) | ||
| 290 | return; | ||
| 291 | stp = kmalloc(sizeof(*stp), GFP_KERNEL); | ||
| 292 | if (WARN_ON_ONCE(stp == NULL)) | ||
| 293 | return; | ||
| 294 | stp->st_t = tp; | ||
| 295 | mutex_lock(&shuffle_task_mutex); | ||
| 296 | list_add(&stp->st_l, &shuffle_task_list); | ||
| 297 | mutex_unlock(&shuffle_task_mutex); | ||
| 298 | } | ||
| 299 | EXPORT_SYMBOL_GPL(torture_shuffle_task_register); | ||
| 300 | |||
| 301 | /* | ||
| 302 | * Unregister all tasks, for example, at the end of the torture run. | ||
| 303 | */ | ||
| 304 | static void torture_shuffle_task_unregister_all(void) | ||
| 305 | { | ||
| 306 | struct shuffle_task *stp; | ||
| 307 | struct shuffle_task *p; | ||
| 308 | |||
| 309 | mutex_lock(&shuffle_task_mutex); | ||
| 310 | list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) { | ||
| 311 | list_del(&stp->st_l); | ||
| 312 | kfree(stp); | ||
| 313 | } | ||
| 314 | mutex_unlock(&shuffle_task_mutex); | ||
| 315 | } | ||
| 316 | |||
| 317 | /* Shuffle tasks such that we allow shuffle_idle_cpu to become idle. | ||
| 318 | * A special case is when shuffle_idle_cpu = -1, in which case we allow | ||
| 319 | * the tasks to run on all CPUs. | ||
| 320 | */ | ||
| 321 | static void torture_shuffle_tasks(void) | ||
| 322 | { | ||
| 323 | struct shuffle_task *stp; | ||
| 324 | |||
| 325 | cpumask_setall(shuffle_tmp_mask); | ||
| 326 | get_online_cpus(); | ||
| 327 | |||
| 328 | /* No point in shuffling if there is only one online CPU (ex: UP) */ | ||
| 329 | if (num_online_cpus() == 1) { | ||
| 330 | put_online_cpus(); | ||
| 331 | return; | ||
| 332 | } | ||
| 333 | |||
| 334 | /* Advance to the next CPU. Upon overflow, don't idle any CPUs. */ | ||
| 335 | shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask); | ||
| 336 | if (shuffle_idle_cpu >= nr_cpu_ids) | ||
| 337 | shuffle_idle_cpu = -1; | ||
| 338 | if (shuffle_idle_cpu != -1) { | ||
| 339 | cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask); | ||
| 340 | if (cpumask_empty(shuffle_tmp_mask)) { | ||
| 341 | put_online_cpus(); | ||
| 342 | return; | ||
| 343 | } | ||
| 344 | } | ||
| 345 | |||
| 346 | mutex_lock(&shuffle_task_mutex); | ||
| 347 | list_for_each_entry(stp, &shuffle_task_list, st_l) | ||
| 348 | set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask); | ||
| 349 | mutex_unlock(&shuffle_task_mutex); | ||
| 350 | |||
| 351 | put_online_cpus(); | ||
| 352 | } | ||
| 353 | |||
| 354 | /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the | ||
| 355 | * system to become idle at a time and cut off its timer ticks. This is meant | ||
| 356 | * to test the support for such tickless idle CPU in RCU. | ||
| 357 | */ | ||
| 358 | static int torture_shuffle(void *arg) | ||
| 359 | { | ||
| 360 | VERBOSE_TOROUT_STRING("torture_shuffle task started"); | ||
| 361 | do { | ||
| 362 | schedule_timeout_interruptible(shuffle_interval); | ||
| 363 | torture_shuffle_tasks(); | ||
| 364 | torture_shutdown_absorb("torture_shuffle"); | ||
| 365 | } while (!torture_must_stop()); | ||
| 366 | torture_kthread_stopping("torture_shuffle"); | ||
| 367 | return 0; | ||
| 368 | } | ||
| 369 | |||
| 370 | /* | ||
| 371 | * Start the shuffler, with shuffint in jiffies. | ||
| 372 | */ | ||
| 373 | int torture_shuffle_init(long shuffint) | ||
| 374 | { | ||
| 375 | shuffle_interval = shuffint; | ||
| 376 | |||
| 377 | shuffle_idle_cpu = -1; | ||
| 378 | |||
| 379 | if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) { | ||
| 380 | VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask"); | ||
| 381 | return -ENOMEM; | ||
| 382 | } | ||
| 383 | |||
| 384 | /* Create the shuffler thread */ | ||
| 385 | return torture_create_kthread(torture_shuffle, NULL, shuffler_task); | ||
| 386 | } | ||
| 387 | EXPORT_SYMBOL_GPL(torture_shuffle_init); | ||
| 388 | |||
| 389 | /* | ||
| 390 | * Stop the shuffling. | ||
| 391 | */ | ||
| 392 | static void torture_shuffle_cleanup(void) | ||
| 393 | { | ||
| 394 | torture_shuffle_task_unregister_all(); | ||
| 395 | if (shuffler_task) { | ||
| 396 | VERBOSE_TOROUT_STRING("Stopping torture_shuffle task"); | ||
| 397 | kthread_stop(shuffler_task); | ||
| 398 | free_cpumask_var(shuffle_tmp_mask); | ||
| 399 | } | ||
| 400 | shuffler_task = NULL; | ||
| 401 | } | ||
| 402 | EXPORT_SYMBOL_GPL(torture_shuffle_cleanup); | ||
| 403 | |||
| 404 | /* | ||
| 405 | * Variables for auto-shutdown. This allows "lights out" torture runs | ||
| 406 | * to be fully scripted. | ||
| 407 | */ | ||
| 408 | static int shutdown_secs; /* desired test duration in seconds. */ | ||
| 409 | static struct task_struct *shutdown_task; | ||
| 410 | static unsigned long shutdown_time; /* jiffies to system shutdown. */ | ||
| 411 | static void (*torture_shutdown_hook)(void); | ||
| 412 | |||
| 413 | /* | ||
| 414 | * Absorb kthreads into a kernel function that won't return, so that | ||
| 415 | * they won't ever access module text or data again. | ||
| 416 | */ | ||
| 417 | void torture_shutdown_absorb(const char *title) | ||
| 418 | { | ||
| 419 | while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { | ||
| 420 | pr_notice("torture thread %s parking due to system shutdown\n", | ||
| 421 | title); | ||
| 422 | schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); | ||
| 423 | } | ||
| 424 | } | ||
| 425 | EXPORT_SYMBOL_GPL(torture_shutdown_absorb); | ||
| 426 | |||
| 427 | /* | ||
| 428 | * Cause the torture test to shutdown the system after the test has | ||
| 429 | * run for the time specified by the shutdown_secs parameter. | ||
| 430 | */ | ||
| 431 | static int torture_shutdown(void *arg) | ||
| 432 | { | ||
| 433 | long delta; | ||
| 434 | unsigned long jiffies_snap; | ||
| 435 | |||
| 436 | VERBOSE_TOROUT_STRING("torture_shutdown task started"); | ||
| 437 | jiffies_snap = jiffies; | ||
| 438 | while (ULONG_CMP_LT(jiffies_snap, shutdown_time) && | ||
| 439 | !torture_must_stop()) { | ||
| 440 | delta = shutdown_time - jiffies_snap; | ||
| 441 | if (verbose) | ||
| 442 | pr_alert("%s" TORTURE_FLAG | ||
| 443 | "torture_shutdown task: %lu jiffies remaining\n", | ||
| 444 | torture_type, delta); | ||
| 445 | schedule_timeout_interruptible(delta); | ||
| 446 | jiffies_snap = jiffies; | ||
| 447 | } | ||
| 448 | if (torture_must_stop()) { | ||
| 449 | torture_kthread_stopping("torture_shutdown"); | ||
| 450 | return 0; | ||
| 451 | } | ||
| 452 | |||
| 453 | /* OK, shut down the system. */ | ||
| 454 | |||
| 455 | VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system"); | ||
| 456 | shutdown_task = NULL; /* Avoid self-kill deadlock. */ | ||
| 457 | if (torture_shutdown_hook) | ||
| 458 | torture_shutdown_hook(); | ||
| 459 | else | ||
| 460 | VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping."); | ||
| 461 | kernel_power_off(); /* Shut down the system. */ | ||
| 462 | return 0; | ||
| 463 | } | ||
| 464 | |||
| 465 | /* | ||
| 466 | * Start up the shutdown task. | ||
| 467 | */ | ||
| 468 | int torture_shutdown_init(int ssecs, void (*cleanup)(void)) | ||
| 469 | { | ||
| 470 | int ret = 0; | ||
| 471 | |||
| 472 | shutdown_secs = ssecs; | ||
| 473 | torture_shutdown_hook = cleanup; | ||
| 474 | if (shutdown_secs > 0) { | ||
| 475 | shutdown_time = jiffies + shutdown_secs * HZ; | ||
| 476 | ret = torture_create_kthread(torture_shutdown, NULL, | ||
| 477 | shutdown_task); | ||
| 478 | } | ||
| 479 | return ret; | ||
| 480 | } | ||
| 481 | EXPORT_SYMBOL_GPL(torture_shutdown_init); | ||
| 482 | |||
| 483 | /* | ||
| 484 | * Detect and respond to a system shutdown. | ||
| 485 | */ | ||
| 486 | static int torture_shutdown_notify(struct notifier_block *unused1, | ||
| 487 | unsigned long unused2, void *unused3) | ||
| 488 | { | ||
| 489 | mutex_lock(&fullstop_mutex); | ||
| 490 | if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) { | ||
| 491 | VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected"); | ||
| 492 | ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN; | ||
| 493 | } else { | ||
| 494 | pr_warn("Concurrent rmmod and shutdown illegal!\n"); | ||
| 495 | } | ||
| 496 | mutex_unlock(&fullstop_mutex); | ||
| 497 | return NOTIFY_DONE; | ||
| 498 | } | ||
| 499 | |||
| 500 | static struct notifier_block torture_shutdown_nb = { | ||
| 501 | .notifier_call = torture_shutdown_notify, | ||
| 502 | }; | ||
| 503 | |||
| 504 | /* | ||
| 505 | * Shut down the shutdown task. Say what??? Heh! This can happen if | ||
| 506 | * the torture module gets an rmmod before the shutdown time arrives. ;-) | ||
| 507 | */ | ||
| 508 | static void torture_shutdown_cleanup(void) | ||
| 509 | { | ||
| 510 | unregister_reboot_notifier(&torture_shutdown_nb); | ||
| 511 | if (shutdown_task != NULL) { | ||
| 512 | VERBOSE_TOROUT_STRING("Stopping torture_shutdown task"); | ||
| 513 | kthread_stop(shutdown_task); | ||
| 514 | } | ||
| 515 | shutdown_task = NULL; | ||
| 516 | } | ||
| 517 | |||
| 518 | /* | ||
| 519 | * Variables for stuttering, which means to periodically pause and | ||
| 520 | * restart testing in order to catch bugs that appear when load is | ||
| 521 | * suddenly applied to or removed from the system. | ||
| 522 | */ | ||
| 523 | static struct task_struct *stutter_task; | ||
| 524 | static int stutter_pause_test; | ||
| 525 | static int stutter; | ||
| 526 | |||
| 527 | /* | ||
| 528 | * Block until the stutter interval ends. This must be called periodically | ||
| 529 | * by all running kthreads that need to be subject to stuttering. | ||
| 530 | */ | ||
| 531 | void stutter_wait(const char *title) | ||
| 532 | { | ||
| 533 | while (ACCESS_ONCE(stutter_pause_test) || | ||
| 534 | (torture_runnable && !ACCESS_ONCE(*torture_runnable))) { | ||
| 535 | if (stutter_pause_test) | ||
| 536 | schedule_timeout_interruptible(1); | ||
| 537 | else | ||
| 538 | schedule_timeout_interruptible(round_jiffies_relative(HZ)); | ||
| 539 | torture_shutdown_absorb(title); | ||
| 540 | } | ||
| 541 | } | ||
| 542 | EXPORT_SYMBOL_GPL(stutter_wait); | ||
| 543 | |||
| 544 | /* | ||
| 545 | * Cause the torture test to "stutter", starting and stopping all | ||
| 546 | * threads periodically. | ||
| 547 | */ | ||
| 548 | static int torture_stutter(void *arg) | ||
| 549 | { | ||
| 550 | VERBOSE_TOROUT_STRING("torture_stutter task started"); | ||
| 551 | do { | ||
| 552 | if (!torture_must_stop()) { | ||
| 553 | schedule_timeout_interruptible(stutter); | ||
| 554 | ACCESS_ONCE(stutter_pause_test) = 1; | ||
| 555 | } | ||
| 556 | if (!torture_must_stop()) | ||
| 557 | schedule_timeout_interruptible(stutter); | ||
| 558 | ACCESS_ONCE(stutter_pause_test) = 0; | ||
| 559 | torture_shutdown_absorb("torture_stutter"); | ||
| 560 | } while (!torture_must_stop()); | ||
| 561 | torture_kthread_stopping("torture_stutter"); | ||
| 562 | return 0; | ||
| 563 | } | ||
| 564 | |||
| 565 | /* | ||
| 566 | * Initialize and kick off the torture_stutter kthread. | ||
| 567 | */ | ||
| 568 | int torture_stutter_init(int s) | ||
| 569 | { | ||
| 570 | int ret; | ||
| 571 | |||
| 572 | stutter = s; | ||
| 573 | ret = torture_create_kthread(torture_stutter, NULL, stutter_task); | ||
| 574 | return ret; | ||
| 575 | } | ||
| 576 | EXPORT_SYMBOL_GPL(torture_stutter_init); | ||
| 577 | |||
| 578 | /* | ||
| 579 | * Cleanup after the torture_stutter kthread. | ||
| 580 | */ | ||
| 581 | static void torture_stutter_cleanup(void) | ||
| 582 | { | ||
| 583 | if (!stutter_task) | ||
| 584 | return; | ||
| 585 | VERBOSE_TOROUT_STRING("Stopping torture_stutter task"); | ||
| 586 | kthread_stop(stutter_task); | ||
| 587 | stutter_task = NULL; | ||
| 588 | } | ||
| 589 | |||
| 590 | /* | ||
| 591 | * Initialize torture module. Please note that this is -not- invoked via | ||
| 592 | * the usual module_init() mechanism, but rather by an explicit call from | ||
| 593 | * the client torture module. This call must be paired with a later | ||
| 594 | * torture_init_end(). | ||
| 595 | * | ||
| 596 | * The runnable parameter points to a flag that controls whether or not | ||
| 597 | * the test is currently runnable. If there is no such flag, pass in NULL. | ||
| 598 | */ | ||
| 599 | void __init torture_init_begin(char *ttype, bool v, int *runnable) | ||
| 600 | { | ||
| 601 | mutex_lock(&fullstop_mutex); | ||
| 602 | torture_type = ttype; | ||
| 603 | verbose = v; | ||
| 604 | torture_runnable = runnable; | ||
| 605 | fullstop = FULLSTOP_DONTSTOP; | ||
| 606 | |||
| 607 | } | ||
| 608 | EXPORT_SYMBOL_GPL(torture_init_begin); | ||
| 609 | |||
| 610 | /* | ||
| 611 | * Tell the torture module that initialization is complete. | ||
| 612 | */ | ||
| 613 | void __init torture_init_end(void) | ||
| 614 | { | ||
| 615 | mutex_unlock(&fullstop_mutex); | ||
| 616 | register_reboot_notifier(&torture_shutdown_nb); | ||
| 617 | } | ||
| 618 | EXPORT_SYMBOL_GPL(torture_init_end); | ||
| 619 | |||
| 620 | /* | ||
| 621 | * Clean up torture module. Please note that this is -not- invoked via | ||
| 622 | * the usual module_exit() mechanism, but rather by an explicit call from | ||
| 623 | * the client torture module. Returns true if a race with system shutdown | ||
| 624 | * is detected, otherwise, all kthreads started by functions in this file | ||
| 625 | * will be shut down. | ||
| 626 | * | ||
| 627 | * This must be called before the caller starts shutting down its own | ||
| 628 | * kthreads. | ||
| 629 | */ | ||
| 630 | bool torture_cleanup(void) | ||
| 631 | { | ||
| 632 | mutex_lock(&fullstop_mutex); | ||
| 633 | if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { | ||
| 634 | pr_warn("Concurrent rmmod and shutdown illegal!\n"); | ||
| 635 | mutex_unlock(&fullstop_mutex); | ||
| 636 | schedule_timeout_uninterruptible(10); | ||
| 637 | return true; | ||
| 638 | } | ||
| 639 | ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD; | ||
| 640 | mutex_unlock(&fullstop_mutex); | ||
| 641 | torture_shutdown_cleanup(); | ||
| 642 | torture_shuffle_cleanup(); | ||
| 643 | torture_stutter_cleanup(); | ||
| 644 | torture_onoff_cleanup(); | ||
| 645 | return false; | ||
| 646 | } | ||
| 647 | EXPORT_SYMBOL_GPL(torture_cleanup); | ||
| 648 | |||
| 649 | /* | ||
| 650 | * Is it time for the current torture test to stop? | ||
| 651 | */ | ||
| 652 | bool torture_must_stop(void) | ||
| 653 | { | ||
| 654 | return torture_must_stop_irq() || kthread_should_stop(); | ||
| 655 | } | ||
| 656 | EXPORT_SYMBOL_GPL(torture_must_stop); | ||
| 657 | |||
| 658 | /* | ||
| 659 | * Is it time for the current torture test to stop? This is the irq-safe | ||
| 660 | * version, hence no check for kthread_should_stop(). | ||
| 661 | */ | ||
| 662 | bool torture_must_stop_irq(void) | ||
| 663 | { | ||
| 664 | return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP; | ||
| 665 | } | ||
| 666 | EXPORT_SYMBOL_GPL(torture_must_stop_irq); | ||
| 667 | |||
| 668 | /* | ||
| 669 | * Each kthread must wait for kthread_should_stop() before returning from | ||
| 670 | * its top-level function, otherwise segfaults ensue. This function | ||
| 671 | * prints a "stopping" message and waits for kthread_should_stop(), and | ||
| 672 | * should be called from all torture kthreads immediately prior to | ||
| 673 | * returning. | ||
| 674 | */ | ||
| 675 | void torture_kthread_stopping(char *title) | ||
| 676 | { | ||
| 677 | if (verbose) | ||
| 678 | VERBOSE_TOROUT_STRING(title); | ||
| 679 | while (!kthread_should_stop()) { | ||
| 680 | torture_shutdown_absorb(title); | ||
| 681 | schedule_timeout_uninterruptible(1); | ||
| 682 | } | ||
| 683 | } | ||
| 684 | EXPORT_SYMBOL_GPL(torture_kthread_stopping); | ||
| 685 | |||
| 686 | /* | ||
| 687 | * Create a generic torture kthread that is immediately runnable. If you | ||
| 688 | * need the kthread to be stopped so that you can do something to it before | ||
| 689 | * it starts, you will need to open-code your own. | ||
| 690 | */ | ||
| 691 | int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m, | ||
| 692 | char *f, struct task_struct **tp) | ||
| 693 | { | ||
| 694 | int ret = 0; | ||
| 695 | |||
| 696 | VERBOSE_TOROUT_STRING(m); | ||
| 697 | *tp = kthread_run(fn, arg, s); | ||
| 698 | if (IS_ERR(*tp)) { | ||
| 699 | ret = PTR_ERR(*tp); | ||
| 700 | VERBOSE_TOROUT_ERRSTRING(f); | ||
| 701 | *tp = NULL; | ||
| 702 | } | ||
| 703 | torture_shuffle_task_register(*tp); | ||
| 704 | return ret; | ||
| 705 | } | ||
| 706 | EXPORT_SYMBOL_GPL(_torture_create_kthread); | ||
| 707 | |||
| 708 | /* | ||
| 709 | * Stop a generic kthread, emitting a message. | ||
| 710 | */ | ||
| 711 | void _torture_stop_kthread(char *m, struct task_struct **tp) | ||
| 712 | { | ||
| 713 | if (*tp == NULL) | ||
| 714 | return; | ||
| 715 | VERBOSE_TOROUT_STRING(m); | ||
| 716 | kthread_stop(*tp); | ||
| 717 | *tp = NULL; | ||
| 718 | } | ||
| 719 | EXPORT_SYMBOL_GPL(_torture_stop_kthread); | ||
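kernel/torture.c only provides building blocks: a test module is expected to call torture_init_begin()/torture_init_end() around the *_init() helpers, poll torture_must_stop() and stutter_wait() from its kthreads, and unwind through torture_cleanup(). The sketch below shows one plausible wiring; everything prefixed my_ is invented, and torture_create_kthread() is assumed to be the <linux/torture.h> wrapper around _torture_create_kthread() that the file above uses throughout.

```c
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/torture.h>

static char my_torture_type[] = "my_torture";
static int my_torture_runnable = 1;
static struct task_struct *my_task;

static int my_torture_thread(void *arg)
{
	do {
		/* ... exercise the subsystem under test here ... */
		stutter_wait("my_torture_thread");          /* honour stutter pauses */
	} while (!torture_must_stop());
	torture_kthread_stopping("my_torture_thread");      /* required before returning */
	return 0;
}

static void my_torture_cleanup(void)
{
	if (torture_cleanup())          /* true: we raced with a system shutdown */
		return;
	_torture_stop_kthread("Stopping my_torture_thread", &my_task);
}

static int __init my_torture_init(void)
{
	int ret;

	torture_init_begin(my_torture_type, true, &my_torture_runnable);
	ret = torture_onoff_init(0, HZ);             /* CPU-hotplug stress, 1s interval */
	if (!ret)
		ret = torture_shuffle_init(3 * HZ);  /* rotate the forced-idle CPU */
	if (!ret)
		ret = torture_stutter_init(5 * HZ);  /* periodic start/stop of load */
	if (!ret)
		ret = torture_create_kthread(my_torture_thread, NULL, my_task);
	torture_init_end();
	if (ret)
		my_torture_cleanup();
	return ret;
}

module_init(my_torture_init);
module_exit(my_torture_cleanup);
MODULE_LICENSE("GPL");
```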
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 015f85aaca08..8639819f6cef 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -424,6 +424,7 @@ config UPROBE_EVENT | |||
| 424 | bool "Enable uprobes-based dynamic events" | 424 | bool "Enable uprobes-based dynamic events" |
| 425 | depends on ARCH_SUPPORTS_UPROBES | 425 | depends on ARCH_SUPPORTS_UPROBES |
| 426 | depends on MMU | 426 | depends on MMU |
| 427 | depends on PERF_EVENTS | ||
| 427 | select UPROBES | 428 | select UPROBES |
| 428 | select PROBE_EVENTS | 429 | select PROBE_EVENTS |
| 429 | select TRACING | 430 | select TRACING |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b418cb0d7242..c1bd4ada2a04 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q) | |||
| 702 | * blk_add_trace_rq - Add a trace for a request oriented action | 702 | * blk_add_trace_rq - Add a trace for a request oriented action |
| 703 | * @q: queue the io is for | 703 | * @q: queue the io is for |
| 704 | * @rq: the source request | 704 | * @rq: the source request |
| 705 | * @nr_bytes: number of completed bytes | ||
| 705 | * @what: the action | 706 | * @what: the action |
| 706 | * | 707 | * |
| 707 | * Description: | 708 | * Description: |
| @@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q) | |||
| 709 | * | 710 | * |
| 710 | **/ | 711 | **/ |
| 711 | static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | 712 | static void blk_add_trace_rq(struct request_queue *q, struct request *rq, |
| 712 | u32 what) | 713 | unsigned int nr_bytes, u32 what) |
| 713 | { | 714 | { |
| 714 | struct blk_trace *bt = q->blk_trace; | 715 | struct blk_trace *bt = q->blk_trace; |
| 715 | 716 | ||
| @@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |||
| 718 | 719 | ||
| 719 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | 720 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
| 720 | what |= BLK_TC_ACT(BLK_TC_PC); | 721 | what |= BLK_TC_ACT(BLK_TC_PC); |
| 721 | __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags, | 722 | __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags, |
| 722 | what, rq->errors, rq->cmd_len, rq->cmd); | 723 | what, rq->errors, rq->cmd_len, rq->cmd); |
| 723 | } else { | 724 | } else { |
| 724 | what |= BLK_TC_ACT(BLK_TC_FS); | 725 | what |= BLK_TC_ACT(BLK_TC_FS); |
| 725 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), | 726 | __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, |
| 726 | rq->cmd_flags, what, rq->errors, 0, NULL); | 727 | rq->cmd_flags, what, rq->errors, 0, NULL); |
| 727 | } | 728 | } |
| 728 | } | 729 | } |
| @@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |||
| 730 | static void blk_add_trace_rq_abort(void *ignore, | 731 | static void blk_add_trace_rq_abort(void *ignore, |
| 731 | struct request_queue *q, struct request *rq) | 732 | struct request_queue *q, struct request *rq) |
| 732 | { | 733 | { |
| 733 | blk_add_trace_rq(q, rq, BLK_TA_ABORT); | 734 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT); |
| 734 | } | 735 | } |
| 735 | 736 | ||
| 736 | static void blk_add_trace_rq_insert(void *ignore, | 737 | static void blk_add_trace_rq_insert(void *ignore, |
| 737 | struct request_queue *q, struct request *rq) | 738 | struct request_queue *q, struct request *rq) |
| 738 | { | 739 | { |
| 739 | blk_add_trace_rq(q, rq, BLK_TA_INSERT); | 740 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT); |
| 740 | } | 741 | } |
| 741 | 742 | ||
| 742 | static void blk_add_trace_rq_issue(void *ignore, | 743 | static void blk_add_trace_rq_issue(void *ignore, |
| 743 | struct request_queue *q, struct request *rq) | 744 | struct request_queue *q, struct request *rq) |
| 744 | { | 745 | { |
| 745 | blk_add_trace_rq(q, rq, BLK_TA_ISSUE); | 746 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE); |
| 746 | } | 747 | } |
| 747 | 748 | ||
| 748 | static void blk_add_trace_rq_requeue(void *ignore, | 749 | static void blk_add_trace_rq_requeue(void *ignore, |
| 749 | struct request_queue *q, | 750 | struct request_queue *q, |
| 750 | struct request *rq) | 751 | struct request *rq) |
| 751 | { | 752 | { |
| 752 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); | 753 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE); |
| 753 | } | 754 | } |
| 754 | 755 | ||
| 755 | static void blk_add_trace_rq_complete(void *ignore, | 756 | static void blk_add_trace_rq_complete(void *ignore, |
| 756 | struct request_queue *q, | 757 | struct request_queue *q, |
| 757 | struct request *rq) | 758 | struct request *rq, |
| 759 | unsigned int nr_bytes) | ||
| 758 | { | 760 | { |
| 759 | blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); | 761 | blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE); |
| 760 | } | 762 | } |
| 761 | 763 | ||
| 762 | /** | 764 | /** |
| @@ -1427,7 +1429,8 @@ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) | |||
| 1427 | return print_one_line(iter, true); | 1429 | return print_one_line(iter, true); |
| 1428 | } | 1430 | } |
| 1429 | 1431 | ||
| 1430 | static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set) | 1432 | static int |
| 1433 | blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 1431 | { | 1434 | { |
| 1432 | /* don't output context-info for blk_classic output */ | 1435 | /* don't output context-info for blk_classic output */ |
| 1433 | if (bit == TRACE_BLK_OPT_CLASSIC) { | 1436 | if (bit == TRACE_BLK_OPT_CLASSIC) { |
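The blktrace change above threads the number of completed bytes (@nr_bytes) from the completion path into blk_add_trace_rq() instead of re-reading blk_rq_bytes(rq), so a request that completes in several chunks is no longer accounted as if every chunk were the full request. A generic illustration of the difference; struct demo_req and both callbacks are invented for this sketch.

```c
#include <stdio.h>

struct demo_req {
	unsigned int total_bytes;    /* size the request started with */
};

/* old style: the trace hook re-derives the size from the request */
static void trace_complete_old(struct demo_req *rq)
{
	printf("completed %u bytes\n", rq->total_bytes);  /* over-reports partial completions */
}

/* new style: the completion path tells the hook exactly what finished */
static void trace_complete_new(struct demo_req *rq, unsigned int nr_bytes)
{
	(void)rq;
	printf("completed %u bytes\n", nr_bytes);
}

int main(void)
{
	struct demo_req rq = { .total_bytes = 8192 };

	/* the device finished only the first 4096 bytes of the request */
	trace_complete_old(&rq);           /* reports 8192 */
	trace_complete_new(&rq, 4096);     /* reports 4096 */
	return 0;
}
```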
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index cd7f76d1eb86..1fd4b9479210 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -237,14 +237,13 @@ static int control_ops_alloc(struct ftrace_ops *ops) | |||
| 237 | return 0; | 237 | return 0; |
| 238 | } | 238 | } |
| 239 | 239 | ||
| 240 | static void control_ops_free(struct ftrace_ops *ops) | ||
| 241 | { | ||
| 242 | free_percpu(ops->disabled); | ||
| 243 | } | ||
| 244 | |||
| 245 | static void update_global_ops(void) | 240 | static void update_global_ops(void) |
| 246 | { | 241 | { |
| 247 | ftrace_func_t func; | 242 | ftrace_func_t func = ftrace_global_list_func; |
| 243 | void *private = NULL; | ||
| 244 | |||
| 245 | /* The list has its own recursion protection. */ | ||
| 246 | global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 248 | 247 | ||
| 249 | /* | 248 | /* |
| 250 | * If there's only one function registered, then call that | 249 | * If there's only one function registered, then call that |
| @@ -254,23 +253,17 @@ static void update_global_ops(void) | |||
| 254 | if (ftrace_global_list == &ftrace_list_end || | 253 | if (ftrace_global_list == &ftrace_list_end || |
| 255 | ftrace_global_list->next == &ftrace_list_end) { | 254 | ftrace_global_list->next == &ftrace_list_end) { |
| 256 | func = ftrace_global_list->func; | 255 | func = ftrace_global_list->func; |
| 256 | private = ftrace_global_list->private; | ||
| 257 | /* | 257 | /* |
| 258 | * As we are calling the function directly. | 258 | * As we are calling the function directly. |
| 259 | * If it does not have recursion protection, | 259 | * If it does not have recursion protection, |
| 260 | * the function_trace_op needs to be updated | 260 | * the function_trace_op needs to be updated |
| 261 | * accordingly. | 261 | * accordingly. |
| 262 | */ | 262 | */ |
| 263 | if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) | 263 | if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)) |
| 264 | global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 265 | else | ||
| 266 | global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE; | 264 | global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE; |
| 267 | } else { | ||
| 268 | func = ftrace_global_list_func; | ||
| 269 | /* The list has its own recursion protection. */ | ||
| 270 | global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 271 | } | 265 | } |
| 272 | 266 | ||
| 273 | |||
| 274 | /* If we filter on pids, update to use the pid function */ | 267 | /* If we filter on pids, update to use the pid function */ |
| 275 | if (!list_empty(&ftrace_pids)) { | 268 | if (!list_empty(&ftrace_pids)) { |
| 276 | set_ftrace_pid_function(func); | 269 | set_ftrace_pid_function(func); |
| @@ -278,6 +271,7 @@ static void update_global_ops(void) | |||
| 278 | } | 271 | } |
| 279 | 272 | ||
| 280 | global_ops.func = func; | 273 | global_ops.func = func; |
| 274 | global_ops.private = private; | ||
| 281 | } | 275 | } |
| 282 | 276 | ||
| 283 | static void ftrace_sync(struct work_struct *work) | 277 | static void ftrace_sync(struct work_struct *work) |
| @@ -437,6 +431,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list, | |||
| 437 | 431 | ||
| 438 | static int __register_ftrace_function(struct ftrace_ops *ops) | 432 | static int __register_ftrace_function(struct ftrace_ops *ops) |
| 439 | { | 433 | { |
| 434 | if (ops->flags & FTRACE_OPS_FL_DELETED) | ||
| 435 | return -EINVAL; | ||
| 436 | |||
| 440 | if (FTRACE_WARN_ON(ops == &global_ops)) | 437 | if (FTRACE_WARN_ON(ops == &global_ops)) |
| 441 | return -EINVAL; | 438 | return -EINVAL; |
| 442 | 439 | ||
| @@ -1172,8 +1169,6 @@ struct ftrace_page { | |||
| 1172 | int size; | 1169 | int size; |
| 1173 | }; | 1170 | }; |
| 1174 | 1171 | ||
| 1175 | static struct ftrace_page *ftrace_new_pgs; | ||
| 1176 | |||
| 1177 | #define ENTRY_SIZE sizeof(struct dyn_ftrace) | 1172 | #define ENTRY_SIZE sizeof(struct dyn_ftrace) |
| 1178 | #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) | 1173 | #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) |
| 1179 | 1174 | ||
| @@ -1560,7 +1555,7 @@ unsigned long ftrace_location(unsigned long ip) | |||
| 1560 | * the function tracer. It checks the ftrace internal tables to | 1555 | * the function tracer. It checks the ftrace internal tables to |
| 1561 | * determine if the address belongs or not. | 1556 | * determine if the address belongs or not. |
| 1562 | */ | 1557 | */ |
| 1563 | int ftrace_text_reserved(void *start, void *end) | 1558 | int ftrace_text_reserved(const void *start, const void *end) |
| 1564 | { | 1559 | { |
| 1565 | unsigned long ret; | 1560 | unsigned long ret; |
| 1566 | 1561 | ||
| @@ -1994,6 +1989,7 @@ int __weak ftrace_arch_code_modify_post_process(void) | |||
| 1994 | void ftrace_modify_all_code(int command) | 1989 | void ftrace_modify_all_code(int command) |
| 1995 | { | 1990 | { |
| 1996 | int update = command & FTRACE_UPDATE_TRACE_FUNC; | 1991 | int update = command & FTRACE_UPDATE_TRACE_FUNC; |
| 1992 | int err = 0; | ||
| 1997 | 1993 | ||
| 1998 | /* | 1994 | /* |
| 1999 | * If the ftrace_caller calls a ftrace_ops func directly, | 1995 | * If the ftrace_caller calls a ftrace_ops func directly, |
| @@ -2005,8 +2001,11 @@ void ftrace_modify_all_code(int command) | |||
| 2005 | * to make sure the ops are having the right functions | 2001 | * to make sure the ops are having the right functions |
| 2006 | * traced. | 2002 | * traced. |
| 2007 | */ | 2003 | */ |
| 2008 | if (update) | 2004 | if (update) { |
| 2009 | ftrace_update_ftrace_func(ftrace_ops_list_func); | 2005 | err = ftrace_update_ftrace_func(ftrace_ops_list_func); |
| 2006 | if (FTRACE_WARN_ON(err)) | ||
| 2007 | return; | ||
| 2008 | } | ||
| 2010 | 2009 | ||
| 2011 | if (command & FTRACE_UPDATE_CALLS) | 2010 | if (command & FTRACE_UPDATE_CALLS) |
| 2012 | ftrace_replace_code(1); | 2011 | ftrace_replace_code(1); |
| @@ -2019,13 +2018,16 @@ void ftrace_modify_all_code(int command) | |||
| 2019 | /* If irqs are disabled, we are in stop machine */ | 2018 | /* If irqs are disabled, we are in stop machine */ |
| 2020 | if (!irqs_disabled()) | 2019 | if (!irqs_disabled()) |
| 2021 | smp_call_function(ftrace_sync_ipi, NULL, 1); | 2020 | smp_call_function(ftrace_sync_ipi, NULL, 1); |
| 2022 | ftrace_update_ftrace_func(ftrace_trace_function); | 2021 | err = ftrace_update_ftrace_func(ftrace_trace_function); |
| 2022 | if (FTRACE_WARN_ON(err)) | ||
| 2023 | return; | ||
| 2023 | } | 2024 | } |
| 2024 | 2025 | ||
| 2025 | if (command & FTRACE_START_FUNC_RET) | 2026 | if (command & FTRACE_START_FUNC_RET) |
| 2026 | ftrace_enable_ftrace_graph_caller(); | 2027 | err = ftrace_enable_ftrace_graph_caller(); |
| 2027 | else if (command & FTRACE_STOP_FUNC_RET) | 2028 | else if (command & FTRACE_STOP_FUNC_RET) |
| 2028 | ftrace_disable_ftrace_graph_caller(); | 2029 | err = ftrace_disable_ftrace_graph_caller(); |
| 2030 | FTRACE_WARN_ON(err); | ||
| 2029 | } | 2031 | } |
| 2030 | 2032 | ||
| 2031 | static int __ftrace_modify_code(void *data) | 2033 | static int __ftrace_modify_code(void *data) |
| @@ -2093,6 +2095,11 @@ static ftrace_func_t saved_ftrace_func; | |||
| 2093 | static int ftrace_start_up; | 2095 | static int ftrace_start_up; |
| 2094 | static int global_start_up; | 2096 | static int global_start_up; |
| 2095 | 2097 | ||
| 2098 | static void control_ops_free(struct ftrace_ops *ops) | ||
| 2099 | { | ||
| 2100 | free_percpu(ops->disabled); | ||
| 2101 | } | ||
| 2102 | |||
| 2096 | static void ftrace_startup_enable(int command) | 2103 | static void ftrace_startup_enable(int command) |
| 2097 | { | 2104 | { |
| 2098 | if (saved_ftrace_func != ftrace_trace_function) { | 2105 | if (saved_ftrace_func != ftrace_trace_function) { |
| @@ -2244,7 +2251,6 @@ static void ftrace_shutdown_sysctl(void) | |||
| 2244 | } | 2251 | } |
| 2245 | 2252 | ||
| 2246 | static cycle_t ftrace_update_time; | 2253 | static cycle_t ftrace_update_time; |
| 2247 | static unsigned long ftrace_update_cnt; | ||
| 2248 | unsigned long ftrace_update_tot_cnt; | 2254 | unsigned long ftrace_update_tot_cnt; |
| 2249 | 2255 | ||
| 2250 | static inline int ops_traces_mod(struct ftrace_ops *ops) | 2256 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
| @@ -2300,11 +2306,12 @@ static int referenced_filters(struct dyn_ftrace *rec) | |||
| 2300 | return cnt; | 2306 | return cnt; |
| 2301 | } | 2307 | } |
| 2302 | 2308 | ||
| 2303 | static int ftrace_update_code(struct module *mod) | 2309 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) |
| 2304 | { | 2310 | { |
| 2305 | struct ftrace_page *pg; | 2311 | struct ftrace_page *pg; |
| 2306 | struct dyn_ftrace *p; | 2312 | struct dyn_ftrace *p; |
| 2307 | cycle_t start, stop; | 2313 | cycle_t start, stop; |
| 2314 | unsigned long update_cnt = 0; | ||
| 2308 | unsigned long ref = 0; | 2315 | unsigned long ref = 0; |
| 2309 | bool test = false; | 2316 | bool test = false; |
| 2310 | int i; | 2317 | int i; |
| @@ -2330,9 +2337,8 @@ static int ftrace_update_code(struct module *mod) | |||
| 2330 | } | 2337 | } |
| 2331 | 2338 | ||
| 2332 | start = ftrace_now(raw_smp_processor_id()); | 2339 | start = ftrace_now(raw_smp_processor_id()); |
| 2333 | ftrace_update_cnt = 0; | ||
| 2334 | 2340 | ||
| 2335 | for (pg = ftrace_new_pgs; pg; pg = pg->next) { | 2341 | for (pg = new_pgs; pg; pg = pg->next) { |
| 2336 | 2342 | ||
| 2337 | for (i = 0; i < pg->index; i++) { | 2343 | for (i = 0; i < pg->index; i++) { |
| 2338 | int cnt = ref; | 2344 | int cnt = ref; |
| @@ -2353,7 +2359,7 @@ static int ftrace_update_code(struct module *mod) | |||
| 2353 | if (!ftrace_code_disable(mod, p)) | 2359 | if (!ftrace_code_disable(mod, p)) |
| 2354 | break; | 2360 | break; |
| 2355 | 2361 | ||
| 2356 | ftrace_update_cnt++; | 2362 | update_cnt++; |
| 2357 | 2363 | ||
| 2358 | /* | 2364 | /* |
| 2359 | * If the tracing is enabled, go ahead and enable the record. | 2365 | * If the tracing is enabled, go ahead and enable the record. |
| @@ -2372,11 +2378,9 @@ static int ftrace_update_code(struct module *mod) | |||
| 2372 | } | 2378 | } |
| 2373 | } | 2379 | } |
| 2374 | 2380 | ||
| 2375 | ftrace_new_pgs = NULL; | ||
| 2376 | |||
| 2377 | stop = ftrace_now(raw_smp_processor_id()); | 2381 | stop = ftrace_now(raw_smp_processor_id()); |
| 2378 | ftrace_update_time = stop - start; | 2382 | ftrace_update_time = stop - start; |
| 2379 | ftrace_update_tot_cnt += ftrace_update_cnt; | 2383 | ftrace_update_tot_cnt += update_cnt; |
| 2380 | 2384 | ||
| 2381 | return 0; | 2385 | return 0; |
| 2382 | } | 2386 | } |
| @@ -2468,22 +2472,6 @@ ftrace_allocate_pages(unsigned long num_to_init) | |||
| 2468 | return NULL; | 2472 | return NULL; |
| 2469 | } | 2473 | } |
| 2470 | 2474 | ||
| 2471 | static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) | ||
| 2472 | { | ||
| 2473 | int cnt; | ||
| 2474 | |||
| 2475 | if (!num_to_init) { | ||
| 2476 | pr_info("ftrace: No functions to be traced?\n"); | ||
| 2477 | return -1; | ||
| 2478 | } | ||
| 2479 | |||
| 2480 | cnt = num_to_init / ENTRIES_PER_PAGE; | ||
| 2481 | pr_info("ftrace: allocating %ld entries in %d pages\n", | ||
| 2482 | num_to_init, cnt + 1); | ||
| 2483 | |||
| 2484 | return 0; | ||
| 2485 | } | ||
| 2486 | |||
| 2487 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 2475 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
| 2488 | 2476 | ||
| 2489 | struct ftrace_iterator { | 2477 | struct ftrace_iterator { |
| @@ -2871,7 +2859,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
| 2871 | static int | 2859 | static int |
| 2872 | ftrace_filter_open(struct inode *inode, struct file *file) | 2860 | ftrace_filter_open(struct inode *inode, struct file *file) |
| 2873 | { | 2861 | { |
| 2874 | return ftrace_regex_open(&global_ops, | 2862 | struct ftrace_ops *ops = inode->i_private; |
| 2863 | |||
| 2864 | return ftrace_regex_open(ops, | ||
| 2875 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, | 2865 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, |
| 2876 | inode, file); | 2866 | inode, file); |
| 2877 | } | 2867 | } |
| @@ -2879,7 +2869,9 @@ ftrace_filter_open(struct inode *inode, struct file *file) | |||
| 2879 | static int | 2869 | static int |
| 2880 | ftrace_notrace_open(struct inode *inode, struct file *file) | 2870 | ftrace_notrace_open(struct inode *inode, struct file *file) |
| 2881 | { | 2871 | { |
| 2882 | return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE, | 2872 | struct ftrace_ops *ops = inode->i_private; |
| 2873 | |||
| 2874 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, | ||
| 2883 | inode, file); | 2875 | inode, file); |
| 2884 | } | 2876 | } |
| 2885 | 2877 | ||
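
ftrace_filter_open() and ftrace_notrace_open() now take the ftrace_ops from inode->i_private instead of hard-coding &global_ops, so whichever ops pointer is handed to trace_create_file() as the data argument is the one a given filter file acts on. A minimal sketch of that wiring, with my_ops and parent as placeholders for a per-instance ops and its directory; this is essentially what the ftrace_create_filter_files() helper added a few hunks below does for both files:

```c
/* trace_create_file() stashes the data pointer in the new inode's i_private,
 * which is what ftrace_filter_open() reads back above. */
static void my_create_filter_file(struct ftrace_ops *ops, struct dentry *parent)
{
	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);
}
```
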
| @@ -4109,6 +4101,36 @@ static const struct file_operations ftrace_graph_notrace_fops = { | |||
| 4109 | }; | 4101 | }; |
| 4110 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 4102 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 4111 | 4103 | ||
| 4104 | void ftrace_create_filter_files(struct ftrace_ops *ops, | ||
| 4105 | struct dentry *parent) | ||
| 4106 | { | ||
| 4107 | |||
| 4108 | trace_create_file("set_ftrace_filter", 0644, parent, | ||
| 4109 | ops, &ftrace_filter_fops); | ||
| 4110 | |||
| 4111 | trace_create_file("set_ftrace_notrace", 0644, parent, | ||
| 4112 | ops, &ftrace_notrace_fops); | ||
| 4113 | } | ||
| 4114 | |||
| 4115 | /* | ||
| 4116 | * The name "destroy_filter_files" is really a misnomer. Although | ||
| 4117 | * in the future, it may actualy delete the files, but this is | ||
| 4118 | * really intended to make sure the ops passed in are disabled | ||
| 4119 | * and that when this function returns, the caller is free to | ||
| 4120 | * free the ops. | ||
| 4121 | * | ||
| 4122 | * The "destroy" name is only to match the "create" name that this | ||
| 4123 | * should be paired with. | ||
| 4124 | */ | ||
| 4125 | void ftrace_destroy_filter_files(struct ftrace_ops *ops) | ||
| 4126 | { | ||
| 4127 | mutex_lock(&ftrace_lock); | ||
| 4128 | if (ops->flags & FTRACE_OPS_FL_ENABLED) | ||
| 4129 | ftrace_shutdown(ops, 0); | ||
| 4130 | ops->flags |= FTRACE_OPS_FL_DELETED; | ||
| 4131 | mutex_unlock(&ftrace_lock); | ||
| 4132 | } | ||
| 4133 | |||
| 4112 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | 4134 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) |
| 4113 | { | 4135 | { |
| 4114 | 4136 | ||
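
The create/destroy pair gives each ftrace_ops its own set_ftrace_filter and set_ftrace_notrace files. A rough sketch of the intended lifecycle for a private ops, assuming the caller owns both the ops and the debugfs directory (all my_* names are hypothetical, not the in-tree user of this API):

```c
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* per-function hook body */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
};

static void my_setup(struct dentry *parent)
{
	ftrace_create_filter_files(&my_ops, parent);
}

static void my_teardown(void)
{
	/*
	 * Disables the ops and marks it FTRACE_OPS_FL_DELETED; once this
	 * returns, the caller may free the ops even if the debugfs files
	 * are still around.
	 */
	ftrace_destroy_filter_files(&my_ops);
}
```
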
| @@ -4118,11 +4140,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | |||
| 4118 | trace_create_file("enabled_functions", 0444, | 4140 | trace_create_file("enabled_functions", 0444, |
| 4119 | d_tracer, NULL, &ftrace_enabled_fops); | 4141 | d_tracer, NULL, &ftrace_enabled_fops); |
| 4120 | 4142 | ||
| 4121 | trace_create_file("set_ftrace_filter", 0644, d_tracer, | 4143 | ftrace_create_filter_files(&global_ops, d_tracer); |
| 4122 | NULL, &ftrace_filter_fops); | ||
| 4123 | |||
| 4124 | trace_create_file("set_ftrace_notrace", 0644, d_tracer, | ||
| 4125 | NULL, &ftrace_notrace_fops); | ||
| 4126 | 4144 | ||
| 4127 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 4145 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 4128 | trace_create_file("set_graph_function", 0444, d_tracer, | 4146 | trace_create_file("set_graph_function", 0444, d_tracer, |
| @@ -4238,9 +4256,6 @@ static int ftrace_process_locs(struct module *mod, | |||
| 4238 | /* Assign the last page to ftrace_pages */ | 4256 | /* Assign the last page to ftrace_pages */ |
| 4239 | ftrace_pages = pg; | 4257 | ftrace_pages = pg; |
| 4240 | 4258 | ||
| 4241 | /* These new locations need to be initialized */ | ||
| 4242 | ftrace_new_pgs = start_pg; | ||
| 4243 | |||
| 4244 | /* | 4259 | /* |
| 4245 | * We only need to disable interrupts on start up | 4260 | * We only need to disable interrupts on start up |
| 4246 | * because we are modifying code that an interrupt | 4261 | * because we are modifying code that an interrupt |
| @@ -4251,7 +4266,7 @@ static int ftrace_process_locs(struct module *mod, | |||
| 4251 | */ | 4266 | */ |
| 4252 | if (!mod) | 4267 | if (!mod) |
| 4253 | local_irq_save(flags); | 4268 | local_irq_save(flags); |
| 4254 | ftrace_update_code(mod); | 4269 | ftrace_update_code(mod, start_pg); |
| 4255 | if (!mod) | 4270 | if (!mod) |
| 4256 | local_irq_restore(flags); | 4271 | local_irq_restore(flags); |
| 4257 | ret = 0; | 4272 | ret = 0; |
| @@ -4360,30 +4375,27 @@ struct notifier_block ftrace_module_exit_nb = { | |||
| 4360 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ | 4375 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ |
| 4361 | }; | 4376 | }; |
| 4362 | 4377 | ||
| 4363 | extern unsigned long __start_mcount_loc[]; | ||
| 4364 | extern unsigned long __stop_mcount_loc[]; | ||
| 4365 | |||
| 4366 | void __init ftrace_init(void) | 4378 | void __init ftrace_init(void) |
| 4367 | { | 4379 | { |
| 4368 | unsigned long count, addr, flags; | 4380 | extern unsigned long __start_mcount_loc[]; |
| 4381 | extern unsigned long __stop_mcount_loc[]; | ||
| 4382 | unsigned long count, flags; | ||
| 4369 | int ret; | 4383 | int ret; |
| 4370 | 4384 | ||
| 4371 | /* Keep the ftrace pointer to the stub */ | ||
| 4372 | addr = (unsigned long)ftrace_stub; | ||
| 4373 | |||
| 4374 | local_irq_save(flags); | 4385 | local_irq_save(flags); |
| 4375 | ftrace_dyn_arch_init(&addr); | 4386 | ret = ftrace_dyn_arch_init(); |
| 4376 | local_irq_restore(flags); | 4387 | local_irq_restore(flags); |
| 4377 | 4388 | if (ret) | |
| 4378 | /* ftrace_dyn_arch_init places the return code in addr */ | ||
| 4379 | if (addr) | ||
| 4380 | goto failed; | 4389 | goto failed; |
| 4381 | 4390 | ||
| 4382 | count = __stop_mcount_loc - __start_mcount_loc; | 4391 | count = __stop_mcount_loc - __start_mcount_loc; |
| 4383 | 4392 | if (!count) { | |
| 4384 | ret = ftrace_dyn_table_alloc(count); | 4393 | pr_info("ftrace: No functions to be traced?\n"); |
| 4385 | if (ret) | ||
| 4386 | goto failed; | 4394 | goto failed; |
| 4395 | } | ||
| 4396 | |||
| 4397 | pr_info("ftrace: allocating %ld entries in %ld pages\n", | ||
| 4398 | count, count / ENTRIES_PER_PAGE + 1); | ||
| 4387 | 4399 | ||
| 4388 | last_ftrace_enabled = ftrace_enabled = 1; | 4400 | last_ftrace_enabled = ftrace_enabled = 1; |
| 4389 | 4401 | ||
| @@ -4431,7 +4443,13 @@ static inline void ftrace_startup_enable(int command) { } | |||
| 4431 | (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ | 4443 | (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ |
| 4432 | ___ret; \ | 4444 | ___ret; \ |
| 4433 | }) | 4445 | }) |
| 4434 | # define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops) | 4446 | # define ftrace_shutdown(ops, command) \ |
| 4447 | ({ \ | ||
| 4448 | int ___ret = __unregister_ftrace_function(ops); \ | ||
| 4449 | if (!___ret) \ | ||
| 4450 | (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ | ||
| 4451 | ___ret; \ | ||
| 4452 | }) | ||
| 4435 | 4453 | ||
| 4436 | # define ftrace_startup_sysctl() do { } while (0) | 4454 | # define ftrace_startup_sysctl() do { } while (0) |
| 4437 | # define ftrace_shutdown_sysctl() do { } while (0) | 4455 | # define ftrace_shutdown_sysctl() do { } while (0) |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index fc4da2d97f9b..c634868c2921 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | |||
| 1301 | * In that off case, we need to allocate for all possible cpus. | 1301 | * In that off case, we need to allocate for all possible cpus. |
| 1302 | */ | 1302 | */ |
| 1303 | #ifdef CONFIG_HOTPLUG_CPU | 1303 | #ifdef CONFIG_HOTPLUG_CPU |
| 1304 | get_online_cpus(); | 1304 | cpu_notifier_register_begin(); |
| 1305 | cpumask_copy(buffer->cpumask, cpu_online_mask); | 1305 | cpumask_copy(buffer->cpumask, cpu_online_mask); |
| 1306 | #else | 1306 | #else |
| 1307 | cpumask_copy(buffer->cpumask, cpu_possible_mask); | 1307 | cpumask_copy(buffer->cpumask, cpu_possible_mask); |
| @@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | |||
| 1324 | #ifdef CONFIG_HOTPLUG_CPU | 1324 | #ifdef CONFIG_HOTPLUG_CPU |
| 1325 | buffer->cpu_notify.notifier_call = rb_cpu_notify; | 1325 | buffer->cpu_notify.notifier_call = rb_cpu_notify; |
| 1326 | buffer->cpu_notify.priority = 0; | 1326 | buffer->cpu_notify.priority = 0; |
| 1327 | register_cpu_notifier(&buffer->cpu_notify); | 1327 | __register_cpu_notifier(&buffer->cpu_notify); |
| 1328 | cpu_notifier_register_done(); | ||
| 1328 | #endif | 1329 | #endif |
| 1329 | 1330 | ||
| 1330 | put_online_cpus(); | ||
| 1331 | mutex_init(&buffer->mutex); | 1331 | mutex_init(&buffer->mutex); |
| 1332 | 1332 | ||
| 1333 | return buffer; | 1333 | return buffer; |
| @@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | |||
| 1341 | 1341 | ||
| 1342 | fail_free_cpumask: | 1342 | fail_free_cpumask: |
| 1343 | free_cpumask_var(buffer->cpumask); | 1343 | free_cpumask_var(buffer->cpumask); |
| 1344 | put_online_cpus(); | 1344 | #ifdef CONFIG_HOTPLUG_CPU |
| 1345 | cpu_notifier_register_done(); | ||
| 1346 | #endif | ||
| 1345 | 1347 | ||
| 1346 | fail_free_buffer: | 1348 | fail_free_buffer: |
| 1347 | kfree(buffer); | 1349 | kfree(buffer); |
| @@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
| 1358 | { | 1360 | { |
| 1359 | int cpu; | 1361 | int cpu; |
| 1360 | 1362 | ||
| 1361 | get_online_cpus(); | ||
| 1362 | |||
| 1363 | #ifdef CONFIG_HOTPLUG_CPU | 1363 | #ifdef CONFIG_HOTPLUG_CPU |
| 1364 | unregister_cpu_notifier(&buffer->cpu_notify); | 1364 | cpu_notifier_register_begin(); |
| 1365 | __unregister_cpu_notifier(&buffer->cpu_notify); | ||
| 1365 | #endif | 1366 | #endif |
| 1366 | 1367 | ||
| 1367 | for_each_buffer_cpu(buffer, cpu) | 1368 | for_each_buffer_cpu(buffer, cpu) |
| 1368 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 1369 | rb_free_cpu_buffer(buffer->buffers[cpu]); |
| 1369 | 1370 | ||
| 1370 | put_online_cpus(); | 1371 | #ifdef CONFIG_HOTPLUG_CPU |
| 1372 | cpu_notifier_register_done(); | ||
| 1373 | #endif | ||
| 1371 | 1374 | ||
| 1372 | kfree(buffer->buffers); | 1375 | kfree(buffer->buffers); |
| 1373 | free_cpumask_var(buffer->cpumask); | 1376 | free_cpumask_var(buffer->cpumask); |
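
The ring-buffer hunks above replace get_online_cpus()/register_cpu_notifier() with the cpu_notifier_register_begin() / __register_cpu_notifier() / cpu_notifier_register_done() sequence, which closes the window in which a CPU could come online between snapshotting cpu_online_mask and registering the notifier. A minimal sketch of the pattern on its own, with all my_* names hypothetical:

```c
#include <linux/cpu.h>
#include <linux/notifier.h>

static int my_cpu_notify(struct notifier_block *nb, unsigned long action,
			 void *hcpu)
{
	return NOTIFY_OK;			/* react to hotplug events here */
}

static struct notifier_block my_cpu_nb = {
	.notifier_call = my_cpu_notify,
};

static void my_init(void)
{
	int cpu;

	cpu_notifier_register_begin();		/* hold off hotplug while we look */
	for_each_online_cpu(cpu)
		;				/* per-CPU setup would go here */
	__register_cpu_notifier(&my_cpu_nb);	/* __ variant: lock already held */
	cpu_notifier_register_done();
}
```
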
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index a5457d577b98..0434ff1b808e 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
| @@ -40,8 +40,8 @@ static int write_iteration = 50; | |||
| 40 | module_param(write_iteration, uint, 0644); | 40 | module_param(write_iteration, uint, 0644); |
| 41 | MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings"); | 41 | MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings"); |
| 42 | 42 | ||
| 43 | static int producer_nice = 19; | 43 | static int producer_nice = MAX_NICE; |
| 44 | static int consumer_nice = 19; | 44 | static int consumer_nice = MAX_NICE; |
| 45 | 45 | ||
| 46 | static int producer_fifo = -1; | 46 | static int producer_fifo = -1; |
| 47 | static int consumer_fifo = -1; | 47 | static int consumer_fifo = -1; |
| @@ -308,7 +308,7 @@ static void ring_buffer_producer(void) | |||
| 308 | 308 | ||
| 309 | /* Let the user know that the test is running at low priority */ | 309 | /* Let the user know that the test is running at low priority */ |
| 310 | if (producer_fifo < 0 && consumer_fifo < 0 && | 310 | if (producer_fifo < 0 && consumer_fifo < 0 && |
| 311 | producer_nice == 19 && consumer_nice == 19) | 311 | producer_nice == MAX_NICE && consumer_nice == MAX_NICE) |
| 312 | trace_printk("WARNING!!! This test is running at lowest priority.\n"); | 312 | trace_printk("WARNING!!! This test is running at lowest priority.\n"); |
| 313 | 313 | ||
| 314 | trace_printk("Time: %lld (usecs)\n", time); | 314 | trace_printk("Time: %lld (usecs)\n", time); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 815c878f409b..737b0efa1a62 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -73,7 +73,8 @@ static struct tracer_flags dummy_tracer_flags = { | |||
| 73 | .opts = dummy_tracer_opt | 73 | .opts = dummy_tracer_opt |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | static int dummy_set_flag(u32 old_flags, u32 bit, int set) | 76 | static int |
| 77 | dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 77 | { | 78 | { |
| 78 | return 0; | 79 | return 0; |
| 79 | } | 80 | } |
| @@ -118,7 +119,7 @@ enum ftrace_dump_mode ftrace_dump_on_oops; | |||
| 118 | /* When set, tracing will stop when a WARN*() is hit */ | 119 | /* When set, tracing will stop when a WARN*() is hit */ |
| 119 | int __disable_trace_on_warning; | 120 | int __disable_trace_on_warning; |
| 120 | 121 | ||
| 121 | static int tracing_set_tracer(const char *buf); | 122 | static int tracing_set_tracer(struct trace_array *tr, const char *buf); |
| 122 | 123 | ||
| 123 | #define MAX_TRACER_SIZE 100 | 124 | #define MAX_TRACER_SIZE 100 |
| 124 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 125 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
| @@ -180,6 +181,17 @@ static int __init set_trace_boot_options(char *str) | |||
| 180 | } | 181 | } |
| 181 | __setup("trace_options=", set_trace_boot_options); | 182 | __setup("trace_options=", set_trace_boot_options); |
| 182 | 183 | ||
| 184 | static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; | ||
| 185 | static char *trace_boot_clock __initdata; | ||
| 186 | |||
| 187 | static int __init set_trace_boot_clock(char *str) | ||
| 188 | { | ||
| 189 | strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); | ||
| 190 | trace_boot_clock = trace_boot_clock_buf; | ||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | __setup("trace_clock=", set_trace_boot_clock); | ||
| 194 | |||
| 183 | 195 | ||
| 184 | unsigned long long ns2usecs(cycle_t nsec) | 196 | unsigned long long ns2usecs(cycle_t nsec) |
| 185 | { | 197 | { |
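
The trace_clock= handler above only stashes the string; the clock is applied later, once the trace clocks and the global array exist. A sketch of that deferred application, mirroring the tracer_alloc_buffers() hunk further down in this diff (the wrapper name is illustrative):

```c
static __init void my_apply_boot_clock(void)
{
	/* Mirrors the tracer_alloc_buffers() hunk below: fall back to the
	 * default clock if the requested name is unknown. */
	if (trace_boot_clock &&
	    tracing_set_clock(&global_trace, trace_boot_clock) < 0)
		pr_warning("Trace clock %s not defined, going back to default\n",
			   trace_boot_clock);
}
```

So booting with, say, trace_clock=global on the kernel command line should have that clock selected by the time boot-time tracers and events are set up.
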
| @@ -1230,7 +1242,7 @@ int register_tracer(struct tracer *type) | |||
| 1230 | 1242 | ||
| 1231 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); | 1243 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); |
| 1232 | /* Do we want this tracer to start on bootup? */ | 1244 | /* Do we want this tracer to start on bootup? */ |
| 1233 | tracing_set_tracer(type->name); | 1245 | tracing_set_tracer(&global_trace, type->name); |
| 1234 | default_bootup_tracer = NULL; | 1246 | default_bootup_tracer = NULL; |
| 1235 | /* disable other selftests, since this will break it. */ | 1247 | /* disable other selftests, since this will break it. */ |
| 1236 | tracing_selftest_disabled = true; | 1248 | tracing_selftest_disabled = true; |
| @@ -1600,15 +1612,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer, | |||
| 1600 | } | 1612 | } |
| 1601 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); | 1613 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); |
| 1602 | 1614 | ||
| 1615 | static struct ring_buffer *temp_buffer; | ||
| 1616 | |||
| 1603 | struct ring_buffer_event * | 1617 | struct ring_buffer_event * |
| 1604 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, | 1618 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, |
| 1605 | struct ftrace_event_file *ftrace_file, | 1619 | struct ftrace_event_file *ftrace_file, |
| 1606 | int type, unsigned long len, | 1620 | int type, unsigned long len, |
| 1607 | unsigned long flags, int pc) | 1621 | unsigned long flags, int pc) |
| 1608 | { | 1622 | { |
| 1623 | struct ring_buffer_event *entry; | ||
| 1624 | |||
| 1609 | *current_rb = ftrace_file->tr->trace_buffer.buffer; | 1625 | *current_rb = ftrace_file->tr->trace_buffer.buffer; |
| 1610 | return trace_buffer_lock_reserve(*current_rb, | 1626 | entry = trace_buffer_lock_reserve(*current_rb, |
| 1611 | type, len, flags, pc); | 1627 | type, len, flags, pc); |
| 1628 | /* | ||
| 1629 | * If tracing is off, but we have triggers enabled | ||
| 1630 | * we still need to look at the event data. Use the temp_buffer | ||
| 1631 | * to store the trace event for the trigger to use. It's recursion | ||
| 1632 | * safe and will not be recorded anywhere. | ||
| 1633 | */ | ||
| 1634 | if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) { | ||
| 1635 | *current_rb = temp_buffer; | ||
| 1636 | entry = trace_buffer_lock_reserve(*current_rb, | ||
| 1637 | type, len, flags, pc); | ||
| 1638 | } | ||
| 1639 | return entry; | ||
| 1612 | } | 1640 | } |
| 1613 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); | 1641 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); |
| 1614 | 1642 | ||
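
temp_buffer, which trace_event_buffer_lock_reserve() now falls back to when the real buffer refuses the write but the file has a trigger attached, is a small overwrite-mode ring buffer created once at boot; the allocation appears in the tracer_alloc_buffers() hunk near the end of this file's diff. A sketch of that setup, wrapped in an illustrative helper:

```c
static struct ring_buffer *temp_buffer;

/* One page in overwrite mode: enough to stage a single event for a trigger
 * to inspect; per the comment above, it is never read back as trace output. */
static int __init my_alloc_temp_buffer(void)
{
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	return temp_buffer ? 0 : -ENOMEM;
}
```
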
| @@ -3121,27 +3149,52 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
| 3121 | return ret; | 3149 | return ret; |
| 3122 | } | 3150 | } |
| 3123 | 3151 | ||
| 3152 | /* | ||
| 3153 | * Some tracers are not suitable for instance buffers. | ||
| 3154 | * A tracer is always available for the global array (toplevel) | ||
| 3155 | * or if it explicitly states that it is. | ||
| 3156 | */ | ||
| 3157 | static bool | ||
| 3158 | trace_ok_for_array(struct tracer *t, struct trace_array *tr) | ||
| 3159 | { | ||
| 3160 | return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; | ||
| 3161 | } | ||
| 3162 | |||
| 3163 | /* Find the next tracer that this trace array may use */ | ||
| 3164 | static struct tracer * | ||
| 3165 | get_tracer_for_array(struct trace_array *tr, struct tracer *t) | ||
| 3166 | { | ||
| 3167 | while (t && !trace_ok_for_array(t, tr)) | ||
| 3168 | t = t->next; | ||
| 3169 | |||
| 3170 | return t; | ||
| 3171 | } | ||
| 3172 | |||
| 3124 | static void * | 3173 | static void * |
| 3125 | t_next(struct seq_file *m, void *v, loff_t *pos) | 3174 | t_next(struct seq_file *m, void *v, loff_t *pos) |
| 3126 | { | 3175 | { |
| 3176 | struct trace_array *tr = m->private; | ||
| 3127 | struct tracer *t = v; | 3177 | struct tracer *t = v; |
| 3128 | 3178 | ||
| 3129 | (*pos)++; | 3179 | (*pos)++; |
| 3130 | 3180 | ||
| 3131 | if (t) | 3181 | if (t) |
| 3132 | t = t->next; | 3182 | t = get_tracer_for_array(tr, t->next); |
| 3133 | 3183 | ||
| 3134 | return t; | 3184 | return t; |
| 3135 | } | 3185 | } |
| 3136 | 3186 | ||
| 3137 | static void *t_start(struct seq_file *m, loff_t *pos) | 3187 | static void *t_start(struct seq_file *m, loff_t *pos) |
| 3138 | { | 3188 | { |
| 3189 | struct trace_array *tr = m->private; | ||
| 3139 | struct tracer *t; | 3190 | struct tracer *t; |
| 3140 | loff_t l = 0; | 3191 | loff_t l = 0; |
| 3141 | 3192 | ||
| 3142 | mutex_lock(&trace_types_lock); | 3193 | mutex_lock(&trace_types_lock); |
| 3143 | for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) | 3194 | |
| 3144 | ; | 3195 | t = get_tracer_for_array(tr, trace_types); |
| 3196 | for (; t && l < *pos; t = t_next(m, t, &l)) | ||
| 3197 | ; | ||
| 3145 | 3198 | ||
| 3146 | return t; | 3199 | return t; |
| 3147 | } | 3200 | } |
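
get_tracer_for_array() is what makes available_tracers differ per instance: a tracer is listed for a sub-buffer only if it sets the new allow_instances flag, while the top-level (global) array still sees every registered tracer. A hedged sketch of how a tracer opts in; only .name and .allow_instances matter here and the name is hypothetical:

```c
/* A tracer that may also be selected from instances/<name>/current_tracer. */
static struct tracer my_tracer __read_mostly = {
	.name			= "my_tracer",
	.allow_instances	= true,
};
```

Without the flag, writing the tracer's name into an instance's current_tracer is rejected with -EINVAL, as the tracing_set_tracer() hunk below enforces.
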
| @@ -3176,10 +3229,21 @@ static const struct seq_operations show_traces_seq_ops = { | |||
| 3176 | 3229 | ||
| 3177 | static int show_traces_open(struct inode *inode, struct file *file) | 3230 | static int show_traces_open(struct inode *inode, struct file *file) |
| 3178 | { | 3231 | { |
| 3232 | struct trace_array *tr = inode->i_private; | ||
| 3233 | struct seq_file *m; | ||
| 3234 | int ret; | ||
| 3235 | |||
| 3179 | if (tracing_disabled) | 3236 | if (tracing_disabled) |
| 3180 | return -ENODEV; | 3237 | return -ENODEV; |
| 3181 | 3238 | ||
| 3182 | return seq_open(file, &show_traces_seq_ops); | 3239 | ret = seq_open(file, &show_traces_seq_ops); |
| 3240 | if (ret) | ||
| 3241 | return ret; | ||
| 3242 | |||
| 3243 | m = file->private_data; | ||
| 3244 | m->private = tr; | ||
| 3245 | |||
| 3246 | return 0; | ||
| 3183 | } | 3247 | } |
| 3184 | 3248 | ||
| 3185 | static ssize_t | 3249 | static ssize_t |
| @@ -3339,13 +3403,14 @@ static int tracing_trace_options_show(struct seq_file *m, void *v) | |||
| 3339 | return 0; | 3403 | return 0; |
| 3340 | } | 3404 | } |
| 3341 | 3405 | ||
| 3342 | static int __set_tracer_option(struct tracer *trace, | 3406 | static int __set_tracer_option(struct trace_array *tr, |
| 3343 | struct tracer_flags *tracer_flags, | 3407 | struct tracer_flags *tracer_flags, |
| 3344 | struct tracer_opt *opts, int neg) | 3408 | struct tracer_opt *opts, int neg) |
| 3345 | { | 3409 | { |
| 3410 | struct tracer *trace = tr->current_trace; | ||
| 3346 | int ret; | 3411 | int ret; |
| 3347 | 3412 | ||
| 3348 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); | 3413 | ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); |
| 3349 | if (ret) | 3414 | if (ret) |
| 3350 | return ret; | 3415 | return ret; |
| 3351 | 3416 | ||
| @@ -3357,8 +3422,9 @@ static int __set_tracer_option(struct tracer *trace, | |||
| 3357 | } | 3422 | } |
| 3358 | 3423 | ||
| 3359 | /* Try to assign a tracer specific option */ | 3424 | /* Try to assign a tracer specific option */ |
| 3360 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | 3425 | static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) |
| 3361 | { | 3426 | { |
| 3427 | struct tracer *trace = tr->current_trace; | ||
| 3362 | struct tracer_flags *tracer_flags = trace->flags; | 3428 | struct tracer_flags *tracer_flags = trace->flags; |
| 3363 | struct tracer_opt *opts = NULL; | 3429 | struct tracer_opt *opts = NULL; |
| 3364 | int i; | 3430 | int i; |
| @@ -3367,8 +3433,7 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
| 3367 | opts = &tracer_flags->opts[i]; | 3433 | opts = &tracer_flags->opts[i]; |
| 3368 | 3434 | ||
| 3369 | if (strcmp(cmp, opts->name) == 0) | 3435 | if (strcmp(cmp, opts->name) == 0) |
| 3370 | return __set_tracer_option(trace, trace->flags, | 3436 | return __set_tracer_option(tr, trace->flags, opts, neg); |
| 3371 | opts, neg); | ||
| 3372 | } | 3437 | } |
| 3373 | 3438 | ||
| 3374 | return -EINVAL; | 3439 | return -EINVAL; |
| @@ -3391,7 +3456,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) | |||
| 3391 | 3456 | ||
| 3392 | /* Give the tracer a chance to approve the change */ | 3457 | /* Give the tracer a chance to approve the change */ |
| 3393 | if (tr->current_trace->flag_changed) | 3458 | if (tr->current_trace->flag_changed) |
| 3394 | if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled)) | 3459 | if (tr->current_trace->flag_changed(tr, mask, !!enabled)) |
| 3395 | return -EINVAL; | 3460 | return -EINVAL; |
| 3396 | 3461 | ||
| 3397 | if (enabled) | 3462 | if (enabled) |
| @@ -3440,7 +3505,7 @@ static int trace_set_options(struct trace_array *tr, char *option) | |||
| 3440 | 3505 | ||
| 3441 | /* If no option could be set, test the specific tracer options */ | 3506 | /* If no option could be set, test the specific tracer options */ |
| 3442 | if (!trace_options[i]) | 3507 | if (!trace_options[i]) |
| 3443 | ret = set_tracer_option(tr->current_trace, cmp, neg); | 3508 | ret = set_tracer_option(tr, cmp, neg); |
| 3444 | 3509 | ||
| 3445 | mutex_unlock(&trace_types_lock); | 3510 | mutex_unlock(&trace_types_lock); |
| 3446 | 3511 | ||
| @@ -3546,6 +3611,8 @@ static const char readme_msg[] = | |||
| 3546 | #ifdef CONFIG_TRACER_SNAPSHOT | 3611 | #ifdef CONFIG_TRACER_SNAPSHOT |
| 3547 | "\t\t snapshot\n" | 3612 | "\t\t snapshot\n" |
| 3548 | #endif | 3613 | #endif |
| 3614 | "\t\t dump\n" | ||
| 3615 | "\t\t cpudump\n" | ||
| 3549 | "\t example: echo do_fault:traceoff > set_ftrace_filter\n" | 3616 | "\t example: echo do_fault:traceoff > set_ftrace_filter\n" |
| 3550 | "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" | 3617 | "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" |
| 3551 | "\t The first one will disable tracing every time do_fault is hit\n" | 3618 | "\t The first one will disable tracing every time do_fault is hit\n" |
| @@ -3869,10 +3936,26 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer); | |||
| 3869 | static void | 3936 | static void |
| 3870 | destroy_trace_option_files(struct trace_option_dentry *topts); | 3937 | destroy_trace_option_files(struct trace_option_dentry *topts); |
| 3871 | 3938 | ||
| 3872 | static int tracing_set_tracer(const char *buf) | 3939 | /* |
| 3940 | * Used to clear out the tracer before deletion of an instance. | ||
| 3941 | * Must have trace_types_lock held. | ||
| 3942 | */ | ||
| 3943 | static void tracing_set_nop(struct trace_array *tr) | ||
| 3944 | { | ||
| 3945 | if (tr->current_trace == &nop_trace) | ||
| 3946 | return; | ||
| 3947 | |||
| 3948 | tr->current_trace->enabled--; | ||
| 3949 | |||
| 3950 | if (tr->current_trace->reset) | ||
| 3951 | tr->current_trace->reset(tr); | ||
| 3952 | |||
| 3953 | tr->current_trace = &nop_trace; | ||
| 3954 | } | ||
| 3955 | |||
| 3956 | static int tracing_set_tracer(struct trace_array *tr, const char *buf) | ||
| 3873 | { | 3957 | { |
| 3874 | static struct trace_option_dentry *topts; | 3958 | static struct trace_option_dentry *topts; |
| 3875 | struct trace_array *tr = &global_trace; | ||
| 3876 | struct tracer *t; | 3959 | struct tracer *t; |
| 3877 | #ifdef CONFIG_TRACER_MAX_TRACE | 3960 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 3878 | bool had_max_tr; | 3961 | bool had_max_tr; |
| @@ -3900,9 +3983,15 @@ static int tracing_set_tracer(const char *buf) | |||
| 3900 | if (t == tr->current_trace) | 3983 | if (t == tr->current_trace) |
| 3901 | goto out; | 3984 | goto out; |
| 3902 | 3985 | ||
| 3986 | /* Some tracers are only allowed for the top level buffer */ | ||
| 3987 | if (!trace_ok_for_array(t, tr)) { | ||
| 3988 | ret = -EINVAL; | ||
| 3989 | goto out; | ||
| 3990 | } | ||
| 3991 | |||
| 3903 | trace_branch_disable(); | 3992 | trace_branch_disable(); |
| 3904 | 3993 | ||
| 3905 | tr->current_trace->enabled = false; | 3994 | tr->current_trace->enabled--; |
| 3906 | 3995 | ||
| 3907 | if (tr->current_trace->reset) | 3996 | if (tr->current_trace->reset) |
| 3908 | tr->current_trace->reset(tr); | 3997 | tr->current_trace->reset(tr); |
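
tracing_set_nop() lets an instance be stripped of its tracer during teardown without going through the full tracing_set_tracer() path, and enabled becoming a counter reflects that several trace arrays may now run the same tracer at once. A rough sketch of the removal ordering this enables, mirroring the instance_delete() hunk later in this diff (the caller is assumed to hold trace_types_lock, per the comment above):

```c
/* Sketch of teardown for a dying instance "tr"; locking and error handling
 * elided -- see instance_delete() below for the real sequence. */
static void my_instance_teardown(struct trace_array *tr)
{
	list_del(&tr->list);
	tracing_set_nop(tr);			/* drops the tracer's enabled count */
	event_trace_del_tracer(tr);		/* remove its event files */
	ftrace_destroy_function_files(tr);	/* and its function filter files */
	debugfs_remove_recursive(tr->dir);
	/* the ring buffers and the trace_array itself are freed after this */
}
```
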
| @@ -3925,9 +4014,11 @@ static int tracing_set_tracer(const char *buf) | |||
| 3925 | free_snapshot(tr); | 4014 | free_snapshot(tr); |
| 3926 | } | 4015 | } |
| 3927 | #endif | 4016 | #endif |
| 3928 | destroy_trace_option_files(topts); | 4017 | /* Currently, only the top instance has options */ |
| 3929 | 4018 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | |
| 3930 | topts = create_trace_option_files(tr, t); | 4019 | destroy_trace_option_files(topts); |
| 4020 | topts = create_trace_option_files(tr, t); | ||
| 4021 | } | ||
| 3931 | 4022 | ||
| 3932 | #ifdef CONFIG_TRACER_MAX_TRACE | 4023 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 3933 | if (t->use_max_tr && !had_max_tr) { | 4024 | if (t->use_max_tr && !had_max_tr) { |
| @@ -3944,7 +4035,7 @@ static int tracing_set_tracer(const char *buf) | |||
| 3944 | } | 4035 | } |
| 3945 | 4036 | ||
| 3946 | tr->current_trace = t; | 4037 | tr->current_trace = t; |
| 3947 | tr->current_trace->enabled = true; | 4038 | tr->current_trace->enabled++; |
| 3948 | trace_branch_enable(tr); | 4039 | trace_branch_enable(tr); |
| 3949 | out: | 4040 | out: |
| 3950 | mutex_unlock(&trace_types_lock); | 4041 | mutex_unlock(&trace_types_lock); |
| @@ -3956,6 +4047,7 @@ static ssize_t | |||
| 3956 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 4047 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, |
| 3957 | size_t cnt, loff_t *ppos) | 4048 | size_t cnt, loff_t *ppos) |
| 3958 | { | 4049 | { |
| 4050 | struct trace_array *tr = filp->private_data; | ||
| 3959 | char buf[MAX_TRACER_SIZE+1]; | 4051 | char buf[MAX_TRACER_SIZE+1]; |
| 3960 | int i; | 4052 | int i; |
| 3961 | size_t ret; | 4053 | size_t ret; |
| @@ -3975,7 +4067,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
| 3975 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 4067 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) |
| 3976 | buf[i] = 0; | 4068 | buf[i] = 0; |
| 3977 | 4069 | ||
| 3978 | err = tracing_set_tracer(buf); | 4070 | err = tracing_set_tracer(tr, buf); |
| 3979 | if (err) | 4071 | if (err) |
| 3980 | return err; | 4072 | return err; |
| 3981 | 4073 | ||
| @@ -4300,8 +4392,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | |||
| 4300 | 4392 | ||
| 4301 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { | 4393 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
| 4302 | .can_merge = 0, | 4394 | .can_merge = 0, |
| 4303 | .map = generic_pipe_buf_map, | ||
| 4304 | .unmap = generic_pipe_buf_unmap, | ||
| 4305 | .confirm = generic_pipe_buf_confirm, | 4395 | .confirm = generic_pipe_buf_confirm, |
| 4306 | .release = generic_pipe_buf_release, | 4396 | .release = generic_pipe_buf_release, |
| 4307 | .steal = generic_pipe_buf_steal, | 4397 | .steal = generic_pipe_buf_steal, |
| @@ -4396,7 +4486,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
| 4396 | trace_access_lock(iter->cpu_file); | 4486 | trace_access_lock(iter->cpu_file); |
| 4397 | 4487 | ||
| 4398 | /* Fill as many pages as possible. */ | 4488 | /* Fill as many pages as possible. */ |
| 4399 | for (i = 0, rem = len; i < pipe->buffers && rem; i++) { | 4489 | for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { |
| 4400 | spd.pages[i] = alloc_page(GFP_KERNEL); | 4490 | spd.pages[i] = alloc_page(GFP_KERNEL); |
| 4401 | if (!spd.pages[i]) | 4491 | if (!spd.pages[i]) |
| 4402 | break; | 4492 | break; |
| @@ -4683,25 +4773,10 @@ static int tracing_clock_show(struct seq_file *m, void *v) | |||
| 4683 | return 0; | 4773 | return 0; |
| 4684 | } | 4774 | } |
| 4685 | 4775 | ||
| 4686 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 4776 | static int tracing_set_clock(struct trace_array *tr, const char *clockstr) |
| 4687 | size_t cnt, loff_t *fpos) | ||
| 4688 | { | 4777 | { |
| 4689 | struct seq_file *m = filp->private_data; | ||
| 4690 | struct trace_array *tr = m->private; | ||
| 4691 | char buf[64]; | ||
| 4692 | const char *clockstr; | ||
| 4693 | int i; | 4778 | int i; |
| 4694 | 4779 | ||
| 4695 | if (cnt >= sizeof(buf)) | ||
| 4696 | return -EINVAL; | ||
| 4697 | |||
| 4698 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 4699 | return -EFAULT; | ||
| 4700 | |||
| 4701 | buf[cnt] = 0; | ||
| 4702 | |||
| 4703 | clockstr = strstrip(buf); | ||
| 4704 | |||
| 4705 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { | 4780 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { |
| 4706 | if (strcmp(trace_clocks[i].name, clockstr) == 0) | 4781 | if (strcmp(trace_clocks[i].name, clockstr) == 0) |
| 4707 | break; | 4782 | break; |
| @@ -4729,6 +4804,32 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
| 4729 | 4804 | ||
| 4730 | mutex_unlock(&trace_types_lock); | 4805 | mutex_unlock(&trace_types_lock); |
| 4731 | 4806 | ||
| 4807 | return 0; | ||
| 4808 | } | ||
| 4809 | |||
| 4810 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | ||
| 4811 | size_t cnt, loff_t *fpos) | ||
| 4812 | { | ||
| 4813 | struct seq_file *m = filp->private_data; | ||
| 4814 | struct trace_array *tr = m->private; | ||
| 4815 | char buf[64]; | ||
| 4816 | const char *clockstr; | ||
| 4817 | int ret; | ||
| 4818 | |||
| 4819 | if (cnt >= sizeof(buf)) | ||
| 4820 | return -EINVAL; | ||
| 4821 | |||
| 4822 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 4823 | return -EFAULT; | ||
| 4824 | |||
| 4825 | buf[cnt] = 0; | ||
| 4826 | |||
| 4827 | clockstr = strstrip(buf); | ||
| 4828 | |||
| 4829 | ret = tracing_set_clock(tr, clockstr); | ||
| 4830 | if (ret) | ||
| 4831 | return ret; | ||
| 4832 | |||
| 4732 | *fpos += cnt; | 4833 | *fpos += cnt; |
| 4733 | 4834 | ||
| 4734 | return cnt; | 4835 | return cnt; |
| @@ -5178,8 +5279,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | |||
| 5178 | /* Pipe buffer operations for a buffer. */ | 5279 | /* Pipe buffer operations for a buffer. */ |
| 5179 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { | 5280 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
| 5180 | .can_merge = 0, | 5281 | .can_merge = 0, |
| 5181 | .map = generic_pipe_buf_map, | ||
| 5182 | .unmap = generic_pipe_buf_unmap, | ||
| 5183 | .confirm = generic_pipe_buf_confirm, | 5282 | .confirm = generic_pipe_buf_confirm, |
| 5184 | .release = buffer_pipe_buf_release, | 5283 | .release = buffer_pipe_buf_release, |
| 5185 | .steal = generic_pipe_buf_steal, | 5284 | .steal = generic_pipe_buf_steal, |
| @@ -5255,7 +5354,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
| 5255 | trace_access_lock(iter->cpu_file); | 5354 | trace_access_lock(iter->cpu_file); |
| 5256 | entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); | 5355 | entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); |
| 5257 | 5356 | ||
| 5258 | for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { | 5357 | for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { |
| 5259 | struct page *page; | 5358 | struct page *page; |
| 5260 | int r; | 5359 | int r; |
| 5261 | 5360 | ||
| @@ -5689,7 +5788,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 5689 | 5788 | ||
| 5690 | if (!!(topt->flags->val & topt->opt->bit) != val) { | 5789 | if (!!(topt->flags->val & topt->opt->bit) != val) { |
| 5691 | mutex_lock(&trace_types_lock); | 5790 | mutex_lock(&trace_types_lock); |
| 5692 | ret = __set_tracer_option(topt->tr->current_trace, topt->flags, | 5791 | ret = __set_tracer_option(topt->tr, topt->flags, |
| 5693 | topt->opt, !val); | 5792 | topt->opt, !val); |
| 5694 | mutex_unlock(&trace_types_lock); | 5793 | mutex_unlock(&trace_types_lock); |
| 5695 | if (ret) | 5794 | if (ret) |
| @@ -6096,7 +6195,9 @@ static int instance_delete(const char *name) | |||
| 6096 | 6195 | ||
| 6097 | list_del(&tr->list); | 6196 | list_del(&tr->list); |
| 6098 | 6197 | ||
| 6198 | tracing_set_nop(tr); | ||
| 6099 | event_trace_del_tracer(tr); | 6199 | event_trace_del_tracer(tr); |
| 6200 | ftrace_destroy_function_files(tr); | ||
| 6100 | debugfs_remove_recursive(tr->dir); | 6201 | debugfs_remove_recursive(tr->dir); |
| 6101 | free_percpu(tr->trace_buffer.data); | 6202 | free_percpu(tr->trace_buffer.data); |
| 6102 | ring_buffer_free(tr->trace_buffer.buffer); | 6203 | ring_buffer_free(tr->trace_buffer.buffer); |
| @@ -6191,6 +6292,12 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
| 6191 | { | 6292 | { |
| 6192 | int cpu; | 6293 | int cpu; |
| 6193 | 6294 | ||
| 6295 | trace_create_file("available_tracers", 0444, d_tracer, | ||
| 6296 | tr, &show_traces_fops); | ||
| 6297 | |||
| 6298 | trace_create_file("current_tracer", 0644, d_tracer, | ||
| 6299 | tr, &set_tracer_fops); | ||
| 6300 | |||
| 6194 | trace_create_file("tracing_cpumask", 0644, d_tracer, | 6301 | trace_create_file("tracing_cpumask", 0644, d_tracer, |
| 6195 | tr, &tracing_cpumask_fops); | 6302 | tr, &tracing_cpumask_fops); |
| 6196 | 6303 | ||
| @@ -6221,6 +6328,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
| 6221 | trace_create_file("tracing_on", 0644, d_tracer, | 6328 | trace_create_file("tracing_on", 0644, d_tracer, |
| 6222 | tr, &rb_simple_fops); | 6329 | tr, &rb_simple_fops); |
| 6223 | 6330 | ||
| 6331 | if (ftrace_create_function_files(tr, d_tracer)) | ||
| 6332 | WARN(1, "Could not allocate function filter files"); | ||
| 6333 | |||
| 6224 | #ifdef CONFIG_TRACER_SNAPSHOT | 6334 | #ifdef CONFIG_TRACER_SNAPSHOT |
| 6225 | trace_create_file("snapshot", 0644, d_tracer, | 6335 | trace_create_file("snapshot", 0644, d_tracer, |
| 6226 | tr, &snapshot_fops); | 6336 | tr, &snapshot_fops); |
| @@ -6243,12 +6353,6 @@ static __init int tracer_init_debugfs(void) | |||
| 6243 | 6353 | ||
| 6244 | init_tracer_debugfs(&global_trace, d_tracer); | 6354 | init_tracer_debugfs(&global_trace, d_tracer); |
| 6245 | 6355 | ||
| 6246 | trace_create_file("available_tracers", 0444, d_tracer, | ||
| 6247 | &global_trace, &show_traces_fops); | ||
| 6248 | |||
| 6249 | trace_create_file("current_tracer", 0644, d_tracer, | ||
| 6250 | &global_trace, &set_tracer_fops); | ||
| 6251 | |||
| 6252 | #ifdef CONFIG_TRACER_MAX_TRACE | 6356 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 6253 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 6357 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
| 6254 | &tracing_max_latency, &tracing_max_lat_fops); | 6358 | &tracing_max_latency, &tracing_max_lat_fops); |
| @@ -6494,11 +6598,16 @@ __init static int tracer_alloc_buffers(void) | |||
| 6494 | 6598 | ||
| 6495 | raw_spin_lock_init(&global_trace.start_lock); | 6599 | raw_spin_lock_init(&global_trace.start_lock); |
| 6496 | 6600 | ||
| 6601 | /* Used for event triggers */ | ||
| 6602 | temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); | ||
| 6603 | if (!temp_buffer) | ||
| 6604 | goto out_free_cpumask; | ||
| 6605 | |||
| 6497 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 6606 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
| 6498 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { | 6607 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { |
| 6499 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 6608 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
| 6500 | WARN_ON(1); | 6609 | WARN_ON(1); |
| 6501 | goto out_free_cpumask; | 6610 | goto out_free_temp_buffer; |
| 6502 | } | 6611 | } |
| 6503 | 6612 | ||
| 6504 | if (global_trace.buffer_disabled) | 6613 | if (global_trace.buffer_disabled) |
| @@ -6506,6 +6615,13 @@ __init static int tracer_alloc_buffers(void) | |||
| 6506 | 6615 | ||
| 6507 | trace_init_cmdlines(); | 6616 | trace_init_cmdlines(); |
| 6508 | 6617 | ||
| 6618 | if (trace_boot_clock) { | ||
| 6619 | ret = tracing_set_clock(&global_trace, trace_boot_clock); | ||
| 6620 | if (ret < 0) | ||
| 6621 | pr_warning("Trace clock %s not defined, going back to default\n", | ||
| 6622 | trace_boot_clock); | ||
| 6623 | } | ||
| 6624 | |||
| 6509 | /* | 6625 | /* |
| 6510 | * register_tracer() might reference current_trace, so it | 6626 | * register_tracer() might reference current_trace, so it |
| 6511 | * needs to be set before we register anything. This is | 6627 | * needs to be set before we register anything. This is |
| @@ -6540,6 +6656,8 @@ __init static int tracer_alloc_buffers(void) | |||
| 6540 | 6656 | ||
| 6541 | return 0; | 6657 | return 0; |
| 6542 | 6658 | ||
| 6659 | out_free_temp_buffer: | ||
| 6660 | ring_buffer_free(temp_buffer); | ||
| 6543 | out_free_cpumask: | 6661 | out_free_cpumask: |
| 6544 | free_percpu(global_trace.trace_buffer.data); | 6662 | free_percpu(global_trace.trace_buffer.data); |
| 6545 | #ifdef CONFIG_TRACER_MAX_TRACE | 6663 | #ifdef CONFIG_TRACER_MAX_TRACE |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 02b592f2d4b7..2e29d7ba5a52 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/hw_breakpoint.h> | 13 | #include <linux/hw_breakpoint.h> |
| 14 | #include <linux/trace_seq.h> | 14 | #include <linux/trace_seq.h> |
| 15 | #include <linux/ftrace_event.h> | 15 | #include <linux/ftrace_event.h> |
| 16 | #include <linux/compiler.h> | ||
| 16 | 17 | ||
| 17 | #ifdef CONFIG_FTRACE_SYSCALLS | 18 | #ifdef CONFIG_FTRACE_SYSCALLS |
| 18 | #include <asm/unistd.h> /* For NR_SYSCALLS */ | 19 | #include <asm/unistd.h> /* For NR_SYSCALLS */ |
| @@ -210,6 +211,11 @@ struct trace_array { | |||
| 210 | struct list_head events; | 211 | struct list_head events; |
| 211 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ | 212 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ |
| 212 | int ref; | 213 | int ref; |
| 214 | #ifdef CONFIG_FUNCTION_TRACER | ||
| 215 | struct ftrace_ops *ops; | ||
| 216 | /* function tracing enabled */ | ||
| 217 | int function_enabled; | ||
| 218 | #endif | ||
| 213 | }; | 219 | }; |
| 214 | 220 | ||
| 215 | enum { | 221 | enum { |
| @@ -355,14 +361,16 @@ struct tracer { | |||
| 355 | void (*print_header)(struct seq_file *m); | 361 | void (*print_header)(struct seq_file *m); |
| 356 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 362 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
| 357 | /* If you handled the flag setting, return 0 */ | 363 | /* If you handled the flag setting, return 0 */ |
| 358 | int (*set_flag)(u32 old_flags, u32 bit, int set); | 364 | int (*set_flag)(struct trace_array *tr, |
| 365 | u32 old_flags, u32 bit, int set); | ||
| 359 | /* Return 0 if OK with change, else return non-zero */ | 366 | /* Return 0 if OK with change, else return non-zero */ |
| 360 | int (*flag_changed)(struct tracer *tracer, | 367 | int (*flag_changed)(struct trace_array *tr, |
| 361 | u32 mask, int set); | 368 | u32 mask, int set); |
| 362 | struct tracer *next; | 369 | struct tracer *next; |
| 363 | struct tracer_flags *flags; | 370 | struct tracer_flags *flags; |
| 371 | int enabled; | ||
| 364 | bool print_max; | 372 | bool print_max; |
| 365 | bool enabled; | 373 | bool allow_instances; |
| 366 | #ifdef CONFIG_TRACER_MAX_TRACE | 374 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 367 | bool use_max_tr; | 375 | bool use_max_tr; |
| 368 | #endif | 376 | #endif |
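
Passing the trace_array into set_flag() and flag_changed() lets a tracer's option callbacks know which instance is being reconfigured rather than assuming the global one. A hedged sketch of a callback under the new prototype; restricting the option to the top-level array is just one example of what a tracer might do:

```c
/* set_flag under the new signature; "my_set_flag" is hypothetical. */
static int my_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
		return -EINVAL;		/* option only meaningful at the top level */

	/* ...apply the option for this trace_array... */
	return 0;
}
```
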
| @@ -812,13 +820,36 @@ static inline int ftrace_trace_task(struct task_struct *task) | |||
| 812 | return test_tsk_trace_trace(task); | 820 | return test_tsk_trace_trace(task); |
| 813 | } | 821 | } |
| 814 | extern int ftrace_is_dead(void); | 822 | extern int ftrace_is_dead(void); |
| 823 | int ftrace_create_function_files(struct trace_array *tr, | ||
| 824 | struct dentry *parent); | ||
| 825 | void ftrace_destroy_function_files(struct trace_array *tr); | ||
| 815 | #else | 826 | #else |
| 816 | static inline int ftrace_trace_task(struct task_struct *task) | 827 | static inline int ftrace_trace_task(struct task_struct *task) |
| 817 | { | 828 | { |
| 818 | return 1; | 829 | return 1; |
| 819 | } | 830 | } |
| 820 | static inline int ftrace_is_dead(void) { return 0; } | 831 | static inline int ftrace_is_dead(void) { return 0; } |
| 821 | #endif | 832 | static inline int |
| 833 | ftrace_create_function_files(struct trace_array *tr, | ||
| 834 | struct dentry *parent) | ||
| 835 | { | ||
| 836 | return 0; | ||
| 837 | } | ||
| 838 | static inline void ftrace_destroy_function_files(struct trace_array *tr) { } | ||
| 839 | #endif /* CONFIG_FUNCTION_TRACER */ | ||
| 840 | |||
| 841 | #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) | ||
| 842 | void ftrace_create_filter_files(struct ftrace_ops *ops, | ||
| 843 | struct dentry *parent); | ||
| 844 | void ftrace_destroy_filter_files(struct ftrace_ops *ops); | ||
| 845 | #else | ||
| 846 | /* | ||
| 847 | * The ops parameter passed in is usually undefined. | ||
| 848 | * This must be a macro. | ||
| 849 | */ | ||
| 850 | #define ftrace_create_filter_files(ops, parent) do { } while (0) | ||
| 851 | #define ftrace_destroy_filter_files(ops) do { } while (0) | ||
| 852 | #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ | ||
| 822 | 853 | ||
| 823 | int ftrace_event_is_function(struct ftrace_event_call *call); | 854 | int ftrace_event_is_function(struct ftrace_event_call *call); |
| 824 | 855 | ||
| @@ -1249,7 +1280,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); | |||
| 1249 | #undef FTRACE_ENTRY | 1280 | #undef FTRACE_ENTRY |
| 1250 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 1281 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
| 1251 | extern struct ftrace_event_call \ | 1282 | extern struct ftrace_event_call \ |
| 1252 | __attribute__((__aligned__(4))) event_##call; | 1283 | __aligned(4) event_##call; |
| 1253 | #undef FTRACE_ENTRY_DUP | 1284 | #undef FTRACE_ENTRY_DUP |
| 1254 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ | 1285 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ |
| 1255 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ | 1286 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index e854f420e033..c894614de14d 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
| @@ -31,9 +31,25 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event, | |||
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | /* The ftrace function trace is allowed only for root. */ | 33 | /* The ftrace function trace is allowed only for root. */ |
| 34 | if (ftrace_event_is_function(tp_event) && | 34 | if (ftrace_event_is_function(tp_event)) { |
| 35 | perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) | 35 | if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) |
| 36 | return -EPERM; | 36 | return -EPERM; |
| 37 | |||
| 38 | /* | ||
| 39 | * We don't allow user space callchains for the function trace | ||
| 40 | * event, due to issues with page faults while tracing the page | ||
| 41 | * fault handler, and the overall trickiness of doing so. | ||
| 42 | */ | ||
| 43 | if (!p_event->attr.exclude_callchain_user) | ||
| 44 | return -EINVAL; | ||
| 45 | |||
| 46 | /* | ||
| 47 | * Same reason to disable user stack dump as for user space | ||
| 48 | * callchains above. | ||
| 49 | */ | ||
| 50 | if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER) | ||
| 51 | return -EINVAL; | ||
| 52 | } | ||
| 37 | 53 | ||
| 38 | /* No tracing, just counting, so no obvious leak */ | 54 | /* No tracing, just counting, so no obvious leak */ |
| 39 | if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW)) | 55 | if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW)) |
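
For user space this means a perf_event_open() on the ftrace function trace event must now ask for kernel-only callchains and must not request user stack sampling, or the open fails with -EINVAL (and, as before, it typically requires CAP_SYS_ADMIN). A hedged user-space sketch; the tracepoint id is a placeholder that would normally be read from something like events/ftrace/function/id under the tracing debugfs:

```c
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a counter on the function trace event in a way the new checks accept. */
static int open_function_event(unsigned long long tracepoint_id)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = tracepoint_id;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.exclude_callchain_user = 1;	/* required by the check above */
	/* note: PERF_SAMPLE_STACK_USER must stay out of sample_type */

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
```
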
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e71ffd4eccb5..3ddfd8f62c05 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -27,12 +27,6 @@ | |||
| 27 | 27 | ||
| 28 | DEFINE_MUTEX(event_mutex); | 28 | DEFINE_MUTEX(event_mutex); |
| 29 | 29 | ||
| 30 | DEFINE_MUTEX(event_storage_mutex); | ||
| 31 | EXPORT_SYMBOL_GPL(event_storage_mutex); | ||
| 32 | |||
| 33 | char event_storage[EVENT_STORAGE_SIZE]; | ||
| 34 | EXPORT_SYMBOL_GPL(event_storage); | ||
| 35 | |||
| 36 | LIST_HEAD(ftrace_events); | 30 | LIST_HEAD(ftrace_events); |
| 37 | static LIST_HEAD(ftrace_common_fields); | 31 | static LIST_HEAD(ftrace_common_fields); |
| 38 | 32 | ||
| @@ -194,29 +188,60 @@ int trace_event_raw_init(struct ftrace_event_call *call) | |||
| 194 | } | 188 | } |
| 195 | EXPORT_SYMBOL_GPL(trace_event_raw_init); | 189 | EXPORT_SYMBOL_GPL(trace_event_raw_init); |
| 196 | 190 | ||
| 191 | void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, | ||
| 192 | struct ftrace_event_file *ftrace_file, | ||
| 193 | unsigned long len) | ||
| 194 | { | ||
| 195 | struct ftrace_event_call *event_call = ftrace_file->event_call; | ||
| 196 | |||
| 197 | local_save_flags(fbuffer->flags); | ||
| 198 | fbuffer->pc = preempt_count(); | ||
| 199 | fbuffer->ftrace_file = ftrace_file; | ||
| 200 | |||
| 201 | fbuffer->event = | ||
| 202 | trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, | ||
| 203 | event_call->event.type, len, | ||
| 204 | fbuffer->flags, fbuffer->pc); | ||
| 205 | if (!fbuffer->event) | ||
| 206 | return NULL; | ||
| 207 | |||
| 208 | fbuffer->entry = ring_buffer_event_data(fbuffer->event); | ||
| 209 | return fbuffer->entry; | ||
| 210 | } | ||
| 211 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); | ||
| 212 | |||
| 213 | void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) | ||
| 214 | { | ||
| 215 | event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, | ||
| 216 | fbuffer->event, fbuffer->entry, | ||
| 217 | fbuffer->flags, fbuffer->pc); | ||
| 218 | } | ||
| 219 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); | ||
| 220 | |||
| 197 | int ftrace_event_reg(struct ftrace_event_call *call, | 221 | int ftrace_event_reg(struct ftrace_event_call *call, |
| 198 | enum trace_reg type, void *data) | 222 | enum trace_reg type, void *data) |
| 199 | { | 223 | { |
| 200 | struct ftrace_event_file *file = data; | 224 | struct ftrace_event_file *file = data; |
| 201 | 225 | ||
| 226 | WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); | ||
| 202 | switch (type) { | 227 | switch (type) { |
| 203 | case TRACE_REG_REGISTER: | 228 | case TRACE_REG_REGISTER: |
| 204 | return tracepoint_probe_register(call->name, | 229 | return tracepoint_probe_register(call->tp, |
| 205 | call->class->probe, | 230 | call->class->probe, |
| 206 | file); | 231 | file); |
| 207 | case TRACE_REG_UNREGISTER: | 232 | case TRACE_REG_UNREGISTER: |
| 208 | tracepoint_probe_unregister(call->name, | 233 | tracepoint_probe_unregister(call->tp, |
| 209 | call->class->probe, | 234 | call->class->probe, |
| 210 | file); | 235 | file); |
| 211 | return 0; | 236 | return 0; |
| 212 | 237 | ||
| 213 | #ifdef CONFIG_PERF_EVENTS | 238 | #ifdef CONFIG_PERF_EVENTS |
| 214 | case TRACE_REG_PERF_REGISTER: | 239 | case TRACE_REG_PERF_REGISTER: |
| 215 | return tracepoint_probe_register(call->name, | 240 | return tracepoint_probe_register(call->tp, |
| 216 | call->class->perf_probe, | 241 | call->class->perf_probe, |
| 217 | call); | 242 | call); |
| 218 | case TRACE_REG_PERF_UNREGISTER: | 243 | case TRACE_REG_PERF_UNREGISTER: |
| 219 | tracepoint_probe_unregister(call->name, | 244 | tracepoint_probe_unregister(call->tp, |
| 220 | call->class->perf_probe, | 245 | call->class->perf_probe, |
| 221 | call); | 246 | call); |
| 222 | return 0; | 247 | return 0; |
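
ftrace_event_buffer_reserve() and ftrace_event_buffer_commit() bundle the flags/preempt-count bookkeeping, the buffer reservation and the trigger-aware commit so generated event probes no longer open-code them. A hedged sketch of how a probe might use the pair; the event layout and all my_* names are hypothetical, and real generated probes typically also check soft-disable state and filtering first:

```c
struct my_event_entry {			/* hypothetical on-buffer layout */
	struct trace_entry	ent;
	unsigned long		arg;
};

static void probe_my_event(void *data, unsigned long arg)
{
	struct ftrace_event_file *ftrace_file = data;
	struct ftrace_event_buffer fbuffer;
	struct my_event_entry *entry;

	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
					    sizeof(*entry));
	if (!entry)
		return;

	entry->arg = arg;			/* fill in the payload */
	ftrace_event_buffer_commit(&fbuffer);
}
```
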
| @@ -328,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, | |||
| 328 | if (ret) { | 353 | if (ret) { |
| 329 | tracing_stop_cmdline_record(); | 354 | tracing_stop_cmdline_record(); |
| 330 | pr_info("event trace: Could not enable event " | 355 | pr_info("event trace: Could not enable event " |
| 331 | "%s\n", call->name); | 356 | "%s\n", ftrace_event_name(call)); |
| 332 | break; | 357 | break; |
| 333 | } | 358 | } |
| 334 | set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); | 359 | set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); |
| @@ -457,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, | |||
| 457 | { | 482 | { |
| 458 | struct ftrace_event_file *file; | 483 | struct ftrace_event_file *file; |
| 459 | struct ftrace_event_call *call; | 484 | struct ftrace_event_call *call; |
| 485 | const char *name; | ||
| 460 | int ret = -EINVAL; | 486 | int ret = -EINVAL; |
| 461 | 487 | ||
| 462 | list_for_each_entry(file, &tr->events, list) { | 488 | list_for_each_entry(file, &tr->events, list) { |
| 463 | 489 | ||
| 464 | call = file->event_call; | 490 | call = file->event_call; |
| 491 | name = ftrace_event_name(call); | ||
| 465 | 492 | ||
| 466 | if (!call->name || !call->class || !call->class->reg) | 493 | if (!name || !call->class || !call->class->reg) |
| 467 | continue; | 494 | continue; |
| 468 | 495 | ||
| 469 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) | 496 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) |
| 470 | continue; | 497 | continue; |
| 471 | 498 | ||
| 472 | if (match && | 499 | if (match && |
| 473 | strcmp(match, call->name) != 0 && | 500 | strcmp(match, name) != 0 && |
| 474 | strcmp(match, call->class->system) != 0) | 501 | strcmp(match, call->class->system) != 0) |
| 475 | continue; | 502 | continue; |
| 476 | 503 | ||
| 477 | if (sub && strcmp(sub, call->class->system) != 0) | 504 | if (sub && strcmp(sub, call->class->system) != 0) |
| 478 | continue; | 505 | continue; |
| 479 | 506 | ||
| 480 | if (event && strcmp(event, call->name) != 0) | 507 | if (event && strcmp(event, name) != 0) |
| 481 | continue; | 508 | continue; |
| 482 | 509 | ||
| 483 | ftrace_event_enable_disable(file, set); | 510 | ftrace_event_enable_disable(file, set); |
| @@ -675,7 +702,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 675 | 702 | ||
| 676 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) | 703 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) |
| 677 | seq_printf(m, "%s:", call->class->system); | 704 | seq_printf(m, "%s:", call->class->system); |
| 678 | seq_printf(m, "%s\n", call->name); | 705 | seq_printf(m, "%s\n", ftrace_event_name(call)); |
| 679 | 706 | ||
| 680 | return 0; | 707 | return 0; |
| 681 | } | 708 | } |
| @@ -768,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
| 768 | mutex_lock(&event_mutex); | 795 | mutex_lock(&event_mutex); |
| 769 | list_for_each_entry(file, &tr->events, list) { | 796 | list_for_each_entry(file, &tr->events, list) { |
| 770 | call = file->event_call; | 797 | call = file->event_call; |
| 771 | if (!call->name || !call->class || !call->class->reg) | 798 | if (!ftrace_event_name(call) || !call->class || !call->class->reg) |
| 772 | continue; | 799 | continue; |
| 773 | 800 | ||
| 774 | if (system && strcmp(call->class->system, system->name) != 0) | 801 | if (system && strcmp(call->class->system, system->name) != 0) |
| @@ -883,7 +910,7 @@ static int f_show(struct seq_file *m, void *v) | |||
| 883 | 910 | ||
| 884 | switch ((unsigned long)v) { | 911 | switch ((unsigned long)v) { |
| 885 | case FORMAT_HEADER: | 912 | case FORMAT_HEADER: |
| 886 | seq_printf(m, "name: %s\n", call->name); | 913 | seq_printf(m, "name: %s\n", ftrace_event_name(call)); |
| 887 | seq_printf(m, "ID: %d\n", call->event.type); | 914 | seq_printf(m, "ID: %d\n", call->event.type); |
| 888 | seq_printf(m, "format:\n"); | 915 | seq_printf(m, "format:\n"); |
| 889 | return 0; | 916 | return 0; |
| @@ -1503,6 +1530,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1503 | struct trace_array *tr = file->tr; | 1530 | struct trace_array *tr = file->tr; |
| 1504 | struct list_head *head; | 1531 | struct list_head *head; |
| 1505 | struct dentry *d_events; | 1532 | struct dentry *d_events; |
| 1533 | const char *name; | ||
| 1506 | int ret; | 1534 | int ret; |
| 1507 | 1535 | ||
| 1508 | /* | 1536 | /* |
| @@ -1516,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1516 | } else | 1544 | } else |
| 1517 | d_events = parent; | 1545 | d_events = parent; |
| 1518 | 1546 | ||
| 1519 | file->dir = debugfs_create_dir(call->name, d_events); | 1547 | name = ftrace_event_name(call); |
| 1548 | file->dir = debugfs_create_dir(name, d_events); | ||
| 1520 | if (!file->dir) { | 1549 | if (!file->dir) { |
| 1521 | pr_warning("Could not create debugfs '%s' directory\n", | 1550 | pr_warning("Could not create debugfs '%s' directory\n", |
| 1522 | call->name); | 1551 | name); |
| 1523 | return -1; | 1552 | return -1; |
| 1524 | } | 1553 | } |
| 1525 | 1554 | ||
| @@ -1543,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1543 | ret = call->class->define_fields(call); | 1572 | ret = call->class->define_fields(call); |
| 1544 | if (ret < 0) { | 1573 | if (ret < 0) { |
| 1545 | pr_warning("Could not initialize trace point" | 1574 | pr_warning("Could not initialize trace point" |
| 1546 | " events/%s\n", call->name); | 1575 | " events/%s\n", name); |
| 1547 | return -1; | 1576 | return -1; |
| 1548 | } | 1577 | } |
| 1549 | } | 1578 | } |
| @@ -1607,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call) | |||
| 1607 | static int event_init(struct ftrace_event_call *call) | 1636 | static int event_init(struct ftrace_event_call *call) |
| 1608 | { | 1637 | { |
| 1609 | int ret = 0; | 1638 | int ret = 0; |
| 1639 | const char *name; | ||
| 1610 | 1640 | ||
| 1611 | if (WARN_ON(!call->name)) | 1641 | name = ftrace_event_name(call); |
| 1642 | if (WARN_ON(!name)) | ||
| 1612 | return -EINVAL; | 1643 | return -EINVAL; |
| 1613 | 1644 | ||
| 1614 | if (call->class->raw_init) { | 1645 | if (call->class->raw_init) { |
| 1615 | ret = call->class->raw_init(call); | 1646 | ret = call->class->raw_init(call); |
| 1616 | if (ret < 0 && ret != -ENOSYS) | 1647 | if (ret < 0 && ret != -ENOSYS) |
| 1617 | pr_warn("Could not initialize trace events/%s\n", | 1648 | pr_warn("Could not initialize trace events/%s\n", |
| 1618 | call->name); | 1649 | name); |
| 1619 | } | 1650 | } |
| 1620 | 1651 | ||
| 1621 | return ret; | 1652 | return ret; |
| @@ -1777,6 +1808,16 @@ static void trace_module_add_events(struct module *mod) | |||
| 1777 | { | 1808 | { |
| 1778 | struct ftrace_event_call **call, **start, **end; | 1809 | struct ftrace_event_call **call, **start, **end; |
| 1779 | 1810 | ||
| 1811 | if (!mod->num_trace_events) | ||
| 1812 | return; | ||
| 1813 | |||
| 1814 | /* Don't add infrastructure for mods without tracepoints */ | ||
| 1815 | if (trace_module_has_bad_taint(mod)) { | ||
| 1816 | pr_err("%s: module has bad taint, not creating trace events\n", | ||
| 1817 | mod->name); | ||
| 1818 | return; | ||
| 1819 | } | ||
| 1820 | |||
| 1780 | start = mod->trace_events; | 1821 | start = mod->trace_events; |
| 1781 | end = mod->trace_events + mod->num_trace_events; | 1822 | end = mod->trace_events + mod->num_trace_events; |
| 1782 | 1823 | ||
| @@ -1851,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr) | |||
| 1851 | ret = __trace_add_new_event(call, tr); | 1892 | ret = __trace_add_new_event(call, tr); |
| 1852 | if (ret < 0) | 1893 | if (ret < 0) |
| 1853 | pr_warning("Could not create directory for event %s\n", | 1894 | pr_warning("Could not create directory for event %s\n", |
| 1854 | call->name); | 1895 | ftrace_event_name(call)); |
| 1855 | } | 1896 | } |
| 1856 | } | 1897 | } |
| 1857 | 1898 | ||
| @@ -1860,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system, const char *event) | |||
| 1860 | { | 1901 | { |
| 1861 | struct ftrace_event_file *file; | 1902 | struct ftrace_event_file *file; |
| 1862 | struct ftrace_event_call *call; | 1903 | struct ftrace_event_call *call; |
| 1904 | const char *name; | ||
| 1863 | 1905 | ||
| 1864 | list_for_each_entry(file, &tr->events, list) { | 1906 | list_for_each_entry(file, &tr->events, list) { |
| 1865 | 1907 | ||
| 1866 | call = file->event_call; | 1908 | call = file->event_call; |
| 1909 | name = ftrace_event_name(call); | ||
| 1867 | 1910 | ||
| 1868 | if (!call->name || !call->class || !call->class->reg) | 1911 | if (!name || !call->class || !call->class->reg) |
| 1869 | continue; | 1912 | continue; |
| 1870 | 1913 | ||
| 1871 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) | 1914 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) |
| 1872 | continue; | 1915 | continue; |
| 1873 | 1916 | ||
| 1874 | if (strcmp(event, call->name) == 0 && | 1917 | if (strcmp(event, name) == 0 && |
| 1875 | strcmp(system, call->class->system) == 0) | 1918 | strcmp(system, call->class->system) == 0) |
| 1876 | return file; | 1919 | return file; |
| 1877 | } | 1920 | } |
| @@ -1939,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, | |||
| 1939 | seq_printf(m, "%s:%s:%s", | 1982 | seq_printf(m, "%s:%s:%s", |
| 1940 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 1983 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
| 1941 | data->file->event_call->class->system, | 1984 | data->file->event_call->class->system, |
| 1942 | data->file->event_call->name); | 1985 | ftrace_event_name(data->file->event_call)); |
| 1943 | 1986 | ||
| 1944 | if (data->count == -1) | 1987 | if (data->count == -1) |
| 1945 | seq_printf(m, ":unlimited\n"); | 1988 | seq_printf(m, ":unlimited\n"); |
| @@ -2159,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr) | |||
| 2159 | ret = event_create_dir(tr->event_dir, file); | 2202 | ret = event_create_dir(tr->event_dir, file); |
| 2160 | if (ret < 0) | 2203 | if (ret < 0) |
| 2161 | pr_warning("Could not create directory for event %s\n", | 2204 | pr_warning("Could not create directory for event %s\n", |
| 2162 | file->event_call->name); | 2205 | ftrace_event_name(file->event_call)); |
| 2163 | } | 2206 | } |
| 2164 | } | 2207 | } |
| 2165 | 2208 | ||
| @@ -2183,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr) | |||
| 2183 | ret = __trace_early_add_new_event(call, tr); | 2226 | ret = __trace_early_add_new_event(call, tr); |
| 2184 | if (ret < 0) | 2227 | if (ret < 0) |
| 2185 | pr_warning("Could not create early event %s\n", | 2228 | pr_warning("Could not create early event %s\n", |
| 2186 | call->name); | 2229 | ftrace_event_name(call)); |
| 2187 | } | 2230 | } |
| 2188 | } | 2231 | } |
| 2189 | 2232 | ||
| @@ -2515,7 +2558,7 @@ static __init void event_trace_self_tests(void) | |||
| 2515 | continue; | 2558 | continue; |
| 2516 | #endif | 2559 | #endif |
| 2517 | 2560 | ||
| 2518 | pr_info("Testing event %s: ", call->name); | 2561 | pr_info("Testing event %s: ", ftrace_event_name(call)); |
| 2519 | 2562 | ||
| 2520 | /* | 2563 | /* |
| 2521 | * If an event is already enabled, someone is using | 2564 | * If an event is already enabled, someone is using |
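
Every call->name access in the trace_events.c hunks above is rerouted through ftrace_event_name(). The helper itself lives in include/linux/ftrace_event.h, which is outside this kernel/-only diffstat; the sketch below shows the shape the call sites assume, inferred from the anonymous-union initializer visible in the trace_export.c hunk further down (the flag and field names are assumptions, not quoted from this diff).

/*
 * Sketch of the assumed helper (include/linux/ftrace_event.h, outside this
 * kernel/-only diffstat).  Converted events keep a struct tracepoint pointer
 * in an anonymous union with the old ->name field, selected by a flag, so
 * every name lookup has to go through this accessor instead of ->name.
 */
static inline const char *ftrace_event_name(struct ftrace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;

	return call->name;
}
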
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 8efbb69b04f0..925f537f07d1 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
| @@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | |||
| 1095 | seq_printf(m, "%s:%s:%s", | 1095 | seq_printf(m, "%s:%s:%s", |
| 1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
| 1097 | enable_data->file->event_call->class->system, | 1097 | enable_data->file->event_call->class->system, |
| 1098 | enable_data->file->event_call->name); | 1098 | ftrace_event_name(enable_data->file->event_call)); |
| 1099 | 1099 | ||
| 1100 | if (data->count == -1) | 1100 | if (data->count == -1) |
| 1101 | seq_puts(m, ":unlimited"); | 1101 | seq_puts(m, ":unlimited"); |
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 7c3e3e72e2b6..d4ddde28a81a 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
| @@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \ | |||
| 95 | #undef __array | 95 | #undef __array |
| 96 | #define __array(type, item, len) \ | 96 | #define __array(type, item, len) \ |
| 97 | do { \ | 97 | do { \ |
| 98 | char *type_str = #type"["__stringify(len)"]"; \ | ||
| 98 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 99 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
| 99 | mutex_lock(&event_storage_mutex); \ | 100 | ret = trace_define_field(event_call, type_str, #item, \ |
| 100 | snprintf(event_storage, sizeof(event_storage), \ | ||
| 101 | "%s[%d]", #type, len); \ | ||
| 102 | ret = trace_define_field(event_call, event_storage, #item, \ | ||
| 103 | offsetof(typeof(field), item), \ | 101 | offsetof(typeof(field), item), \ |
| 104 | sizeof(field.item), \ | 102 | sizeof(field.item), \ |
| 105 | is_signed_type(type), filter_type); \ | 103 | is_signed_type(type), filter_type); \ |
| 106 | mutex_unlock(&event_storage_mutex); \ | ||
| 107 | if (ret) \ | 104 | if (ret) \ |
| 108 | return ret; \ | 105 | return ret; \ |
| 109 | } while (0); | 106 | } while (0); |
| @@ -176,9 +173,11 @@ struct ftrace_event_class __refdata event_class_ftrace_##call = { \ | |||
| 176 | }; \ | 173 | }; \ |
| 177 | \ | 174 | \ |
| 178 | struct ftrace_event_call __used event_##call = { \ | 175 | struct ftrace_event_call __used event_##call = { \ |
| 179 | .name = #call, \ | ||
| 180 | .event.type = etype, \ | ||
| 181 | .class = &event_class_ftrace_##call, \ | 176 | .class = &event_class_ftrace_##call, \ |
| 177 | { \ | ||
| 178 | .name = #call, \ | ||
| 179 | }, \ | ||
| 180 | .event.type = etype, \ | ||
| 182 | .print_fmt = print, \ | 181 | .print_fmt = print, \ |
| 183 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ | 182 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ |
| 184 | }; \ | 183 | }; \ |
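
The __array() rewrite above removes the shared event_storage buffer and event_storage_mutex: the field's type string is now pasted together by the preprocessor instead of being formatted at runtime into a global scratch buffer. The extra braces around .name = #call in the event_##call initializer just above exist because the name has moved into an anonymous union (see the ftrace_event_name() sketch earlier). A worked expansion for an illustrative field, showing what the new type_str line produces:

/* Illustrative expansion: __array(char, comm, 16) inside the do/while now
 * yields a compile-time string literal, so no locking or snprintf() is needed. */
char *type_str = "char" "[" "16" "]";		/* adjacent literals: "char[16]" */
BUILD_BUG_ON(16 > MAX_FILTER_STR_VAL);
ret = trace_define_field(event_call, type_str, "comm",
			 offsetof(typeof(field), comm),
			 sizeof(field.comm),
			 is_signed_type(char), filter_type);
if (ret)
	return ret;
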
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 38fe1483c508..5b781d2be383 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
| @@ -13,32 +13,106 @@ | |||
| 13 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
| 14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
| 15 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
| 16 | #include <linux/slab.h> | ||
| 16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
| 17 | 18 | ||
| 18 | #include "trace.h" | 19 | #include "trace.h" |
| 19 | 20 | ||
| 20 | /* function tracing enabled */ | 21 | static void tracing_start_function_trace(struct trace_array *tr); |
| 21 | static int ftrace_function_enabled; | 22 | static void tracing_stop_function_trace(struct trace_array *tr); |
| 23 | static void | ||
| 24 | function_trace_call(unsigned long ip, unsigned long parent_ip, | ||
| 25 | struct ftrace_ops *op, struct pt_regs *pt_regs); | ||
| 26 | static void | ||
| 27 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | ||
| 28 | struct ftrace_ops *op, struct pt_regs *pt_regs); | ||
| 29 | static struct ftrace_ops trace_ops; | ||
| 30 | static struct ftrace_ops trace_stack_ops; | ||
| 31 | static struct tracer_flags func_flags; | ||
| 32 | |||
| 33 | /* Our option */ | ||
| 34 | enum { | ||
| 35 | TRACE_FUNC_OPT_STACK = 0x1, | ||
| 36 | }; | ||
| 37 | |||
| 38 | static int allocate_ftrace_ops(struct trace_array *tr) | ||
| 39 | { | ||
| 40 | struct ftrace_ops *ops; | ||
| 41 | |||
| 42 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); | ||
| 43 | if (!ops) | ||
| 44 | return -ENOMEM; | ||
| 22 | 45 | ||
| 23 | static struct trace_array *func_trace; | 46 | /* Currently only the non-stack version is supported */ |
| 47 | ops->func = function_trace_call; | ||
| 48 | ops->flags = FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 49 | |||
| 50 | tr->ops = ops; | ||
| 51 | ops->private = tr; | ||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | |||
| 56 | int ftrace_create_function_files(struct trace_array *tr, | ||
| 57 | struct dentry *parent) | ||
| 58 | { | ||
| 59 | int ret; | ||
| 60 | |||
| 61 | /* The top level array uses the "global_ops". */ | ||
| 62 | if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) { | ||
| 63 | ret = allocate_ftrace_ops(tr); | ||
| 64 | if (ret) | ||
| 65 | return ret; | ||
| 66 | } | ||
| 67 | |||
| 68 | ftrace_create_filter_files(tr->ops, parent); | ||
| 69 | |||
| 70 | return 0; | ||
| 71 | } | ||
| 24 | 72 | ||
| 25 | static void tracing_start_function_trace(void); | 73 | void ftrace_destroy_function_files(struct trace_array *tr) |
| 26 | static void tracing_stop_function_trace(void); | 74 | { |
| 75 | ftrace_destroy_filter_files(tr->ops); | ||
| 76 | kfree(tr->ops); | ||
| 77 | tr->ops = NULL; | ||
| 78 | } | ||
| 27 | 79 | ||
| 28 | static int function_trace_init(struct trace_array *tr) | 80 | static int function_trace_init(struct trace_array *tr) |
| 29 | { | 81 | { |
| 30 | func_trace = tr; | 82 | struct ftrace_ops *ops; |
| 83 | |||
| 84 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | ||
| 85 | /* There's only one global tr */ | ||
| 86 | if (!trace_ops.private) { | ||
| 87 | trace_ops.private = tr; | ||
| 88 | trace_stack_ops.private = tr; | ||
| 89 | } | ||
| 90 | |||
| 91 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | ||
| 92 | ops = &trace_stack_ops; | ||
| 93 | else | ||
| 94 | ops = &trace_ops; | ||
| 95 | tr->ops = ops; | ||
| 96 | } else if (!tr->ops) { | ||
| 97 | /* | ||
| 98 | * Instance trace_arrays get their ops allocated | ||
| 99 | * at instance creation. Unless it failed | ||
| 100 | * the allocation. | ||
| 101 | */ | ||
| 102 | return -ENOMEM; | ||
| 103 | } | ||
| 104 | |||
| 31 | tr->trace_buffer.cpu = get_cpu(); | 105 | tr->trace_buffer.cpu = get_cpu(); |
| 32 | put_cpu(); | 106 | put_cpu(); |
| 33 | 107 | ||
| 34 | tracing_start_cmdline_record(); | 108 | tracing_start_cmdline_record(); |
| 35 | tracing_start_function_trace(); | 109 | tracing_start_function_trace(tr); |
| 36 | return 0; | 110 | return 0; |
| 37 | } | 111 | } |
| 38 | 112 | ||
| 39 | static void function_trace_reset(struct trace_array *tr) | 113 | static void function_trace_reset(struct trace_array *tr) |
| 40 | { | 114 | { |
| 41 | tracing_stop_function_trace(); | 115 | tracing_stop_function_trace(tr); |
| 42 | tracing_stop_cmdline_record(); | 116 | tracing_stop_cmdline_record(); |
| 43 | } | 117 | } |
| 44 | 118 | ||
| @@ -47,25 +121,18 @@ static void function_trace_start(struct trace_array *tr) | |||
| 47 | tracing_reset_online_cpus(&tr->trace_buffer); | 121 | tracing_reset_online_cpus(&tr->trace_buffer); |
| 48 | } | 122 | } |
| 49 | 123 | ||
| 50 | /* Our option */ | ||
| 51 | enum { | ||
| 52 | TRACE_FUNC_OPT_STACK = 0x1, | ||
| 53 | }; | ||
| 54 | |||
| 55 | static struct tracer_flags func_flags; | ||
| 56 | |||
| 57 | static void | 124 | static void |
| 58 | function_trace_call(unsigned long ip, unsigned long parent_ip, | 125 | function_trace_call(unsigned long ip, unsigned long parent_ip, |
| 59 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 126 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
| 60 | { | 127 | { |
| 61 | struct trace_array *tr = func_trace; | 128 | struct trace_array *tr = op->private; |
| 62 | struct trace_array_cpu *data; | 129 | struct trace_array_cpu *data; |
| 63 | unsigned long flags; | 130 | unsigned long flags; |
| 64 | int bit; | 131 | int bit; |
| 65 | int cpu; | 132 | int cpu; |
| 66 | int pc; | 133 | int pc; |
| 67 | 134 | ||
| 68 | if (unlikely(!ftrace_function_enabled)) | 135 | if (unlikely(!tr->function_enabled)) |
| 69 | return; | 136 | return; |
| 70 | 137 | ||
| 71 | pc = preempt_count(); | 138 | pc = preempt_count(); |
| @@ -91,14 +158,14 @@ static void | |||
| 91 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | 158 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, |
| 92 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 159 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
| 93 | { | 160 | { |
| 94 | struct trace_array *tr = func_trace; | 161 | struct trace_array *tr = op->private; |
| 95 | struct trace_array_cpu *data; | 162 | struct trace_array_cpu *data; |
| 96 | unsigned long flags; | 163 | unsigned long flags; |
| 97 | long disabled; | 164 | long disabled; |
| 98 | int cpu; | 165 | int cpu; |
| 99 | int pc; | 166 | int pc; |
| 100 | 167 | ||
| 101 | if (unlikely(!ftrace_function_enabled)) | 168 | if (unlikely(!tr->function_enabled)) |
| 102 | return; | 169 | return; |
| 103 | 170 | ||
| 104 | /* | 171 | /* |
| @@ -128,7 +195,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | |||
| 128 | local_irq_restore(flags); | 195 | local_irq_restore(flags); |
| 129 | } | 196 | } |
| 130 | 197 | ||
| 131 | |||
| 132 | static struct ftrace_ops trace_ops __read_mostly = | 198 | static struct ftrace_ops trace_ops __read_mostly = |
| 133 | { | 199 | { |
| 134 | .func = function_trace_call, | 200 | .func = function_trace_call, |
| @@ -153,29 +219,21 @@ static struct tracer_flags func_flags = { | |||
| 153 | .opts = func_opts | 219 | .opts = func_opts |
| 154 | }; | 220 | }; |
| 155 | 221 | ||
| 156 | static void tracing_start_function_trace(void) | 222 | static void tracing_start_function_trace(struct trace_array *tr) |
| 157 | { | 223 | { |
| 158 | ftrace_function_enabled = 0; | 224 | tr->function_enabled = 0; |
| 159 | 225 | register_ftrace_function(tr->ops); | |
| 160 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | 226 | tr->function_enabled = 1; |
| 161 | register_ftrace_function(&trace_stack_ops); | ||
| 162 | else | ||
| 163 | register_ftrace_function(&trace_ops); | ||
| 164 | |||
| 165 | ftrace_function_enabled = 1; | ||
| 166 | } | 227 | } |
| 167 | 228 | ||
| 168 | static void tracing_stop_function_trace(void) | 229 | static void tracing_stop_function_trace(struct trace_array *tr) |
| 169 | { | 230 | { |
| 170 | ftrace_function_enabled = 0; | 231 | tr->function_enabled = 0; |
| 171 | 232 | unregister_ftrace_function(tr->ops); | |
| 172 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | ||
| 173 | unregister_ftrace_function(&trace_stack_ops); | ||
| 174 | else | ||
| 175 | unregister_ftrace_function(&trace_ops); | ||
| 176 | } | 233 | } |
| 177 | 234 | ||
| 178 | static int func_set_flag(u32 old_flags, u32 bit, int set) | 235 | static int |
| 236 | func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 179 | { | 237 | { |
| 180 | switch (bit) { | 238 | switch (bit) { |
| 181 | case TRACE_FUNC_OPT_STACK: | 239 | case TRACE_FUNC_OPT_STACK: |
| @@ -183,12 +241,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set) | |||
| 183 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) | 241 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) |
| 184 | break; | 242 | break; |
| 185 | 243 | ||
| 244 | unregister_ftrace_function(tr->ops); | ||
| 245 | |||
| 186 | if (set) { | 246 | if (set) { |
| 187 | unregister_ftrace_function(&trace_ops); | 247 | tr->ops = &trace_stack_ops; |
| 188 | register_ftrace_function(&trace_stack_ops); | 248 | register_ftrace_function(tr->ops); |
| 189 | } else { | 249 | } else { |
| 190 | unregister_ftrace_function(&trace_stack_ops); | 250 | tr->ops = &trace_ops; |
| 191 | register_ftrace_function(&trace_ops); | 251 | register_ftrace_function(tr->ops); |
| 192 | } | 252 | } |
| 193 | 253 | ||
| 194 | break; | 254 | break; |
| @@ -208,6 +268,7 @@ static struct tracer function_trace __tracer_data = | |||
| 208 | .wait_pipe = poll_wait_pipe, | 268 | .wait_pipe = poll_wait_pipe, |
| 209 | .flags = &func_flags, | 269 | .flags = &func_flags, |
| 210 | .set_flag = func_set_flag, | 270 | .set_flag = func_set_flag, |
| 271 | .allow_instances = true, | ||
| 211 | #ifdef CONFIG_FTRACE_SELFTEST | 272 | #ifdef CONFIG_FTRACE_SELFTEST |
| 212 | .selftest = trace_selftest_startup_function, | 273 | .selftest = trace_selftest_startup_function, |
| 213 | #endif | 274 | #endif |
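
The trace_functions.c rework keys everything off ftrace_ops::private instead of the old func_trace global, which is what lets each trace_array instance own its own function-tracer ops; with .allow_instances set, every instance that runs the function tracer ends up registering an independent ftrace_ops. A minimal sketch of the pattern the callbacks above rely on (names other than the ftrace_ops API are illustrative):

#include <linux/ftrace.h>
#include <linux/slab.h>

/* Each instance gets its own ops; ->private points back at the instance. */
static void my_instance_func(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;	/* recover the owning instance */

	if (!tr->function_enabled)
		return;
	/* ... record the hit into tr->trace_buffer ... */
}

static int my_instance_attach(struct trace_array *tr)
{
	struct ftrace_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

	if (!ops)
		return -ENOMEM;
	ops->func = my_instance_func;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
	ops->private = tr;
	tr->ops = ops;
	return register_ftrace_function(ops);
}
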
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 0b99120d395c..deff11200261 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -1476,7 +1476,8 @@ void graph_trace_close(struct trace_iterator *iter) | |||
| 1476 | } | 1476 | } |
| 1477 | } | 1477 | } |
| 1478 | 1478 | ||
| 1479 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) | 1479 | static int |
| 1480 | func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 1480 | { | 1481 | { |
| 1481 | if (bit == TRACE_GRAPH_PRINT_IRQS) | 1482 | if (bit == TRACE_GRAPH_PRINT_IRQS) |
| 1482 | ftrace_graph_skip_irqs = !set; | 1483 | ftrace_graph_skip_irqs = !set; |
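
func_graph_set_flag() is one of several set_flag callbacks converted in this merge (func_set_flag, irqsoff_set_flag, wakeup_set_flag and nop_set_flag are the others visible here); the new struct trace_array argument tells the callback which instance's trace_options file was written, so it can reconfigure that instance, as func_set_flag does by swapping tr->ops, instead of a global. A minimal sketch of the converted callback shape, with illustrative names:

/* Illustrative option bit for a hypothetical tracer. */
enum { MY_TRACER_OPT_VERBOSE = 0x1 };

static int my_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case MY_TRACER_OPT_VERBOSE:
		/* act on this instance only, e.g. adjust tr->ops */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
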
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2aefbee93a6d..8ff02cbb892f 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -160,7 +160,8 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
| 160 | #endif /* CONFIG_FUNCTION_TRACER */ | 160 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 161 | 161 | ||
| 162 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 162 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 163 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | 163 | static int |
| 164 | irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 164 | { | 165 | { |
| 165 | int cpu; | 166 | int cpu; |
| 166 | 167 | ||
| @@ -266,7 +267,8 @@ __trace_function(struct trace_array *tr, | |||
| 266 | #else | 267 | #else |
| 267 | #define __trace_function trace_function | 268 | #define __trace_function trace_function |
| 268 | 269 | ||
| 269 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | 270 | static int |
| 271 | irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 270 | { | 272 | { |
| 271 | return -EINVAL; | 273 | return -EINVAL; |
| 272 | } | 274 | } |
| @@ -498,14 +500,14 @@ void trace_hardirqs_off(void) | |||
| 498 | } | 500 | } |
| 499 | EXPORT_SYMBOL(trace_hardirqs_off); | 501 | EXPORT_SYMBOL(trace_hardirqs_off); |
| 500 | 502 | ||
| 501 | void trace_hardirqs_on_caller(unsigned long caller_addr) | 503 | __visible void trace_hardirqs_on_caller(unsigned long caller_addr) |
| 502 | { | 504 | { |
| 503 | if (!preempt_trace() && irq_trace()) | 505 | if (!preempt_trace() && irq_trace()) |
| 504 | stop_critical_timing(CALLER_ADDR0, caller_addr); | 506 | stop_critical_timing(CALLER_ADDR0, caller_addr); |
| 505 | } | 507 | } |
| 506 | EXPORT_SYMBOL(trace_hardirqs_on_caller); | 508 | EXPORT_SYMBOL(trace_hardirqs_on_caller); |
| 507 | 509 | ||
| 508 | void trace_hardirqs_off_caller(unsigned long caller_addr) | 510 | __visible void trace_hardirqs_off_caller(unsigned long caller_addr) |
| 509 | { | 511 | { |
| 510 | if (!preempt_trace() && irq_trace()) | 512 | if (!preempt_trace() && irq_trace()) |
| 511 | start_critical_timing(CALLER_ADDR0, caller_addr); | 513 | start_critical_timing(CALLER_ADDR0, caller_addr); |
| @@ -570,8 +572,10 @@ static void irqsoff_function_set(int set) | |||
| 570 | unregister_irqsoff_function(is_graph()); | 572 | unregister_irqsoff_function(is_graph()); |
| 571 | } | 573 | } |
| 572 | 574 | ||
| 573 | static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set) | 575 | static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) |
| 574 | { | 576 | { |
| 577 | struct tracer *tracer = tr->current_trace; | ||
| 578 | |||
| 575 | if (mask & TRACE_ITER_FUNCTION) | 579 | if (mask & TRACE_ITER_FUNCTION) |
| 576 | irqsoff_function_set(set); | 580 | irqsoff_function_set(set); |
| 577 | 581 | ||
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index bdbae450c13e..903ae28962be 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -35,11 +35,6 @@ struct trace_kprobe { | |||
| 35 | struct trace_probe tp; | 35 | struct trace_probe tp; |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | struct event_file_link { | ||
| 39 | struct ftrace_event_file *file; | ||
| 40 | struct list_head list; | ||
| 41 | }; | ||
| 42 | |||
| 43 | #define SIZEOF_TRACE_KPROBE(n) \ | 38 | #define SIZEOF_TRACE_KPROBE(n) \ |
| 44 | (offsetof(struct trace_kprobe, tp.args) + \ | 39 | (offsetof(struct trace_kprobe, tp.args) + \ |
| 45 | (sizeof(struct probe_arg) * (n))) | 40 | (sizeof(struct probe_arg) * (n))) |
| @@ -346,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, | |||
| 346 | struct trace_kprobe *tk; | 341 | struct trace_kprobe *tk; |
| 347 | 342 | ||
| 348 | list_for_each_entry(tk, &probe_list, list) | 343 | list_for_each_entry(tk, &probe_list, list) |
| 349 | if (strcmp(tk->tp.call.name, event) == 0 && | 344 | if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 && |
| 350 | strcmp(tk->tp.call.class->system, group) == 0) | 345 | strcmp(tk->tp.call.class->system, group) == 0) |
| 351 | return tk; | 346 | return tk; |
| 352 | return NULL; | 347 | return NULL; |
| @@ -387,18 +382,6 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) | |||
| 387 | return ret; | 382 | return ret; |
| 388 | } | 383 | } |
| 389 | 384 | ||
| 390 | static struct event_file_link * | ||
| 391 | find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | ||
| 392 | { | ||
| 393 | struct event_file_link *link; | ||
| 394 | |||
| 395 | list_for_each_entry(link, &tp->files, list) | ||
| 396 | if (link->file == file) | ||
| 397 | return link; | ||
| 398 | |||
| 399 | return NULL; | ||
| 400 | } | ||
| 401 | |||
| 402 | /* | 385 | /* |
| 403 | * Disable trace_probe | 386 | * Disable trace_probe |
| 404 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. | 387 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. |
| @@ -533,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk) | |||
| 533 | mutex_lock(&probe_lock); | 516 | mutex_lock(&probe_lock); |
| 534 | 517 | ||
| 535 | /* Delete old (same name) event if exist */ | 518 | /* Delete old (same name) event if exist */ |
| 536 | old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); | 519 | old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call), |
| 520 | tk->tp.call.class->system); | ||
| 537 | if (old_tk) { | 521 | if (old_tk) { |
| 538 | ret = unregister_trace_kprobe(old_tk); | 522 | ret = unregister_trace_kprobe(old_tk); |
| 539 | if (ret < 0) | 523 | if (ret < 0) |
| @@ -581,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, | |||
| 581 | if (ret) | 565 | if (ret) |
| 582 | pr_warning("Failed to re-register probe %s on" | 566 | pr_warning("Failed to re-register probe %s on" |
| 583 | "%s: %d\n", | 567 | "%s: %d\n", |
| 584 | tk->tp.call.name, mod->name, ret); | 568 | ftrace_event_name(&tk->tp.call), |
| 569 | mod->name, ret); | ||
| 585 | } | 570 | } |
| 586 | } | 571 | } |
| 587 | mutex_unlock(&probe_lock); | 572 | mutex_unlock(&probe_lock); |
| @@ -835,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 835 | int i; | 820 | int i; |
| 836 | 821 | ||
| 837 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); | 822 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); |
| 838 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); | 823 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, |
| 824 | ftrace_event_name(&tk->tp.call)); | ||
| 839 | 825 | ||
| 840 | if (!tk->symbol) | 826 | if (!tk->symbol) |
| 841 | seq_printf(m, " 0x%p", tk->rp.kp.addr); | 827 | seq_printf(m, " 0x%p", tk->rp.kp.addr); |
| @@ -893,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
| 893 | { | 879 | { |
| 894 | struct trace_kprobe *tk = v; | 880 | struct trace_kprobe *tk = v; |
| 895 | 881 | ||
| 896 | seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, | 882 | seq_printf(m, " %-44s %15lu %15lu\n", |
| 883 | ftrace_event_name(&tk->tp.call), tk->nhit, | ||
| 897 | tk->rp.kp.nmissed); | 884 | tk->rp.kp.nmissed); |
| 898 | 885 | ||
| 899 | return 0; | 886 | return 0; |
| @@ -1028,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
| 1028 | field = (struct kprobe_trace_entry_head *)iter->ent; | 1015 | field = (struct kprobe_trace_entry_head *)iter->ent; |
| 1029 | tp = container_of(event, struct trace_probe, call.event); | 1016 | tp = container_of(event, struct trace_probe, call.event); |
| 1030 | 1017 | ||
| 1031 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | 1018 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) |
| 1032 | goto partial; | 1019 | goto partial; |
| 1033 | 1020 | ||
| 1034 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | 1021 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) |
| @@ -1064,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
| 1064 | field = (struct kretprobe_trace_entry_head *)iter->ent; | 1051 | field = (struct kretprobe_trace_entry_head *)iter->ent; |
| 1065 | tp = container_of(event, struct trace_probe, call.event); | 1052 | tp = container_of(event, struct trace_probe, call.event); |
| 1066 | 1053 | ||
| 1067 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | 1054 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) |
| 1068 | goto partial; | 1055 | goto partial; |
| 1069 | 1056 | ||
| 1070 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | 1057 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) |
| @@ -1303,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk) | |||
| 1303 | call->data = tk; | 1290 | call->data = tk; |
| 1304 | ret = trace_add_event_call(call); | 1291 | ret = trace_add_event_call(call); |
| 1305 | if (ret) { | 1292 | if (ret) { |
| 1306 | pr_info("Failed to register kprobe event: %s\n", call->name); | 1293 | pr_info("Failed to register kprobe event: %s\n", |
| 1294 | ftrace_event_name(call)); | ||
| 1307 | kfree(call->print_fmt); | 1295 | kfree(call->print_fmt); |
| 1308 | unregister_ftrace_event(&call->event); | 1296 | unregister_ftrace_event(&call->event); |
| 1309 | } | 1297 | } |
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 394f94417e2f..69a5cc94c01a 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
| @@ -62,7 +62,7 @@ static void nop_trace_reset(struct trace_array *tr) | |||
| 62 | * If you don't implement it, then the flag setting will be | 62 | * If you don't implement it, then the flag setting will be |
| 63 | * automatically accepted. | 63 | * automatically accepted. |
| 64 | */ | 64 | */ |
| 65 | static int nop_set_flag(u32 old_flags, u32 bit, int set) | 65 | static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) |
| 66 | { | 66 | { |
| 67 | /* | 67 | /* |
| 68 | * Note that you don't need to update nop_flags.val yourself. | 68 | * Note that you don't need to update nop_flags.val yourself. |
| @@ -96,6 +96,7 @@ struct tracer nop_trace __read_mostly = | |||
| 96 | .selftest = trace_selftest_startup_nop, | 96 | .selftest = trace_selftest_startup_nop, |
| 97 | #endif | 97 | #endif |
| 98 | .flags = &nop_flags, | 98 | .flags = &nop_flags, |
| 99 | .set_flag = nop_set_flag | 99 | .set_flag = nop_set_flag, |
| 100 | .allow_instances = true, | ||
| 100 | }; | 101 | }; |
| 101 | 102 | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ed32284fbe32..a436de18aa99 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
| @@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
| 431 | } | 431 | } |
| 432 | 432 | ||
| 433 | trace_seq_init(p); | 433 | trace_seq_init(p); |
| 434 | ret = trace_seq_printf(s, "%s: ", event->name); | 434 | ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event)); |
| 435 | if (!ret) | 435 | if (!ret) |
| 436 | return TRACE_TYPE_PARTIAL_LINE; | 436 | return TRACE_TYPE_PARTIAL_LINE; |
| 437 | 437 | ||
| @@ -439,6 +439,37 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
| 439 | } | 439 | } |
| 440 | EXPORT_SYMBOL(ftrace_raw_output_prep); | 440 | EXPORT_SYMBOL(ftrace_raw_output_prep); |
| 441 | 441 | ||
| 442 | static int ftrace_output_raw(struct trace_iterator *iter, char *name, | ||
| 443 | char *fmt, va_list ap) | ||
| 444 | { | ||
| 445 | struct trace_seq *s = &iter->seq; | ||
| 446 | int ret; | ||
| 447 | |||
| 448 | ret = trace_seq_printf(s, "%s: ", name); | ||
| 449 | if (!ret) | ||
| 450 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 451 | |||
| 452 | ret = trace_seq_vprintf(s, fmt, ap); | ||
| 453 | |||
| 454 | if (!ret) | ||
| 455 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 456 | |||
| 457 | return TRACE_TYPE_HANDLED; | ||
| 458 | } | ||
| 459 | |||
| 460 | int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) | ||
| 461 | { | ||
| 462 | va_list ap; | ||
| 463 | int ret; | ||
| 464 | |||
| 465 | va_start(ap, fmt); | ||
| 466 | ret = ftrace_output_raw(iter, name, fmt, ap); | ||
| 467 | va_end(ap); | ||
| 468 | |||
| 469 | return ret; | ||
| 470 | } | ||
| 471 | EXPORT_SYMBOL_GPL(ftrace_output_call); | ||
| 472 | |||
| 442 | #ifdef CONFIG_KRETPROBES | 473 | #ifdef CONFIG_KRETPROBES |
| 443 | static inline const char *kretprobed(const char *name) | 474 | static inline const char *kretprobed(const char *name) |
| 444 | { | 475 | { |
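
ftrace_output_call() added above is a varargs front end over the new ftrace_output_raw(): it writes "name: " followed by a formatted payload into the iterator's seq buffer and reports TRACE_TYPE_HANDLED or TRACE_TYPE_PARTIAL_LINE. Presumably the macro-generated output handlers for tracepoint-union events are the intended callers; a purely hypothetical user would look like this:

/* Hypothetical trace_event output function built on the new helper. */
static enum print_line_t
my_event_output(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	/* emits "my_event: count=42" (or reports a partial line) */
	return ftrace_output_call(iter, "my_event", "count=%d", 42);
}
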
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index b73574a5f429..fb1ab5dfbd42 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h | |||
| @@ -288,6 +288,11 @@ struct trace_probe { | |||
| 288 | struct probe_arg args[]; | 288 | struct probe_arg args[]; |
| 289 | }; | 289 | }; |
| 290 | 290 | ||
| 291 | struct event_file_link { | ||
| 292 | struct ftrace_event_file *file; | ||
| 293 | struct list_head list; | ||
| 294 | }; | ||
| 295 | |||
| 291 | static inline bool trace_probe_is_enabled(struct trace_probe *tp) | 296 | static inline bool trace_probe_is_enabled(struct trace_probe *tp) |
| 292 | { | 297 | { |
| 293 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); | 298 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); |
| @@ -316,6 +321,18 @@ static inline int is_good_name(const char *name) | |||
| 316 | return 1; | 321 | return 1; |
| 317 | } | 322 | } |
| 318 | 323 | ||
| 324 | static inline struct event_file_link * | ||
| 325 | find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | ||
| 326 | { | ||
| 327 | struct event_file_link *link; | ||
| 328 | |||
| 329 | list_for_each_entry(link, &tp->files, list) | ||
| 330 | if (link->file == file) | ||
| 331 | return link; | ||
| 332 | |||
| 333 | return NULL; | ||
| 334 | } | ||
| 335 | |||
| 319 | extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | 336 | extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, |
| 320 | struct probe_arg *parg, bool is_return, bool is_kprobe); | 337 | struct probe_arg *parg, bool is_return, bool is_kprobe); |
| 321 | 338 | ||
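
event_file_link and find_event_file_link() move from trace_kprobe.c into trace_probe.h so the uprobe code below can reuse the same per-file link list. The list is walked under RCU from probe handlers, so attach and detach must follow RCU discipline, exactly as the uprobe enable/disable paths further down do; a condensed sketch of that discipline (the helper names are illustrative):

#include <linux/rculist.h>
#include <linux/slab.h>
#include "trace_probe.h"

/* Writer side: attach an event file to a probe (illustrative helper). */
static int probe_file_attach(struct trace_probe *tp,
			     struct ftrace_event_file *file)
{
	struct event_file_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->file = file;
	list_add_tail_rcu(&link->list, &tp->files);
	return 0;
}

/* Writer side: unlink, wait for in-flight handlers, then free. */
static void probe_file_detach(struct trace_probe *tp,
			      struct ftrace_event_file *file)
{
	struct event_file_link *link = find_event_file_link(tp, file);

	if (!link)
		return;
	list_del_rcu(&link->list);
	synchronize_sched();	/* matches "synchronize with u{,ret}probe_trace_func" below */
	kfree(link);
}
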
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 6e32635e5e57..e14da5e97a69 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -179,8 +179,10 @@ static void wakeup_function_set(int set) | |||
| 179 | unregister_wakeup_function(is_graph()); | 179 | unregister_wakeup_function(is_graph()); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set) | 182 | static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set) |
| 183 | { | 183 | { |
| 184 | struct tracer *tracer = tr->current_trace; | ||
| 185 | |||
| 184 | if (mask & TRACE_ITER_FUNCTION) | 186 | if (mask & TRACE_ITER_FUNCTION) |
| 185 | wakeup_function_set(set); | 187 | wakeup_function_set(set); |
| 186 | 188 | ||
| @@ -209,7 +211,8 @@ static void stop_func_tracer(int graph) | |||
| 209 | } | 211 | } |
| 210 | 212 | ||
| 211 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 213 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 212 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | 214 | static int |
| 215 | wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 213 | { | 216 | { |
| 214 | 217 | ||
| 215 | if (!(bit & TRACE_DISPLAY_GRAPH)) | 218 | if (!(bit & TRACE_DISPLAY_GRAPH)) |
| @@ -311,7 +314,8 @@ __trace_function(struct trace_array *tr, | |||
| 311 | #else | 314 | #else |
| 312 | #define __trace_function trace_function | 315 | #define __trace_function trace_function |
| 313 | 316 | ||
| 314 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | 317 | static int |
| 318 | wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 315 | { | 319 | { |
| 316 | return -EINVAL; | 320 | return -EINVAL; |
| 317 | } | 321 | } |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index e6be585cf06a..21b320e5d163 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 16 | #include <linux/magic.h> | ||
| 16 | 17 | ||
| 17 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
| 18 | 19 | ||
| @@ -144,6 +145,8 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
| 144 | i++; | 145 | i++; |
| 145 | } | 146 | } |
| 146 | 147 | ||
| 148 | BUG_ON(current != &init_task && | ||
| 149 | *(end_of_stack(current)) != STACK_END_MAGIC); | ||
| 147 | out: | 150 | out: |
| 148 | arch_spin_unlock(&max_stack_lock); | 151 | arch_spin_unlock(&max_stack_lock); |
| 149 | local_irq_restore(flags); | 152 | local_irq_restore(flags); |
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 79e52d93860b..930e51462dc8 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
| @@ -260,6 +260,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) | |||
| 260 | goto error; | 260 | goto error; |
| 261 | 261 | ||
| 262 | INIT_LIST_HEAD(&tu->list); | 262 | INIT_LIST_HEAD(&tu->list); |
| 263 | INIT_LIST_HEAD(&tu->tp.files); | ||
| 263 | tu->consumer.handler = uprobe_dispatcher; | 264 | tu->consumer.handler = uprobe_dispatcher; |
| 264 | if (is_ret) | 265 | if (is_ret) |
| 265 | tu->consumer.ret_handler = uretprobe_dispatcher; | 266 | tu->consumer.ret_handler = uretprobe_dispatcher; |
| @@ -293,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou | |||
| 293 | struct trace_uprobe *tu; | 294 | struct trace_uprobe *tu; |
| 294 | 295 | ||
| 295 | list_for_each_entry(tu, &uprobe_list, list) | 296 | list_for_each_entry(tu, &uprobe_list, list) |
| 296 | if (strcmp(tu->tp.call.name, event) == 0 && | 297 | if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 && |
| 297 | strcmp(tu->tp.call.class->system, group) == 0) | 298 | strcmp(tu->tp.call.class->system, group) == 0) |
| 298 | return tu; | 299 | return tu; |
| 299 | 300 | ||
| @@ -323,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu) | |||
| 323 | mutex_lock(&uprobe_lock); | 324 | mutex_lock(&uprobe_lock); |
| 324 | 325 | ||
| 325 | /* register as an event */ | 326 | /* register as an event */ |
| 326 | old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); | 327 | old_tu = find_probe_event(ftrace_event_name(&tu->tp.call), |
| 328 | tu->tp.call.class->system); | ||
| 327 | if (old_tu) { | 329 | if (old_tu) { |
| 328 | /* delete old event */ | 330 | /* delete old event */ |
| 329 | ret = unregister_trace_uprobe(old_tu); | 331 | ret = unregister_trace_uprobe(old_tu); |
| @@ -598,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 598 | char c = is_ret_probe(tu) ? 'r' : 'p'; | 600 | char c = is_ret_probe(tu) ? 'r' : 'p'; |
| 599 | int i; | 601 | int i; |
| 600 | 602 | ||
| 601 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); | 603 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, |
| 604 | ftrace_event_name(&tu->tp.call)); | ||
| 602 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); | 605 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); |
| 603 | 606 | ||
| 604 | for (i = 0; i < tu->tp.nr_args; i++) | 607 | for (i = 0; i < tu->tp.nr_args; i++) |
| @@ -648,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
| 648 | { | 651 | { |
| 649 | struct trace_uprobe *tu = v; | 652 | struct trace_uprobe *tu = v; |
| 650 | 653 | ||
| 651 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); | 654 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, |
| 655 | ftrace_event_name(&tu->tp.call), tu->nhit); | ||
| 652 | return 0; | 656 | return 0; |
| 653 | } | 657 | } |
| 654 | 658 | ||
| @@ -758,31 +762,32 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) | |||
| 758 | mutex_unlock(&ucb->mutex); | 762 | mutex_unlock(&ucb->mutex); |
| 759 | } | 763 | } |
| 760 | 764 | ||
| 761 | static void uprobe_trace_print(struct trace_uprobe *tu, | 765 | static void __uprobe_trace_func(struct trace_uprobe *tu, |
| 762 | unsigned long func, struct pt_regs *regs) | 766 | unsigned long func, struct pt_regs *regs, |
| 767 | struct uprobe_cpu_buffer *ucb, int dsize, | ||
| 768 | struct ftrace_event_file *ftrace_file) | ||
| 763 | { | 769 | { |
| 764 | struct uprobe_trace_entry_head *entry; | 770 | struct uprobe_trace_entry_head *entry; |
| 765 | struct ring_buffer_event *event; | 771 | struct ring_buffer_event *event; |
| 766 | struct ring_buffer *buffer; | 772 | struct ring_buffer *buffer; |
| 767 | struct uprobe_cpu_buffer *ucb; | ||
| 768 | void *data; | 773 | void *data; |
| 769 | int size, dsize, esize; | 774 | int size, esize; |
| 770 | struct ftrace_event_call *call = &tu->tp.call; | 775 | struct ftrace_event_call *call = &tu->tp.call; |
| 771 | 776 | ||
| 772 | dsize = __get_data_size(&tu->tp, regs); | 777 | WARN_ON(call != ftrace_file->event_call); |
| 773 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 774 | 778 | ||
| 775 | if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE)) | 779 | if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) |
| 776 | return; | 780 | return; |
| 777 | 781 | ||
| 778 | ucb = uprobe_buffer_get(); | 782 | if (ftrace_trigger_soft_disabled(ftrace_file)) |
| 779 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | 783 | return; |
| 780 | 784 | ||
| 785 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 781 | size = esize + tu->tp.size + dsize; | 786 | size = esize + tu->tp.size + dsize; |
| 782 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 787 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, |
| 783 | size, 0, 0); | 788 | call->event.type, size, 0, 0); |
| 784 | if (!event) | 789 | if (!event) |
| 785 | goto out; | 790 | return; |
| 786 | 791 | ||
| 787 | entry = ring_buffer_event_data(event); | 792 | entry = ring_buffer_event_data(event); |
| 788 | if (is_ret_probe(tu)) { | 793 | if (is_ret_probe(tu)) { |
| @@ -796,25 +801,36 @@ static void uprobe_trace_print(struct trace_uprobe *tu, | |||
| 796 | 801 | ||
| 797 | memcpy(data, ucb->buf, tu->tp.size + dsize); | 802 | memcpy(data, ucb->buf, tu->tp.size + dsize); |
| 798 | 803 | ||
| 799 | if (!call_filter_check_discard(call, entry, buffer, event)) | 804 | event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0); |
| 800 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
| 801 | |||
| 802 | out: | ||
| 803 | uprobe_buffer_put(ucb); | ||
| 804 | } | 805 | } |
| 805 | 806 | ||
| 806 | /* uprobe handler */ | 807 | /* uprobe handler */ |
| 807 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) | 808 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs, |
| 809 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 808 | { | 810 | { |
| 809 | if (!is_ret_probe(tu)) | 811 | struct event_file_link *link; |
| 810 | uprobe_trace_print(tu, 0, regs); | 812 | |
| 813 | if (is_ret_probe(tu)) | ||
| 814 | return 0; | ||
| 815 | |||
| 816 | rcu_read_lock(); | ||
| 817 | list_for_each_entry_rcu(link, &tu->tp.files, list) | ||
| 818 | __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file); | ||
| 819 | rcu_read_unlock(); | ||
| 820 | |||
| 811 | return 0; | 821 | return 0; |
| 812 | } | 822 | } |
| 813 | 823 | ||
| 814 | static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, | 824 | static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, |
| 815 | struct pt_regs *regs) | 825 | struct pt_regs *regs, |
| 826 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 816 | { | 827 | { |
| 817 | uprobe_trace_print(tu, func, regs); | 828 | struct event_file_link *link; |
| 829 | |||
| 830 | rcu_read_lock(); | ||
| 831 | list_for_each_entry_rcu(link, &tu->tp.files, list) | ||
| 832 | __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file); | ||
| 833 | rcu_read_unlock(); | ||
| 818 | } | 834 | } |
| 819 | 835 | ||
| 820 | /* Event entry printers */ | 836 | /* Event entry printers */ |
| @@ -831,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
| 831 | tu = container_of(event, struct trace_uprobe, tp.call.event); | 847 | tu = container_of(event, struct trace_uprobe, tp.call.event); |
| 832 | 848 | ||
| 833 | if (is_ret_probe(tu)) { | 849 | if (is_ret_probe(tu)) { |
| 834 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, | 850 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", |
| 851 | ftrace_event_name(&tu->tp.call), | ||
| 835 | entry->vaddr[1], entry->vaddr[0])) | 852 | entry->vaddr[1], entry->vaddr[0])) |
| 836 | goto partial; | 853 | goto partial; |
| 837 | data = DATAOF_TRACE_ENTRY(entry, true); | 854 | data = DATAOF_TRACE_ENTRY(entry, true); |
| 838 | } else { | 855 | } else { |
| 839 | if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, | 856 | if (!trace_seq_printf(s, "%s: (0x%lx)", |
| 857 | ftrace_event_name(&tu->tp.call), | ||
| 840 | entry->vaddr[0])) | 858 | entry->vaddr[0])) |
| 841 | goto partial; | 859 | goto partial; |
| 842 | data = DATAOF_TRACE_ENTRY(entry, false); | 860 | data = DATAOF_TRACE_ENTRY(entry, false); |
| @@ -861,12 +879,24 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self, | |||
| 861 | struct mm_struct *mm); | 879 | struct mm_struct *mm); |
| 862 | 880 | ||
| 863 | static int | 881 | static int |
| 864 | probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) | 882 | probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, |
| 883 | filter_func_t filter) | ||
| 865 | { | 884 | { |
| 866 | int ret = 0; | 885 | bool enabled = trace_probe_is_enabled(&tu->tp); |
| 886 | struct event_file_link *link = NULL; | ||
| 887 | int ret; | ||
| 888 | |||
| 889 | if (file) { | ||
| 890 | link = kmalloc(sizeof(*link), GFP_KERNEL); | ||
| 891 | if (!link) | ||
| 892 | return -ENOMEM; | ||
| 867 | 893 | ||
| 868 | if (trace_probe_is_enabled(&tu->tp)) | 894 | link->file = file; |
| 869 | return -EINTR; | 895 | list_add_tail_rcu(&link->list, &tu->tp.files); |
| 896 | |||
| 897 | tu->tp.flags |= TP_FLAG_TRACE; | ||
| 898 | } else | ||
| 899 | tu->tp.flags |= TP_FLAG_PROFILE; | ||
| 870 | 900 | ||
| 871 | ret = uprobe_buffer_enable(); | 901 | ret = uprobe_buffer_enable(); |
| 872 | if (ret < 0) | 902 | if (ret < 0) |
| @@ -874,24 +904,49 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) | |||
| 874 | 904 | ||
| 875 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); | 905 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); |
| 876 | 906 | ||
| 877 | tu->tp.flags |= flag; | 907 | if (enabled) |
| 908 | return 0; | ||
| 909 | |||
| 878 | tu->consumer.filter = filter; | 910 | tu->consumer.filter = filter; |
| 879 | ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); | 911 | ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); |
| 880 | if (ret) | 912 | if (ret) { |
| 881 | tu->tp.flags &= ~flag; | 913 | if (file) { |
| 914 | list_del(&link->list); | ||
| 915 | kfree(link); | ||
| 916 | tu->tp.flags &= ~TP_FLAG_TRACE; | ||
| 917 | } else | ||
| 918 | tu->tp.flags &= ~TP_FLAG_PROFILE; | ||
| 919 | } | ||
| 882 | 920 | ||
| 883 | return ret; | 921 | return ret; |
| 884 | } | 922 | } |
| 885 | 923 | ||
| 886 | static void probe_event_disable(struct trace_uprobe *tu, int flag) | 924 | static void |
| 925 | probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file) | ||
| 887 | { | 926 | { |
| 888 | if (!trace_probe_is_enabled(&tu->tp)) | 927 | if (!trace_probe_is_enabled(&tu->tp)) |
| 889 | return; | 928 | return; |
| 890 | 929 | ||
| 930 | if (file) { | ||
| 931 | struct event_file_link *link; | ||
| 932 | |||
| 933 | link = find_event_file_link(&tu->tp, file); | ||
| 934 | if (!link) | ||
| 935 | return; | ||
| 936 | |||
| 937 | list_del_rcu(&link->list); | ||
| 938 | /* synchronize with u{,ret}probe_trace_func */ | ||
| 939 | synchronize_sched(); | ||
| 940 | kfree(link); | ||
| 941 | |||
| 942 | if (!list_empty(&tu->tp.files)) | ||
| 943 | return; | ||
| 944 | } | ||
| 945 | |||
| 891 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); | 946 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); |
| 892 | 947 | ||
| 893 | uprobe_unregister(tu->inode, tu->offset, &tu->consumer); | 948 | uprobe_unregister(tu->inode, tu->offset, &tu->consumer); |
| 894 | tu->tp.flags &= ~flag; | 949 | tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE; |
| 895 | 950 | ||
| 896 | uprobe_buffer_disable(); | 951 | uprobe_buffer_disable(); |
| 897 | } | 952 | } |
| @@ -1014,31 +1069,24 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, | |||
| 1014 | return ret; | 1069 | return ret; |
| 1015 | } | 1070 | } |
| 1016 | 1071 | ||
| 1017 | static void uprobe_perf_print(struct trace_uprobe *tu, | 1072 | static void __uprobe_perf_func(struct trace_uprobe *tu, |
| 1018 | unsigned long func, struct pt_regs *regs) | 1073 | unsigned long func, struct pt_regs *regs, |
| 1074 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1019 | { | 1075 | { |
| 1020 | struct ftrace_event_call *call = &tu->tp.call; | 1076 | struct ftrace_event_call *call = &tu->tp.call; |
| 1021 | struct uprobe_trace_entry_head *entry; | 1077 | struct uprobe_trace_entry_head *entry; |
| 1022 | struct hlist_head *head; | 1078 | struct hlist_head *head; |
| 1023 | struct uprobe_cpu_buffer *ucb; | ||
| 1024 | void *data; | 1079 | void *data; |
| 1025 | int size, dsize, esize; | 1080 | int size, esize; |
| 1026 | int rctx; | 1081 | int rctx; |
| 1027 | 1082 | ||
| 1028 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1029 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | 1083 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 1030 | 1084 | ||
| 1031 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1032 | return; | ||
| 1033 | |||
| 1034 | size = esize + tu->tp.size + dsize; | 1085 | size = esize + tu->tp.size + dsize; |
| 1035 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); | 1086 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
| 1036 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | 1087 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) |
| 1037 | return; | 1088 | return; |
| 1038 | 1089 | ||
| 1039 | ucb = uprobe_buffer_get(); | ||
| 1040 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1041 | |||
| 1042 | preempt_disable(); | 1090 | preempt_disable(); |
| 1043 | head = this_cpu_ptr(call->perf_events); | 1091 | head = this_cpu_ptr(call->perf_events); |
| 1044 | if (hlist_empty(head)) | 1092 | if (hlist_empty(head)) |
| @@ -1068,46 +1116,49 @@ static void uprobe_perf_print(struct trace_uprobe *tu, | |||
| 1068 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | 1116 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 1069 | out: | 1117 | out: |
| 1070 | preempt_enable(); | 1118 | preempt_enable(); |
| 1071 | uprobe_buffer_put(ucb); | ||
| 1072 | } | 1119 | } |
| 1073 | 1120 | ||
| 1074 | /* uprobe profile handler */ | 1121 | /* uprobe profile handler */ |
| 1075 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) | 1122 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs, |
| 1123 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1076 | { | 1124 | { |
| 1077 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) | 1125 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) |
| 1078 | return UPROBE_HANDLER_REMOVE; | 1126 | return UPROBE_HANDLER_REMOVE; |
| 1079 | 1127 | ||
| 1080 | if (!is_ret_probe(tu)) | 1128 | if (!is_ret_probe(tu)) |
| 1081 | uprobe_perf_print(tu, 0, regs); | 1129 | __uprobe_perf_func(tu, 0, regs, ucb, dsize); |
| 1082 | return 0; | 1130 | return 0; |
| 1083 | } | 1131 | } |
| 1084 | 1132 | ||
| 1085 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, | 1133 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, |
| 1086 | struct pt_regs *regs) | 1134 | struct pt_regs *regs, |
| 1135 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1087 | { | 1136 | { |
| 1088 | uprobe_perf_print(tu, func, regs); | 1137 | __uprobe_perf_func(tu, func, regs, ucb, dsize); |
| 1089 | } | 1138 | } |
| 1090 | #endif /* CONFIG_PERF_EVENTS */ | 1139 | #endif /* CONFIG_PERF_EVENTS */ |
| 1091 | 1140 | ||
| 1092 | static | 1141 | static int |
| 1093 | int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) | 1142 | trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, |
| 1143 | void *data) | ||
| 1094 | { | 1144 | { |
| 1095 | struct trace_uprobe *tu = event->data; | 1145 | struct trace_uprobe *tu = event->data; |
| 1146 | struct ftrace_event_file *file = data; | ||
| 1096 | 1147 | ||
| 1097 | switch (type) { | 1148 | switch (type) { |
| 1098 | case TRACE_REG_REGISTER: | 1149 | case TRACE_REG_REGISTER: |
| 1099 | return probe_event_enable(tu, TP_FLAG_TRACE, NULL); | 1150 | return probe_event_enable(tu, file, NULL); |
| 1100 | 1151 | ||
| 1101 | case TRACE_REG_UNREGISTER: | 1152 | case TRACE_REG_UNREGISTER: |
| 1102 | probe_event_disable(tu, TP_FLAG_TRACE); | 1153 | probe_event_disable(tu, file); |
| 1103 | return 0; | 1154 | return 0; |
| 1104 | 1155 | ||
| 1105 | #ifdef CONFIG_PERF_EVENTS | 1156 | #ifdef CONFIG_PERF_EVENTS |
| 1106 | case TRACE_REG_PERF_REGISTER: | 1157 | case TRACE_REG_PERF_REGISTER: |
| 1107 | return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter); | 1158 | return probe_event_enable(tu, NULL, uprobe_perf_filter); |
| 1108 | 1159 | ||
| 1109 | case TRACE_REG_PERF_UNREGISTER: | 1160 | case TRACE_REG_PERF_UNREGISTER: |
| 1110 | probe_event_disable(tu, TP_FLAG_PROFILE); | 1161 | probe_event_disable(tu, NULL); |
| 1111 | return 0; | 1162 | return 0; |
| 1112 | 1163 | ||
| 1113 | case TRACE_REG_PERF_OPEN: | 1164 | case TRACE_REG_PERF_OPEN: |
| @@ -1127,8 +1178,11 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | |||
| 1127 | { | 1178 | { |
| 1128 | struct trace_uprobe *tu; | 1179 | struct trace_uprobe *tu; |
| 1129 | struct uprobe_dispatch_data udd; | 1180 | struct uprobe_dispatch_data udd; |
| 1181 | struct uprobe_cpu_buffer *ucb; | ||
| 1182 | int dsize, esize; | ||
| 1130 | int ret = 0; | 1183 | int ret = 0; |
| 1131 | 1184 | ||
| 1185 | |||
| 1132 | tu = container_of(con, struct trace_uprobe, consumer); | 1186 | tu = container_of(con, struct trace_uprobe, consumer); |
| 1133 | tu->nhit++; | 1187 | tu->nhit++; |
| 1134 | 1188 | ||
| @@ -1137,13 +1191,29 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | |||
| 1137 | 1191 | ||
| 1138 | current->utask->vaddr = (unsigned long) &udd; | 1192 | current->utask->vaddr = (unsigned long) &udd; |
| 1139 | 1193 | ||
| 1194 | #ifdef CONFIG_PERF_EVENTS | ||
| 1195 | if ((tu->tp.flags & TP_FLAG_TRACE) == 0 && | ||
| 1196 | !uprobe_perf_filter(&tu->consumer, 0, current->mm)) | ||
| 1197 | return UPROBE_HANDLER_REMOVE; | ||
| 1198 | #endif | ||
| 1199 | |||
| 1200 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1201 | return 0; | ||
| 1202 | |||
| 1203 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1204 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 1205 | |||
| 1206 | ucb = uprobe_buffer_get(); | ||
| 1207 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1208 | |||
| 1140 | if (tu->tp.flags & TP_FLAG_TRACE) | 1209 | if (tu->tp.flags & TP_FLAG_TRACE) |
| 1141 | ret |= uprobe_trace_func(tu, regs); | 1210 | ret |= uprobe_trace_func(tu, regs, ucb, dsize); |
| 1142 | 1211 | ||
| 1143 | #ifdef CONFIG_PERF_EVENTS | 1212 | #ifdef CONFIG_PERF_EVENTS |
| 1144 | if (tu->tp.flags & TP_FLAG_PROFILE) | 1213 | if (tu->tp.flags & TP_FLAG_PROFILE) |
| 1145 | ret |= uprobe_perf_func(tu, regs); | 1214 | ret |= uprobe_perf_func(tu, regs, ucb, dsize); |
| 1146 | #endif | 1215 | #endif |
| 1216 | uprobe_buffer_put(ucb); | ||
| 1147 | return ret; | 1217 | return ret; |
| 1148 | } | 1218 | } |
| 1149 | 1219 | ||
| @@ -1152,6 +1222,8 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, | |||
| 1152 | { | 1222 | { |
| 1153 | struct trace_uprobe *tu; | 1223 | struct trace_uprobe *tu; |
| 1154 | struct uprobe_dispatch_data udd; | 1224 | struct uprobe_dispatch_data udd; |
| 1225 | struct uprobe_cpu_buffer *ucb; | ||
| 1226 | int dsize, esize; | ||
| 1155 | 1227 | ||
| 1156 | tu = container_of(con, struct trace_uprobe, consumer); | 1228 | tu = container_of(con, struct trace_uprobe, consumer); |
| 1157 | 1229 | ||
| @@ -1160,13 +1232,23 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, | |||
| 1160 | 1232 | ||
| 1161 | current->utask->vaddr = (unsigned long) &udd; | 1233 | current->utask->vaddr = (unsigned long) &udd; |
| 1162 | 1234 | ||
| 1235 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1236 | return 0; | ||
| 1237 | |||
| 1238 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1239 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 1240 | |||
| 1241 | ucb = uprobe_buffer_get(); | ||
| 1242 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1243 | |||
| 1163 | if (tu->tp.flags & TP_FLAG_TRACE) | 1244 | if (tu->tp.flags & TP_FLAG_TRACE) |
| 1164 | uretprobe_trace_func(tu, func, regs); | 1245 | uretprobe_trace_func(tu, func, regs, ucb, dsize); |
| 1165 | 1246 | ||
| 1166 | #ifdef CONFIG_PERF_EVENTS | 1247 | #ifdef CONFIG_PERF_EVENTS |
| 1167 | if (tu->tp.flags & TP_FLAG_PROFILE) | 1248 | if (tu->tp.flags & TP_FLAG_PROFILE) |
| 1168 | uretprobe_perf_func(tu, func, regs); | 1249 | uretprobe_perf_func(tu, func, regs, ucb, dsize); |
| 1169 | #endif | 1250 | #endif |
| 1251 | uprobe_buffer_put(ucb); | ||
| 1170 | return 0; | 1252 | return 0; |
| 1171 | } | 1253 | } |
| 1172 | 1254 | ||
| @@ -1198,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu) | |||
| 1198 | ret = trace_add_event_call(call); | 1280 | ret = trace_add_event_call(call); |
| 1199 | 1281 | ||
| 1200 | if (ret) { | 1282 | if (ret) { |
| 1201 | pr_info("Failed to register uprobe event: %s\n", call->name); | 1283 | pr_info("Failed to register uprobe event: %s\n", |
| 1284 | ftrace_event_name(call)); | ||
| 1202 | kfree(call->print_fmt); | 1285 | kfree(call->print_fmt); |
| 1203 | unregister_ftrace_event(&call->event); | 1286 | unregister_ftrace_event(&call->event); |
| 1204 | } | 1287 | } |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 29f26540e9c9..ac5b23cf7212 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2008 Mathieu Desnoyers | 2 | * Copyright (C) 2008-2014 Mathieu Desnoyers |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
| @@ -33,43 +33,29 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[]; | |||
| 33 | /* Set to 1 to enable tracepoint debug output */ | 33 | /* Set to 1 to enable tracepoint debug output */ |
| 34 | static const int tracepoint_debug; | 34 | static const int tracepoint_debug; |
| 35 | 35 | ||
| 36 | #ifdef CONFIG_MODULES | ||
| 36 | /* | 37 | /* |
| 37 | * Tracepoints mutex protects the builtin and module tracepoints and the hash | 38 | * Tracepoint module list mutex protects the local module list. |
| 38 | * table, as well as the local module list. | ||
| 39 | */ | 39 | */ |
| 40 | static DEFINE_MUTEX(tracepoints_mutex); | 40 | static DEFINE_MUTEX(tracepoint_module_list_mutex); |
| 41 | 41 | ||
| 42 | #ifdef CONFIG_MODULES | 42 | /* Local list of struct tp_module */ |
| 43 | /* Local list of struct module */ | ||
| 44 | static LIST_HEAD(tracepoint_module_list); | 43 | static LIST_HEAD(tracepoint_module_list); |
| 45 | #endif /* CONFIG_MODULES */ | 44 | #endif /* CONFIG_MODULES */ |
| 46 | 45 | ||
| 47 | /* | 46 | /* |
| 48 | * Tracepoint hash table, containing the active tracepoints. | 47 | * tracepoints_mutex protects the builtin and module tracepoints. |
| 49 | * Protected by tracepoints_mutex. | 48 | * tracepoints_mutex nests inside tracepoint_module_list_mutex. |
| 50 | */ | 49 | */ |
| 51 | #define TRACEPOINT_HASH_BITS 6 | 50 | static DEFINE_MUTEX(tracepoints_mutex); |
| 52 | #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) | ||
| 53 | static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; | ||
| 54 | 51 | ||
| 55 | /* | 52 | /* |
| 56 | * Note about RCU : | 53 | * Note about RCU : |
| 57 | * It is used to delay the free of multiple probes array until a quiescent | 54 | * It is used to delay the free of multiple probes array until a quiescent |
| 58 | * state is reached. | 55 | * state is reached. |
| 59 | * Tracepoint entries modifications are protected by the tracepoints_mutex. | ||
| 60 | */ | 56 | */ |
| 61 | struct tracepoint_entry { | ||
| 62 | struct hlist_node hlist; | ||
| 63 | struct tracepoint_func *funcs; | ||
| 64 | int refcount; /* Number of times armed. 0 if disarmed. */ | ||
| 65 | char name[0]; | ||
| 66 | }; | ||
| 67 | |||
| 68 | struct tp_probes { | 57 | struct tp_probes { |
| 69 | union { | 58 | struct rcu_head rcu; |
| 70 | struct rcu_head rcu; | ||
| 71 | struct list_head list; | ||
| 72 | } u; | ||
| 73 | struct tracepoint_func probes[0]; | 59 | struct tracepoint_func probes[0]; |
| 74 | }; | 60 | }; |
| 75 | 61 | ||
| @@ -82,7 +68,7 @@ static inline void *allocate_probes(int count) | |||
| 82 | 68 | ||
| 83 | static void rcu_free_old_probes(struct rcu_head *head) | 69 | static void rcu_free_old_probes(struct rcu_head *head) |
| 84 | { | 70 | { |
| 85 | kfree(container_of(head, struct tp_probes, u.rcu)); | 71 | kfree(container_of(head, struct tp_probes, rcu)); |
| 86 | } | 72 | } |
| 87 | 73 | ||
| 88 | static inline void release_probes(struct tracepoint_func *old) | 74 | static inline void release_probes(struct tracepoint_func *old) |
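The two hunks above drop the rcu/list union from struct tp_probes and recover the enclosing object with container_of() from either the rcu head or the first probe slot. A small userspace sketch of that container_of-plus-flexible-array layout, with free() standing in for the RCU-deferred kfree() and invented names (struct probes, rcu_placeholder, alloc_probes):

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct func { void (*fn)(void); void *data; };

struct probes {
	int rcu_placeholder;		/* stands in for struct rcu_head */
	struct func funcs[];		/* flexible array of probe slots */
};

static struct probes *alloc_probes(int count)
{
	/* one allocation covers the header plus "count" probe slots */
	return malloc(sizeof(struct probes) + count * sizeof(struct func));
}

static void release_probes(struct func *old)
{
	if (old) {
		/* recover the enclosing struct from the first array slot */
		struct probes *p = container_of(old, struct probes, funcs);
		free(p);	/* the kernel defers this via call_rcu_sched() */
	}
}

int main(void)
{
	struct probes *p = alloc_probes(2);

	if (!p)
		return 1;
	release_probes(&p->funcs[0]);
	puts("released");
	return 0;
}
```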
| @@ -90,38 +76,37 @@ static inline void release_probes(struct tracepoint_func *old) | |||
| 90 | if (old) { | 76 | if (old) { |
| 91 | struct tp_probes *tp_probes = container_of(old, | 77 | struct tp_probes *tp_probes = container_of(old, |
| 92 | struct tp_probes, probes[0]); | 78 | struct tp_probes, probes[0]); |
| 93 | call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes); | 79 | call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes); |
| 94 | } | 80 | } |
| 95 | } | 81 | } |
| 96 | 82 | ||
| 97 | static void debug_print_probes(struct tracepoint_entry *entry) | 83 | static void debug_print_probes(struct tracepoint_func *funcs) |
| 98 | { | 84 | { |
| 99 | int i; | 85 | int i; |
| 100 | 86 | ||
| 101 | if (!tracepoint_debug || !entry->funcs) | 87 | if (!tracepoint_debug || !funcs) |
| 102 | return; | 88 | return; |
| 103 | 89 | ||
| 104 | for (i = 0; entry->funcs[i].func; i++) | 90 | for (i = 0; funcs[i].func; i++) |
| 105 | printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func); | 91 | printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func); |
| 106 | } | 92 | } |
| 107 | 93 | ||
| 108 | static struct tracepoint_func * | 94 | static struct tracepoint_func *func_add(struct tracepoint_func **funcs, |
| 109 | tracepoint_entry_add_probe(struct tracepoint_entry *entry, | 95 | struct tracepoint_func *tp_func) |
| 110 | void *probe, void *data) | ||
| 111 | { | 96 | { |
| 112 | int nr_probes = 0; | 97 | int nr_probes = 0; |
| 113 | struct tracepoint_func *old, *new; | 98 | struct tracepoint_func *old, *new; |
| 114 | 99 | ||
| 115 | if (WARN_ON(!probe)) | 100 | if (WARN_ON(!tp_func->func)) |
| 116 | return ERR_PTR(-EINVAL); | 101 | return ERR_PTR(-EINVAL); |
| 117 | 102 | ||
| 118 | debug_print_probes(entry); | 103 | debug_print_probes(*funcs); |
| 119 | old = entry->funcs; | 104 | old = *funcs; |
| 120 | if (old) { | 105 | if (old) { |
| 121 | /* (N -> N+1), (N != 0, 1) probes */ | 106 | /* (N -> N+1), (N != 0, 1) probes */ |
| 122 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) | 107 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) |
| 123 | if (old[nr_probes].func == probe && | 108 | if (old[nr_probes].func == tp_func->func && |
| 124 | old[nr_probes].data == data) | 109 | old[nr_probes].data == tp_func->data) |
| 125 | return ERR_PTR(-EEXIST); | 110 | return ERR_PTR(-EEXIST); |
| 126 | } | 111 | } |
| 127 | /* + 2 : one for new probe, one for NULL func */ | 112 | /* + 2 : one for new probe, one for NULL func */ |
| @@ -130,33 +115,30 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, | |||
| 130 | return ERR_PTR(-ENOMEM); | 115 | return ERR_PTR(-ENOMEM); |
| 131 | if (old) | 116 | if (old) |
| 132 | memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); | 117 | memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); |
| 133 | new[nr_probes].func = probe; | 118 | new[nr_probes] = *tp_func; |
| 134 | new[nr_probes].data = data; | ||
| 135 | new[nr_probes + 1].func = NULL; | 119 | new[nr_probes + 1].func = NULL; |
| 136 | entry->refcount = nr_probes + 1; | 120 | *funcs = new; |
| 137 | entry->funcs = new; | 121 | debug_print_probes(*funcs); |
| 138 | debug_print_probes(entry); | ||
| 139 | return old; | 122 | return old; |
| 140 | } | 123 | } |
| 141 | 124 | ||
| 142 | static void * | 125 | static void *func_remove(struct tracepoint_func **funcs, |
| 143 | tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | 126 | struct tracepoint_func *tp_func) |
| 144 | void *probe, void *data) | ||
| 145 | { | 127 | { |
| 146 | int nr_probes = 0, nr_del = 0, i; | 128 | int nr_probes = 0, nr_del = 0, i; |
| 147 | struct tracepoint_func *old, *new; | 129 | struct tracepoint_func *old, *new; |
| 148 | 130 | ||
| 149 | old = entry->funcs; | 131 | old = *funcs; |
| 150 | 132 | ||
| 151 | if (!old) | 133 | if (!old) |
| 152 | return ERR_PTR(-ENOENT); | 134 | return ERR_PTR(-ENOENT); |
| 153 | 135 | ||
| 154 | debug_print_probes(entry); | 136 | debug_print_probes(*funcs); |
| 155 | /* (N -> M), (N > 1, M >= 0) probes */ | 137 | /* (N -> M), (N > 1, M >= 0) probes */ |
| 156 | if (probe) { | 138 | if (tp_func->func) { |
| 157 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) { | 139 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) { |
| 158 | if (old[nr_probes].func == probe && | 140 | if (old[nr_probes].func == tp_func->func && |
| 159 | old[nr_probes].data == data) | 141 | old[nr_probes].data == tp_func->data) |
| 160 | nr_del++; | 142 | nr_del++; |
| 161 | } | 143 | } |
| 162 | } | 144 | } |
| @@ -167,9 +149,8 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | |||
| 167 | */ | 149 | */ |
| 168 | if (nr_probes - nr_del == 0) { | 150 | if (nr_probes - nr_del == 0) { |
| 169 | /* N -> 0, (N > 1) */ | 151 | /* N -> 0, (N > 1) */ |
| 170 | entry->funcs = NULL; | 152 | *funcs = NULL; |
| 171 | entry->refcount = 0; | 153 | debug_print_probes(*funcs); |
| 172 | debug_print_probes(entry); | ||
| 173 | return old; | 154 | return old; |
| 174 | } else { | 155 | } else { |
| 175 | int j = 0; | 156 | int j = 0; |
| @@ -179,90 +160,35 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | |||
| 179 | if (new == NULL) | 160 | if (new == NULL) |
| 180 | return ERR_PTR(-ENOMEM); | 161 | return ERR_PTR(-ENOMEM); |
| 181 | for (i = 0; old[i].func; i++) | 162 | for (i = 0; old[i].func; i++) |
| 182 | if (old[i].func != probe || old[i].data != data) | 163 | if (old[i].func != tp_func->func |
| 164 | || old[i].data != tp_func->data) | ||
| 183 | new[j++] = old[i]; | 165 | new[j++] = old[i]; |
| 184 | new[nr_probes - nr_del].func = NULL; | 166 | new[nr_probes - nr_del].func = NULL; |
| 185 | entry->refcount = nr_probes - nr_del; | 167 | *funcs = new; |
| 186 | entry->funcs = new; | ||
| 187 | } | 168 | } |
| 188 | debug_print_probes(entry); | 169 | debug_print_probes(*funcs); |
| 189 | return old; | 170 | return old; |
| 190 | } | 171 | } |
| 191 | 172 | ||
| 192 | /* | 173 | /* |
| 193 | * Get tracepoint if the tracepoint is present in the tracepoint hash table. | 174 | * Add the probe function to a tracepoint. |
| 194 | * Must be called with tracepoints_mutex held. | ||
| 195 | * Returns NULL if not present. | ||
| 196 | */ | 175 | */ |
| 197 | static struct tracepoint_entry *get_tracepoint(const char *name) | 176 | static int tracepoint_add_func(struct tracepoint *tp, |
| 177 | struct tracepoint_func *func) | ||
| 198 | { | 178 | { |
| 199 | struct hlist_head *head; | 179 | struct tracepoint_func *old, *tp_funcs; |
| 200 | struct tracepoint_entry *e; | ||
| 201 | u32 hash = jhash(name, strlen(name), 0); | ||
| 202 | |||
| 203 | head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; | ||
| 204 | hlist_for_each_entry(e, head, hlist) { | ||
| 205 | if (!strcmp(name, e->name)) | ||
| 206 | return e; | ||
| 207 | } | ||
| 208 | return NULL; | ||
| 209 | } | ||
| 210 | 180 | ||
| 211 | /* | 181 | if (tp->regfunc && !static_key_enabled(&tp->key)) |
| 212 | * Add the tracepoint to the tracepoint hash table. Must be called with | 182 | tp->regfunc(); |
| 213 | * tracepoints_mutex held. | ||
| 214 | */ | ||
| 215 | static struct tracepoint_entry *add_tracepoint(const char *name) | ||
| 216 | { | ||
| 217 | struct hlist_head *head; | ||
| 218 | struct tracepoint_entry *e; | ||
| 219 | size_t name_len = strlen(name) + 1; | ||
| 220 | u32 hash = jhash(name, name_len-1, 0); | ||
| 221 | |||
| 222 | head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; | ||
| 223 | hlist_for_each_entry(e, head, hlist) { | ||
| 224 | if (!strcmp(name, e->name)) { | ||
| 225 | printk(KERN_NOTICE | ||
| 226 | "tracepoint %s busy\n", name); | ||
| 227 | return ERR_PTR(-EEXIST); /* Already there */ | ||
| 228 | } | ||
| 229 | } | ||
| 230 | /* | ||
| 231 | * Using kmalloc here to allocate a variable length element. Could | ||
| 232 | * cause some memory fragmentation if overused. | ||
| 233 | */ | ||
| 234 | e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL); | ||
| 235 | if (!e) | ||
| 236 | return ERR_PTR(-ENOMEM); | ||
| 237 | memcpy(&e->name[0], name, name_len); | ||
| 238 | e->funcs = NULL; | ||
| 239 | e->refcount = 0; | ||
| 240 | hlist_add_head(&e->hlist, head); | ||
| 241 | return e; | ||
| 242 | } | ||
| 243 | |||
| 244 | /* | ||
| 245 | * Remove the tracepoint from the tracepoint hash table. Must be called with | ||
| 246 | * mutex_lock held. | ||
| 247 | */ | ||
| 248 | static inline void remove_tracepoint(struct tracepoint_entry *e) | ||
| 249 | { | ||
| 250 | hlist_del(&e->hlist); | ||
| 251 | kfree(e); | ||
| 252 | } | ||
| 253 | 183 | ||
| 254 | /* | 184 | tp_funcs = rcu_dereference_protected(tp->funcs, |
| 255 | * Sets the probe callback corresponding to one tracepoint. | 185 | lockdep_is_held(&tracepoints_mutex)); |
| 256 | */ | 186 | old = func_add(&tp_funcs, func); |
| 257 | static void set_tracepoint(struct tracepoint_entry **entry, | 187 | if (IS_ERR(old)) { |
| 258 | struct tracepoint *elem, int active) | 188 | WARN_ON_ONCE(1); |
| 259 | { | 189 | return PTR_ERR(old); |
| 260 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); | 190 | } |
| 261 | 191 | release_probes(old); | |
| 262 | if (elem->regfunc && !static_key_enabled(&elem->key) && active) | ||
| 263 | elem->regfunc(); | ||
| 264 | else if (elem->unregfunc && static_key_enabled(&elem->key) && !active) | ||
| 265 | elem->unregfunc(); | ||
| 266 | 192 | ||
| 267 | /* | 193 | /* |
| 268 | * rcu_assign_pointer has a smp_wmb() which makes sure that the new | 194 | * rcu_assign_pointer has a smp_wmb() which makes sure that the new |
| @@ -271,421 +197,215 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
| 271 | * include/linux/tracepoints.h. A matching smp_read_barrier_depends() | 197 | * include/linux/tracepoints.h. A matching smp_read_barrier_depends() |
| 272 | * is used. | 198 | * is used. |
| 273 | */ | 199 | */ |
| 274 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); | 200 | rcu_assign_pointer(tp->funcs, tp_funcs); |
| 275 | if (active && !static_key_enabled(&elem->key)) | 201 | if (!static_key_enabled(&tp->key)) |
| 276 | static_key_slow_inc(&elem->key); | 202 | static_key_slow_inc(&tp->key); |
| 277 | else if (!active && static_key_enabled(&elem->key)) | 203 | return 0; |
| 278 | static_key_slow_dec(&elem->key); | ||
| 279 | } | 204 | } |
| 280 | 205 | ||
| 281 | /* | 206 | /* |
| 282 | * Disable a tracepoint and its probe callback. | 207 | * Remove a probe function from a tracepoint. |
| 283 | * Note: only waiting an RCU period after setting elem->call to the empty | 208 | * Note: only waiting an RCU period after setting elem->call to the empty |
| 284 | * function insures that the original callback is not used anymore. This insured | 209 | * function insures that the original callback is not used anymore. This insured |
| 285 | * by preempt_disable around the call site. | 210 | * by preempt_disable around the call site. |
| 286 | */ | 211 | */ |
| 287 | static void disable_tracepoint(struct tracepoint *elem) | 212 | static int tracepoint_remove_func(struct tracepoint *tp, |
| 288 | { | 213 | struct tracepoint_func *func) |
| 289 | if (elem->unregfunc && static_key_enabled(&elem->key)) | ||
| 290 | elem->unregfunc(); | ||
| 291 | |||
| 292 | if (static_key_enabled(&elem->key)) | ||
| 293 | static_key_slow_dec(&elem->key); | ||
| 294 | rcu_assign_pointer(elem->funcs, NULL); | ||
| 295 | } | ||
| 296 | |||
| 297 | /** | ||
| 298 | * tracepoint_update_probe_range - Update a probe range | ||
| 299 | * @begin: beginning of the range | ||
| 300 | * @end: end of the range | ||
| 301 | * | ||
| 302 | * Updates the probe callback corresponding to a range of tracepoints. | ||
| 303 | * Called with tracepoints_mutex held. | ||
| 304 | */ | ||
| 305 | static void tracepoint_update_probe_range(struct tracepoint * const *begin, | ||
| 306 | struct tracepoint * const *end) | ||
| 307 | { | 214 | { |
| 308 | struct tracepoint * const *iter; | 215 | struct tracepoint_func *old, *tp_funcs; |
| 309 | struct tracepoint_entry *mark_entry; | ||
| 310 | |||
| 311 | if (!begin) | ||
| 312 | return; | ||
| 313 | 216 | ||
| 314 | for (iter = begin; iter < end; iter++) { | 217 | tp_funcs = rcu_dereference_protected(tp->funcs, |
| 315 | mark_entry = get_tracepoint((*iter)->name); | 218 | lockdep_is_held(&tracepoints_mutex)); |
| 316 | if (mark_entry) { | 219 | old = func_remove(&tp_funcs, func); |
| 317 | set_tracepoint(&mark_entry, *iter, | 220 | if (IS_ERR(old)) { |
| 318 | !!mark_entry->refcount); | 221 | WARN_ON_ONCE(1); |
| 319 | } else { | 222 | return PTR_ERR(old); |
| 320 | disable_tracepoint(*iter); | ||
| 321 | } | ||
| 322 | } | 223 | } |
| 323 | } | 224 | release_probes(old); |
| 324 | |||
| 325 | #ifdef CONFIG_MODULES | ||
| 326 | void module_update_tracepoints(void) | ||
| 327 | { | ||
| 328 | struct tp_module *tp_mod; | ||
| 329 | |||
| 330 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) | ||
| 331 | tracepoint_update_probe_range(tp_mod->tracepoints_ptrs, | ||
| 332 | tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints); | ||
| 333 | } | ||
| 334 | #else /* CONFIG_MODULES */ | ||
| 335 | void module_update_tracepoints(void) | ||
| 336 | { | ||
| 337 | } | ||
| 338 | #endif /* CONFIG_MODULES */ | ||
| 339 | |||
| 340 | 225 | ||
| 341 | /* | 226 | if (!tp_funcs) { |
| 342 | * Update probes, removing the faulty probes. | 227 | /* Removed last function */ |
| 343 | * Called with tracepoints_mutex held. | 228 | if (tp->unregfunc && static_key_enabled(&tp->key)) |
| 344 | */ | 229 | tp->unregfunc(); |
| 345 | static void tracepoint_update_probes(void) | ||
| 346 | { | ||
| 347 | /* Core kernel tracepoints */ | ||
| 348 | tracepoint_update_probe_range(__start___tracepoints_ptrs, | ||
| 349 | __stop___tracepoints_ptrs); | ||
| 350 | /* tracepoints in modules. */ | ||
| 351 | module_update_tracepoints(); | ||
| 352 | } | ||
| 353 | 230 | ||
| 354 | static struct tracepoint_func * | 231 | if (static_key_enabled(&tp->key)) |
| 355 | tracepoint_add_probe(const char *name, void *probe, void *data) | 232 | static_key_slow_dec(&tp->key); |
| 356 | { | ||
| 357 | struct tracepoint_entry *entry; | ||
| 358 | struct tracepoint_func *old; | ||
| 359 | |||
| 360 | entry = get_tracepoint(name); | ||
| 361 | if (!entry) { | ||
| 362 | entry = add_tracepoint(name); | ||
| 363 | if (IS_ERR(entry)) | ||
| 364 | return (struct tracepoint_func *)entry; | ||
| 365 | } | 233 | } |
| 366 | old = tracepoint_entry_add_probe(entry, probe, data); | 234 | rcu_assign_pointer(tp->funcs, tp_funcs); |
| 367 | if (IS_ERR(old) && !entry->refcount) | 235 | return 0; |
| 368 | remove_tracepoint(entry); | ||
| 369 | return old; | ||
| 370 | } | 236 | } |
| 371 | 237 | ||
| 372 | /** | 238 | /** |
| 373 | * tracepoint_probe_register - Connect a probe to a tracepoint | 239 | * tracepoint_probe_register - Connect a probe to a tracepoint |
| 374 | * @name: tracepoint name | 240 | * @tp: tracepoint |
| 375 | * @probe: probe handler | 241 | * @probe: probe handler |
| 376 | * | 242 | * |
| 377 | * Returns 0 if ok, error value on error. | 243 | * Returns 0 if ok, error value on error. |
| 378 | * The probe address must at least be aligned on the architecture pointer size. | 244 | * Note: if @tp is within a module, the caller is responsible for |
| 245 | * unregistering the probe before the module is gone. This can be | ||
| 246 | * performed either with a tracepoint module going notifier, or from | ||
| 247 | * within module exit functions. | ||
| 379 | */ | 248 | */ |
| 380 | int tracepoint_probe_register(const char *name, void *probe, void *data) | 249 | int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data) |
| 381 | { | 250 | { |
| 382 | struct tracepoint_func *old; | 251 | struct tracepoint_func tp_func; |
| 252 | int ret; | ||
| 383 | 253 | ||
| 384 | mutex_lock(&tracepoints_mutex); | 254 | mutex_lock(&tracepoints_mutex); |
| 385 | old = tracepoint_add_probe(name, probe, data); | 255 | tp_func.func = probe; |
| 386 | if (IS_ERR(old)) { | 256 | tp_func.data = data; |
| 387 | mutex_unlock(&tracepoints_mutex); | 257 | ret = tracepoint_add_func(tp, &tp_func); |
| 388 | return PTR_ERR(old); | ||
| 389 | } | ||
| 390 | tracepoint_update_probes(); /* may update entry */ | ||
| 391 | mutex_unlock(&tracepoints_mutex); | 258 | mutex_unlock(&tracepoints_mutex); |
| 392 | release_probes(old); | 259 | return ret; |
| 393 | return 0; | ||
| 394 | } | 260 | } |
| 395 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); | 261 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); |
| 396 | 262 | ||
| 397 | static struct tracepoint_func * | ||
| 398 | tracepoint_remove_probe(const char *name, void *probe, void *data) | ||
| 399 | { | ||
| 400 | struct tracepoint_entry *entry; | ||
| 401 | struct tracepoint_func *old; | ||
| 402 | |||
| 403 | entry = get_tracepoint(name); | ||
| 404 | if (!entry) | ||
| 405 | return ERR_PTR(-ENOENT); | ||
| 406 | old = tracepoint_entry_remove_probe(entry, probe, data); | ||
| 407 | if (IS_ERR(old)) | ||
| 408 | return old; | ||
| 409 | if (!entry->refcount) | ||
| 410 | remove_tracepoint(entry); | ||
| 411 | return old; | ||
| 412 | } | ||
| 413 | |||
| 414 | /** | 263 | /** |
| 415 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint | 264 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint |
| 416 | * @name: tracepoint name | 265 | * @tp: tracepoint |
| 417 | * @probe: probe function pointer | 266 | * @probe: probe function pointer |
| 418 | * | 267 | * |
| 419 | * We do not need to call a synchronize_sched to make sure the probes have | 268 | * Returns 0 if ok, error value on error. |
| 420 | * finished running before doing a module unload, because the module unload | ||
| 421 | * itself uses stop_machine(), which insures that every preempt disabled section | ||
| 422 | * have finished. | ||
| 423 | */ | 269 | */ |
| 424 | int tracepoint_probe_unregister(const char *name, void *probe, void *data) | 270 | int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) |
| 425 | { | 271 | { |
| 426 | struct tracepoint_func *old; | 272 | struct tracepoint_func tp_func; |
| 273 | int ret; | ||
| 427 | 274 | ||
| 428 | mutex_lock(&tracepoints_mutex); | 275 | mutex_lock(&tracepoints_mutex); |
| 429 | old = tracepoint_remove_probe(name, probe, data); | 276 | tp_func.func = probe; |
| 430 | if (IS_ERR(old)) { | 277 | tp_func.data = data; |
| 431 | mutex_unlock(&tracepoints_mutex); | 278 | ret = tracepoint_remove_func(tp, &tp_func); |
| 432 | return PTR_ERR(old); | ||
| 433 | } | ||
| 434 | tracepoint_update_probes(); /* may update entry */ | ||
| 435 | mutex_unlock(&tracepoints_mutex); | 279 | mutex_unlock(&tracepoints_mutex); |
| 436 | release_probes(old); | 280 | return ret; |
| 437 | return 0; | ||
| 438 | } | 281 | } |
| 439 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | 282 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); |
| 440 | 283 | ||
| 441 | static LIST_HEAD(old_probes); | 284 | #ifdef CONFIG_MODULES |
| 442 | static int need_update; | 285 | bool trace_module_has_bad_taint(struct module *mod) |
| 443 | |||
| 444 | static void tracepoint_add_old_probes(void *old) | ||
| 445 | { | 286 | { |
| 446 | need_update = 1; | 287 | return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) | |
| 447 | if (old) { | 288 | (1 << TAINT_UNSIGNED_MODULE)); |
| 448 | struct tp_probes *tp_probes = container_of(old, | ||
| 449 | struct tp_probes, probes[0]); | ||
| 450 | list_add(&tp_probes->u.list, &old_probes); | ||
| 451 | } | ||
| 452 | } | 289 | } |
| 453 | 290 | ||
| 454 | /** | 291 | static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list); |
| 455 | * tracepoint_probe_register_noupdate - register a probe but not connect | ||
| 456 | * @name: tracepoint name | ||
| 457 | * @probe: probe handler | ||
| 458 | * | ||
| 459 | * caller must call tracepoint_probe_update_all() | ||
| 460 | */ | ||
| 461 | int tracepoint_probe_register_noupdate(const char *name, void *probe, | ||
| 462 | void *data) | ||
| 463 | { | ||
| 464 | struct tracepoint_func *old; | ||
| 465 | |||
| 466 | mutex_lock(&tracepoints_mutex); | ||
| 467 | old = tracepoint_add_probe(name, probe, data); | ||
| 468 | if (IS_ERR(old)) { | ||
| 469 | mutex_unlock(&tracepoints_mutex); | ||
| 470 | return PTR_ERR(old); | ||
| 471 | } | ||
| 472 | tracepoint_add_old_probes(old); | ||
| 473 | mutex_unlock(&tracepoints_mutex); | ||
| 474 | return 0; | ||
| 475 | } | ||
| 476 | EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); | ||
| 477 | 292 | ||
| 478 | /** | 293 | /** |
| 479 | * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect | 294 | * register_tracepoint_notifier - register tracepoint coming/going notifier |
| 480 | * @name: tracepoint name | 295 | * @nb: notifier block |
| 481 | * @probe: probe function pointer | ||
| 482 | * | 296 | * |
| 483 | * caller must call tracepoint_probe_update_all() | 297 | * Notifiers registered with this function are called on module |
| 298 | * coming/going with the tracepoint_module_list_mutex held. | ||
| 299 | * The notifier block callback should expect a "struct tp_module" data | ||
| 300 | * pointer. | ||
| 484 | */ | 301 | */ |
| 485 | int tracepoint_probe_unregister_noupdate(const char *name, void *probe, | 302 | int register_tracepoint_module_notifier(struct notifier_block *nb) |
| 486 | void *data) | ||
| 487 | { | 303 | { |
| 488 | struct tracepoint_func *old; | 304 | struct tp_module *tp_mod; |
| 489 | 305 | int ret; | |
| 490 | mutex_lock(&tracepoints_mutex); | ||
| 491 | old = tracepoint_remove_probe(name, probe, data); | ||
| 492 | if (IS_ERR(old)) { | ||
| 493 | mutex_unlock(&tracepoints_mutex); | ||
| 494 | return PTR_ERR(old); | ||
| 495 | } | ||
| 496 | tracepoint_add_old_probes(old); | ||
| 497 | mutex_unlock(&tracepoints_mutex); | ||
| 498 | return 0; | ||
| 499 | } | ||
| 500 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate); | ||
| 501 | |||
| 502 | /** | ||
| 503 | * tracepoint_probe_update_all - update tracepoints | ||
| 504 | */ | ||
| 505 | void tracepoint_probe_update_all(void) | ||
| 506 | { | ||
| 507 | LIST_HEAD(release_probes); | ||
| 508 | struct tp_probes *pos, *next; | ||
| 509 | 306 | ||
| 510 | mutex_lock(&tracepoints_mutex); | 307 | mutex_lock(&tracepoint_module_list_mutex); |
| 511 | if (!need_update) { | 308 | ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb); |
| 512 | mutex_unlock(&tracepoints_mutex); | 309 | if (ret) |
| 513 | return; | 310 | goto end; |
| 514 | } | 311 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) |
| 515 | if (!list_empty(&old_probes)) | 312 | (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod); |
| 516 | list_replace_init(&old_probes, &release_probes); | 313 | end: |
| 517 | need_update = 0; | 314 | mutex_unlock(&tracepoint_module_list_mutex); |
| 518 | tracepoint_update_probes(); | 315 | return ret; |
| 519 | mutex_unlock(&tracepoints_mutex); | ||
| 520 | list_for_each_entry_safe(pos, next, &release_probes, u.list) { | ||
| 521 | list_del(&pos->u.list); | ||
| 522 | call_rcu_sched(&pos->u.rcu, rcu_free_old_probes); | ||
| 523 | } | ||
| 524 | } | 316 | } |
| 525 | EXPORT_SYMBOL_GPL(tracepoint_probe_update_all); | 317 | EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier); |
| 526 | 318 | ||
| 527 | /** | 319 | /** |
| 528 | * tracepoint_get_iter_range - Get a next tracepoint iterator given a range. | 320 | * unregister_tracepoint_notifier - unregister tracepoint coming/going notifier |
| 529 | * @tracepoint: current tracepoints (in), next tracepoint (out) | 321 | * @nb: notifier block |
| 530 | * @begin: beginning of the range | ||
| 531 | * @end: end of the range | ||
| 532 | * | 322 | * |
| 533 | * Returns whether a next tracepoint has been found (1) or not (0). | 323 | * The notifier block callback should expect a "struct tp_module" data |
| 534 | * Will return the first tracepoint in the range if the input tracepoint is | 324 | * pointer. |
| 535 | * NULL. | ||
| 536 | */ | 325 | */ |
| 537 | static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint, | 326 | int unregister_tracepoint_module_notifier(struct notifier_block *nb) |
| 538 | struct tracepoint * const *begin, struct tracepoint * const *end) | ||
| 539 | { | 327 | { |
| 540 | if (!*tracepoint && begin != end) { | 328 | struct tp_module *tp_mod; |
| 541 | *tracepoint = begin; | 329 | int ret; |
| 542 | return 1; | ||
| 543 | } | ||
| 544 | if (*tracepoint >= begin && *tracepoint < end) | ||
| 545 | return 1; | ||
| 546 | return 0; | ||
| 547 | } | ||
| 548 | 330 | ||
| 549 | #ifdef CONFIG_MODULES | 331 | mutex_lock(&tracepoint_module_list_mutex); |
| 550 | static void tracepoint_get_iter(struct tracepoint_iter *iter) | 332 | ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb); |
| 551 | { | 333 | if (ret) |
| 552 | int found = 0; | 334 | goto end; |
| 553 | struct tp_module *iter_mod; | 335 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) |
| 554 | 336 | (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod); | |
| 555 | /* Core kernel tracepoints */ | ||
| 556 | if (!iter->module) { | ||
| 557 | found = tracepoint_get_iter_range(&iter->tracepoint, | ||
| 558 | __start___tracepoints_ptrs, | ||
| 559 | __stop___tracepoints_ptrs); | ||
| 560 | if (found) | ||
| 561 | goto end; | ||
| 562 | } | ||
| 563 | /* Tracepoints in modules */ | ||
| 564 | mutex_lock(&tracepoints_mutex); | ||
| 565 | list_for_each_entry(iter_mod, &tracepoint_module_list, list) { | ||
| 566 | /* | ||
| 567 | * Sorted module list | ||
| 568 | */ | ||
| 569 | if (iter_mod < iter->module) | ||
| 570 | continue; | ||
| 571 | else if (iter_mod > iter->module) | ||
| 572 | iter->tracepoint = NULL; | ||
| 573 | found = tracepoint_get_iter_range(&iter->tracepoint, | ||
| 574 | iter_mod->tracepoints_ptrs, | ||
| 575 | iter_mod->tracepoints_ptrs | ||
| 576 | + iter_mod->num_tracepoints); | ||
| 577 | if (found) { | ||
| 578 | iter->module = iter_mod; | ||
| 579 | break; | ||
| 580 | } | ||
| 581 | } | ||
| 582 | mutex_unlock(&tracepoints_mutex); | ||
| 583 | end: | 337 | end: |
| 584 | if (!found) | 338 | mutex_unlock(&tracepoint_module_list_mutex); |
| 585 | tracepoint_iter_reset(iter); | 339 | return ret; |
| 586 | } | ||
| 587 | #else /* CONFIG_MODULES */ | ||
| 588 | static void tracepoint_get_iter(struct tracepoint_iter *iter) | ||
| 589 | { | ||
| 590 | int found = 0; | ||
| 591 | |||
| 592 | /* Core kernel tracepoints */ | ||
| 593 | found = tracepoint_get_iter_range(&iter->tracepoint, | ||
| 594 | __start___tracepoints_ptrs, | ||
| 595 | __stop___tracepoints_ptrs); | ||
| 596 | if (!found) | ||
| 597 | tracepoint_iter_reset(iter); | ||
| 598 | } | ||
| 599 | #endif /* CONFIG_MODULES */ | ||
| 600 | |||
| 601 | void tracepoint_iter_start(struct tracepoint_iter *iter) | ||
| 602 | { | ||
| 603 | tracepoint_get_iter(iter); | ||
| 604 | } | ||
| 605 | EXPORT_SYMBOL_GPL(tracepoint_iter_start); | ||
| 606 | 340 | ||
| 607 | void tracepoint_iter_next(struct tracepoint_iter *iter) | ||
| 608 | { | ||
| 609 | iter->tracepoint++; | ||
| 610 | /* | ||
| 611 | * iter->tracepoint may be invalid because we blindly incremented it. | ||
| 612 | * Make sure it is valid by marshalling on the tracepoints, getting the | ||
| 613 | * tracepoints from following modules if necessary. | ||
| 614 | */ | ||
| 615 | tracepoint_get_iter(iter); | ||
| 616 | } | 341 | } |
| 617 | EXPORT_SYMBOL_GPL(tracepoint_iter_next); | 342 | EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier); |
| 618 | 343 | ||
| 619 | void tracepoint_iter_stop(struct tracepoint_iter *iter) | 344 | /* |
| 345 | * Ensure the tracer unregistered the module's probes before the module | ||
| 346 | * teardown is performed. Prevents leaks of probe and data pointers. | ||
| 347 | */ | ||
| 348 | static void tp_module_going_check_quiescent(struct tracepoint * const *begin, | ||
| 349 | struct tracepoint * const *end) | ||
| 620 | { | 350 | { |
| 621 | } | 351 | struct tracepoint * const *iter; |
| 622 | EXPORT_SYMBOL_GPL(tracepoint_iter_stop); | ||
| 623 | 352 | ||
| 624 | void tracepoint_iter_reset(struct tracepoint_iter *iter) | 353 | if (!begin) |
| 625 | { | 354 | return; |
| 626 | #ifdef CONFIG_MODULES | 355 | for (iter = begin; iter < end; iter++) |
| 627 | iter->module = NULL; | 356 | WARN_ON_ONCE((*iter)->funcs); |
| 628 | #endif /* CONFIG_MODULES */ | ||
| 629 | iter->tracepoint = NULL; | ||
| 630 | } | 357 | } |
| 631 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); | ||
| 632 | 358 | ||
| 633 | #ifdef CONFIG_MODULES | ||
| 634 | static int tracepoint_module_coming(struct module *mod) | 359 | static int tracepoint_module_coming(struct module *mod) |
| 635 | { | 360 | { |
| 636 | struct tp_module *tp_mod, *iter; | 361 | struct tp_module *tp_mod; |
| 637 | int ret = 0; | 362 | int ret = 0; |
| 638 | 363 | ||
| 364 | if (!mod->num_tracepoints) | ||
| 365 | return 0; | ||
| 366 | |||
| 639 | /* | 367 | /* |
| 640 | * We skip modules that taint the kernel, especially those with different | 368 | * We skip modules that taint the kernel, especially those with different |
| 641 | * module headers (for forced load), to make sure we don't cause a crash. | 369 | * module headers (for forced load), to make sure we don't cause a crash. |
| 642 | * Staging and out-of-tree GPL modules are fine. | 370 | * Staging, out-of-tree, and unsigned GPL modules are fine. |
| 643 | */ | 371 | */ |
| 644 | if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP))) | 372 | if (trace_module_has_bad_taint(mod)) |
| 645 | return 0; | 373 | return 0; |
| 646 | mutex_lock(&tracepoints_mutex); | 374 | mutex_lock(&tracepoint_module_list_mutex); |
| 647 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); | 375 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); |
| 648 | if (!tp_mod) { | 376 | if (!tp_mod) { |
| 649 | ret = -ENOMEM; | 377 | ret = -ENOMEM; |
| 650 | goto end; | 378 | goto end; |
| 651 | } | 379 | } |
| 652 | tp_mod->num_tracepoints = mod->num_tracepoints; | 380 | tp_mod->mod = mod; |
| 653 | tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; | 381 | list_add_tail(&tp_mod->list, &tracepoint_module_list); |
| 654 | 382 | blocking_notifier_call_chain(&tracepoint_notify_list, | |
| 655 | /* | 383 | MODULE_STATE_COMING, tp_mod); |
| 656 | * tracepoint_module_list is kept sorted by struct module pointer | ||
| 657 | * address for iteration on tracepoints from a seq_file that can release | ||
| 658 | * the mutex between calls. | ||
| 659 | */ | ||
| 660 | list_for_each_entry_reverse(iter, &tracepoint_module_list, list) { | ||
| 661 | BUG_ON(iter == tp_mod); /* Should never be in the list twice */ | ||
| 662 | if (iter < tp_mod) { | ||
| 663 | /* We belong to the location right after iter. */ | ||
| 664 | list_add(&tp_mod->list, &iter->list); | ||
| 665 | goto module_added; | ||
| 666 | } | ||
| 667 | } | ||
| 668 | /* We belong to the beginning of the list */ | ||
| 669 | list_add(&tp_mod->list, &tracepoint_module_list); | ||
| 670 | module_added: | ||
| 671 | tracepoint_update_probe_range(mod->tracepoints_ptrs, | ||
| 672 | mod->tracepoints_ptrs + mod->num_tracepoints); | ||
| 673 | end: | 384 | end: |
| 674 | mutex_unlock(&tracepoints_mutex); | 385 | mutex_unlock(&tracepoint_module_list_mutex); |
| 675 | return ret; | 386 | return ret; |
| 676 | } | 387 | } |
| 677 | 388 | ||
| 678 | static int tracepoint_module_going(struct module *mod) | 389 | static void tracepoint_module_going(struct module *mod) |
| 679 | { | 390 | { |
| 680 | struct tp_module *pos; | 391 | struct tp_module *tp_mod; |
| 681 | 392 | ||
| 682 | mutex_lock(&tracepoints_mutex); | 393 | if (!mod->num_tracepoints) |
| 683 | tracepoint_update_probe_range(mod->tracepoints_ptrs, | 394 | return; |
| 684 | mod->tracepoints_ptrs + mod->num_tracepoints); | 395 | |
| 685 | list_for_each_entry(pos, &tracepoint_module_list, list) { | 396 | mutex_lock(&tracepoint_module_list_mutex); |
| 686 | if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) { | 397 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) { |
| 687 | list_del(&pos->list); | 398 | if (tp_mod->mod == mod) { |
| 688 | kfree(pos); | 399 | blocking_notifier_call_chain(&tracepoint_notify_list, |
| 400 | MODULE_STATE_GOING, tp_mod); | ||
| 401 | list_del(&tp_mod->list); | ||
| 402 | kfree(tp_mod); | ||
| 403 | /* | ||
| 404 | * Called the going notifier before checking for | ||
| 405 | * quiescence. | ||
| 406 | */ | ||
| 407 | tp_module_going_check_quiescent(mod->tracepoints_ptrs, | ||
| 408 | mod->tracepoints_ptrs + mod->num_tracepoints); | ||
| 689 | break; | 409 | break; |
| 690 | } | 410 | } |
| 691 | } | 411 | } |
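The large hunk above also converts module tracking to a notifier chain: register_tracepoint_module_notifier() adds the subscriber and, still under tracepoint_module_list_mutex, replays MODULE_STATE_COMING for every module already on the list, so a late subscriber cannot miss existing tracepoint modules. A userspace sketch of that "register, then replay" pattern under simplified assumptions (single-threaded, invented subscribe()/module_coming() helpers, fixed subscriber array):

```c
#include <stdio.h>

enum { MODULE_STATE_COMING, MODULE_STATE_GOING };

struct tp_module { const char *name; struct tp_module *next; };

typedef void (*notifier_fn)(int state, struct tp_module *mod);

static struct tp_module *module_list;	/* modules already loaded */
static notifier_fn subscribers[8];
static int nr_subscribers;

/* In the kernel this runs under tracepoint_module_list_mutex. */
static void subscribe(notifier_fn fn)
{
	subscribers[nr_subscribers++] = fn;
	/* replay current state so the subscriber sees existing modules */
	for (struct tp_module *m = module_list; m; m = m->next)
		fn(MODULE_STATE_COMING, m);
}

static void module_coming(struct tp_module *mod)
{
	mod->next = module_list;
	module_list = mod;
	for (int i = 0; i < nr_subscribers; i++)
		subscribers[i](MODULE_STATE_COMING, mod);
}

static void print_event(int state, struct tp_module *mod)
{
	printf("%s: %s\n",
	       state == MODULE_STATE_COMING ? "coming" : "going", mod->name);
}

int main(void)
{
	struct tp_module early = { "early_mod", NULL };
	struct tp_module late = { "late_mod", NULL };

	module_coming(&early);		/* loaded before anyone listens */
	subscribe(print_event);		/* still gets told about early_mod */
	module_coming(&late);		/* delivered live */
	return 0;
}
```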
| @@ -695,12 +415,11 @@ static int tracepoint_module_going(struct module *mod) | |||
| 695 | * flag on "going", in case a module taints the kernel only after being | 415 | * flag on "going", in case a module taints the kernel only after being |
| 696 | * loaded. | 416 | * loaded. |
| 697 | */ | 417 | */ |
| 698 | mutex_unlock(&tracepoints_mutex); | 418 | mutex_unlock(&tracepoint_module_list_mutex); |
| 699 | return 0; | ||
| 700 | } | 419 | } |
| 701 | 420 | ||
| 702 | int tracepoint_module_notify(struct notifier_block *self, | 421 | static int tracepoint_module_notify(struct notifier_block *self, |
| 703 | unsigned long val, void *data) | 422 | unsigned long val, void *data) |
| 704 | { | 423 | { |
| 705 | struct module *mod = data; | 424 | struct module *mod = data; |
| 706 | int ret = 0; | 425 | int ret = 0; |
| @@ -712,24 +431,58 @@ int tracepoint_module_notify(struct notifier_block *self, | |||
| 712 | case MODULE_STATE_LIVE: | 431 | case MODULE_STATE_LIVE: |
| 713 | break; | 432 | break; |
| 714 | case MODULE_STATE_GOING: | 433 | case MODULE_STATE_GOING: |
| 715 | ret = tracepoint_module_going(mod); | 434 | tracepoint_module_going(mod); |
| 435 | break; | ||
| 436 | case MODULE_STATE_UNFORMED: | ||
| 716 | break; | 437 | break; |
| 717 | } | 438 | } |
| 718 | return ret; | 439 | return ret; |
| 719 | } | 440 | } |
| 720 | 441 | ||
| 721 | struct notifier_block tracepoint_module_nb = { | 442 | static struct notifier_block tracepoint_module_nb = { |
| 722 | .notifier_call = tracepoint_module_notify, | 443 | .notifier_call = tracepoint_module_notify, |
| 723 | .priority = 0, | 444 | .priority = 0, |
| 724 | }; | 445 | }; |
| 725 | 446 | ||
| 726 | static int init_tracepoints(void) | 447 | static __init int init_tracepoints(void) |
| 727 | { | 448 | { |
| 728 | return register_module_notifier(&tracepoint_module_nb); | 449 | int ret; |
| 450 | |||
| 451 | ret = register_module_notifier(&tracepoint_module_nb); | ||
| 452 | if (ret) | ||
| 453 | pr_warning("Failed to register tracepoint module enter notifier\n"); | ||
| 454 | |||
| 455 | return ret; | ||
| 729 | } | 456 | } |
| 730 | __initcall(init_tracepoints); | 457 | __initcall(init_tracepoints); |
| 731 | #endif /* CONFIG_MODULES */ | 458 | #endif /* CONFIG_MODULES */ |
| 732 | 459 | ||
| 460 | static void for_each_tracepoint_range(struct tracepoint * const *begin, | ||
| 461 | struct tracepoint * const *end, | ||
| 462 | void (*fct)(struct tracepoint *tp, void *priv), | ||
| 463 | void *priv) | ||
| 464 | { | ||
| 465 | struct tracepoint * const *iter; | ||
| 466 | |||
| 467 | if (!begin) | ||
| 468 | return; | ||
| 469 | for (iter = begin; iter < end; iter++) | ||
| 470 | fct(*iter, priv); | ||
| 471 | } | ||
| 472 | |||
| 473 | /** | ||
| 474 | * for_each_kernel_tracepoint - iteration on all kernel tracepoints | ||
| 475 | * @fct: callback | ||
| 476 | * @priv: private data | ||
| 477 | */ | ||
| 478 | void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), | ||
| 479 | void *priv) | ||
| 480 | { | ||
| 481 | for_each_tracepoint_range(__start___tracepoints_ptrs, | ||
| 482 | __stop___tracepoints_ptrs, fct, priv); | ||
| 483 | } | ||
| 484 | EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint); | ||
| 485 | |||
| 733 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS | 486 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
| 734 | 487 | ||
| 735 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | 488 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ |
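The tracepoint.c diff above also exports for_each_kernel_tracepoint(), which walks the __start/__stop___tracepoints_ptrs section and calls a callback with an opaque priv pointer. A small userspace sketch of that begin/end pointer-range iteration, with stand-in types and invented names (for_each_range, count_and_print):

```c
#include <stdio.h>

struct tracepoint { const char *name; };

static void for_each_range(struct tracepoint * const *begin,
			   struct tracepoint * const *end,
			   void (*fct)(struct tracepoint *tp, void *priv),
			   void *priv)
{
	struct tracepoint * const *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(*iter, priv);
}

static void count_and_print(struct tracepoint *tp, void *priv)
{
	int *count = priv;

	printf("tracepoint: %s\n", tp->name);
	(*count)++;
}

int main(void)
{
	struct tracepoint a = { "sched_switch" }, b = { "irq_handler_entry" };
	struct tracepoint *ptrs[] = { &a, &b };
	int count = 0;

	for_each_range(&ptrs[0], &ptrs[2], count_and_print, &count);
	printf("%d tracepoints\n", count);
	return 0;
}
```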
diff --git a/kernel/up.c b/kernel/up.c index 509403e3fbc6..1760bf3d1463 100644 --- a/kernel/up.c +++ b/kernel/up.c | |||
| @@ -22,16 +22,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 22 | } | 22 | } |
| 23 | EXPORT_SYMBOL(smp_call_function_single); | 23 | EXPORT_SYMBOL(smp_call_function_single); |
| 24 | 24 | ||
| 25 | void __smp_call_function_single(int cpu, struct call_single_data *csd, | 25 | int smp_call_function_single_async(int cpu, struct call_single_data *csd) |
| 26 | int wait) | ||
| 27 | { | 26 | { |
| 28 | unsigned long flags; | 27 | unsigned long flags; |
| 29 | 28 | ||
| 30 | local_irq_save(flags); | 29 | local_irq_save(flags); |
| 31 | csd->func(csd->info); | 30 | csd->func(csd->info); |
| 32 | local_irq_restore(flags); | 31 | local_irq_restore(flags); |
| 32 | return 0; | ||
| 33 | } | 33 | } |
| 34 | EXPORT_SYMBOL(__smp_call_function_single); | 34 | EXPORT_SYMBOL(smp_call_function_single_async); |
| 35 | 35 | ||
| 36 | int on_each_cpu(smp_call_func_t func, void *info, int wait) | 36 | int on_each_cpu(smp_call_func_t func, void *info, int wait) |
| 37 | { | 37 | { |
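The kernel/up.c change above renames the UP stub to smp_call_function_single_async() and has it return 0: on a uniprocessor build there is no remote CPU to send an IPI to, so the "asynchronous" call simply runs the callback directly with interrupts disabled. A userspace sketch of that degenerate case, assuming a simplified call_single_data and an invented _up-suffixed function name; the irq save/restore is elided:

```c
#include <stdio.h>

struct call_single_data {
	void (*func)(void *info);
	void *info;
};

static int smp_call_function_single_async_up(struct call_single_data *csd)
{
	/* local_irq_save()/local_irq_restore() elided in this sketch */
	csd->func(csd->info);
	return 0;			/* always "queued and run" on UP */
}

static void say_hello(void *info)
{
	printf("hello from csd, info=%s\n", (const char *)info);
}

int main(void)
{
	struct call_single_data csd = { say_hello, "up" };

	return smp_call_function_single_async_up(&csd);
}
```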
diff --git a/kernel/user.c b/kernel/user.c index c006131beb77..294fc6a94168 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -222,5 +222,4 @@ static int __init uid_cache_init(void) | |||
| 222 | 222 | ||
| 223 | return 0; | 223 | return 0; |
| 224 | } | 224 | } |
| 225 | 225 | subsys_initcall(uid_cache_init); | |
| 226 | module_init(uid_cache_init); | ||
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index dd06439b9c84..0d8f6023fd8d 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -902,4 +902,4 @@ static __init int user_namespaces_init(void) | |||
| 902 | user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); | 902 | user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); |
| 903 | return 0; | 903 | return 0; |
| 904 | } | 904 | } |
| 905 | module_init(user_namespaces_init); | 905 | subsys_initcall(user_namespaces_init); |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 4431610f049a..e90089fd78e0 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -158,14 +158,14 @@ void touch_all_softlockup_watchdogs(void) | |||
| 158 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 158 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
| 159 | void touch_nmi_watchdog(void) | 159 | void touch_nmi_watchdog(void) |
| 160 | { | 160 | { |
| 161 | if (watchdog_user_enabled) { | 161 | /* |
| 162 | unsigned cpu; | 162 | * Using __raw here because some code paths have |
| 163 | 163 | * preemption enabled. If preemption is enabled | |
| 164 | for_each_present_cpu(cpu) { | 164 | * then interrupts should be enabled too, in which |
| 165 | if (per_cpu(watchdog_nmi_touch, cpu) != true) | 165 | * case we shouldn't have to worry about the watchdog |
| 166 | per_cpu(watchdog_nmi_touch, cpu) = true; | 166 | * going off. |
| 167 | } | 167 | */ |
| 168 | } | 168 | __raw_get_cpu_var(watchdog_nmi_touch) = true; |
| 169 | touch_softlockup_watchdog(); | 169 | touch_softlockup_watchdog(); |
| 170 | } | 170 | } |
| 171 | EXPORT_SYMBOL(touch_nmi_watchdog); | 171 | EXPORT_SYMBOL(touch_nmi_watchdog); |
| @@ -505,7 +505,6 @@ static void restart_watchdog_hrtimer(void *info) | |||
| 505 | 505 | ||
| 506 | static void update_timers(int cpu) | 506 | static void update_timers(int cpu) |
| 507 | { | 507 | { |
| 508 | struct call_single_data data = {.func = restart_watchdog_hrtimer}; | ||
| 509 | /* | 508 | /* |
| 510 | * Make sure that perf event counter will adopt to a new | 509 | * Make sure that perf event counter will adopt to a new |
| 511 | * sampling period. Updating the sampling period directly would | 510 | * sampling period. Updating the sampling period directly would |
| @@ -515,7 +514,7 @@ static void update_timers(int cpu) | |||
| 515 | * might be late already so we have to restart the timer as well. | 514 | * might be late already so we have to restart the timer as well. |
| 516 | */ | 515 | */ |
| 517 | watchdog_nmi_disable(cpu); | 516 | watchdog_nmi_disable(cpu); |
| 518 | __smp_call_function_single(cpu, &data, 1); | 517 | smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1); |
| 519 | watchdog_nmi_enable(cpu); | 518 | watchdog_nmi_enable(cpu); |
| 520 | } | 519 | } |
| 521 | 520 | ||
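The watchdog.c hunk above stops touch_nmi_watchdog() from walking every present CPU and instead sets only the flag of the CPU the caller is running on. A tiny userspace sketch of that local-only update, with a fake current_cpu index standing in for the kernel's per-CPU variable access:

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool watchdog_nmi_touch[NR_CPUS];
static int current_cpu = 2;		/* pretend we are running on CPU 2 */

static void touch_nmi_watchdog(void)
{
	/* kernel: __raw_get_cpu_var(watchdog_nmi_touch) = true; */
	watchdog_nmi_touch[current_cpu] = true;
}

int main(void)
{
	touch_nmi_watchdog();
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %s\n", cpu,
		       watchdog_nmi_touch[cpu] ? "touched" : "untouched");
	return 0;
}
```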
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 193e977a10ea..0ee63af30bd1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -516,6 +516,13 @@ void destroy_work_on_stack(struct work_struct *work) | |||
| 516 | } | 516 | } |
| 517 | EXPORT_SYMBOL_GPL(destroy_work_on_stack); | 517 | EXPORT_SYMBOL_GPL(destroy_work_on_stack); |
| 518 | 518 | ||
| 519 | void destroy_delayed_work_on_stack(struct delayed_work *work) | ||
| 520 | { | ||
| 521 | destroy_timer_on_stack(&work->timer); | ||
| 522 | debug_object_free(&work->work, &work_debug_descr); | ||
| 523 | } | ||
| 524 | EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack); | ||
| 525 | |||
| 519 | #else | 526 | #else |
| 520 | static inline void debug_work_activate(struct work_struct *work) { } | 527 | static inline void debug_work_activate(struct work_struct *work) { } |
| 521 | static inline void debug_work_deactivate(struct work_struct *work) { } | 528 | static inline void debug_work_deactivate(struct work_struct *work) { } |
| @@ -3225,7 +3232,7 @@ static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, | |||
| 3225 | return -ENOMEM; | 3232 | return -ENOMEM; |
| 3226 | 3233 | ||
| 3227 | if (sscanf(buf, "%d", &attrs->nice) == 1 && | 3234 | if (sscanf(buf, "%d", &attrs->nice) == 1 && |
| 3228 | attrs->nice >= -20 && attrs->nice <= 19) | 3235 | attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) |
| 3229 | ret = apply_workqueue_attrs(wq, attrs); | 3236 | ret = apply_workqueue_attrs(wq, attrs); |
| 3230 | else | 3237 | else |
| 3231 | ret = -EINVAL; | 3238 | ret = -EINVAL; |
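The last workqueue.c hunk replaces the hard-coded -20/19 bounds in wq_nice_store() with the named MIN_NICE/MAX_NICE constants. A short userspace sketch of that parse-and-range-check step; the constant definitions and the parse_nice() helper are local stand-ins for illustration:

```c
#include <stdio.h>

#define MIN_NICE	(-20)
#define MAX_NICE	19

/* Parse a nice value from a sysfs-style buffer; return 0 on success. */
static int parse_nice(const char *buf, int *nice)
{
	if (sscanf(buf, "%d", nice) == 1 &&
	    *nice >= MIN_NICE && *nice <= MAX_NICE)
		return 0;
	return -1;		/* kernel path returns -EINVAL here */
}

int main(void)
{
	int nice = 0;
	int ret;

	ret = parse_nice("5", &nice);
	printf("\"5\"  -> ret=%d nice=%d\n", ret, nice);
	ret = parse_nice("99", &nice);
	printf("\"99\" -> ret=%d (out of range)\n", ret);
	return 0;
}
```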
