Diffstat (limited to 'kernel'):

 kernel/audit.c                      |  27
 kernel/audit.h                      |   6
 kernel/auditfilter.c                |  33
 kernel/auditsc.c                    | 133
 kernel/futex.c                      |  37
 kernel/relay.c                      |   4
 kernel/seccomp.c                    |   4
 kernel/trace/trace.c                |  10
 kernel/trace/trace_events.c         |  55
 kernel/trace/trace_events_trigger.c |   2
 kernel/trace/trace_export.c         |   6
 kernel/trace/trace_kprobe.c         |  21
 kernel/trace/trace_output.c         |   2
 kernel/trace/trace_uprobe.c         |  20
 kernel/tracepoint.c                 | 516
 15 files changed, 469 insertions(+), 407 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 95a20f3f52f1..7c2893602d06 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@ struct audit_buffer {
 
 struct audit_reply {
 	__u32 portid;
 	struct net *net;
 	struct sk_buff *skb;
 };
 
@@ -396,7 +396,7 @@ static void audit_printk_skb(struct sk_buff *skb)
 		if (printk_ratelimit())
 			pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
 		else
-			audit_log_lost("printk limit exceeded\n");
+			audit_log_lost("printk limit exceeded");
 	}
 
 	audit_hold_skb(skb);
@@ -412,7 +412,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
 		BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
 		if (audit_pid) {
 			pr_err("*NO* daemon at audit_pid=%d\n", audit_pid);
-			audit_log_lost("auditd disappeared\n");
+			audit_log_lost("auditd disappeared");
 			audit_pid = 0;
 			audit_sock = NULL;
 		}
@@ -607,7 +607,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 {
 	int err = 0;
 
-	/* Only support the initial namespaces for now. */
+	/* Only support initial user namespace for now. */
 	/*
 	 * We return ECONNREFUSED because it tricks userspace into thinking
 	 * that audit was not configured into the kernel.  Lots of users
@@ -618,8 +618,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 	 * userspace will reject all logins.  This should be removed when we
 	 * support non init namespaces!!
 	 */
-	if ((current_user_ns() != &init_user_ns) ||
-	    (task_active_pid_ns(current) != &init_pid_ns))
+	if (current_user_ns() != &init_user_ns)
 		return -ECONNREFUSED;
 
 	switch (msg_type) {
@@ -639,6 +638,11 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 	case AUDIT_TTY_SET:
 	case AUDIT_TRIM:
 	case AUDIT_MAKE_EQUIV:
+		/* Only support auditd and auditctl in initial pid namespace
+		 * for now. */
+		if ((task_active_pid_ns(current) != &init_pid_ns))
+			return -EPERM;
+
 		if (!capable(CAP_AUDIT_CONTROL))
 			err = -EPERM;
 		break;
@@ -659,6 +663,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
 {
 	int rc = 0;
 	uid_t uid = from_kuid(&init_user_ns, current_uid());
+	pid_t pid = task_tgid_nr(current);
 
 	if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
 		*ab = NULL;
@@ -668,7 +673,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
 	*ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
 	if (unlikely(!*ab))
 		return rc;
-	audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid);
+	audit_log_format(*ab, "pid=%d uid=%u", pid, uid);
 	audit_log_session_info(*ab);
 	audit_log_task_context(*ab);
 
@@ -1097,7 +1102,7 @@ static void __net_exit audit_net_exit(struct net *net)
 		audit_sock = NULL;
 	}
 
-	rcu_assign_pointer(aunet->nlsk, NULL);
+	RCU_INIT_POINTER(aunet->nlsk, NULL);
 	synchronize_net();
 	netlink_kernel_release(sock);
 }
@@ -1829,11 +1834,11 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
 	spin_unlock_irq(&tsk->sighand->siglock);
 
 	audit_log_format(ab,
-			 " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
+			 " ppid=%d pid=%d auid=%u uid=%u gid=%u"
 			 " euid=%u suid=%u fsuid=%u"
 			 " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
-			 sys_getppid(),
-			 tsk->pid,
+			 task_ppid_nr(tsk),
+			 task_pid_nr(tsk),
			 from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
			 from_kuid(&init_user_ns, cred->uid),
			 from_kgid(&init_user_ns, cred->gid),
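A recurring pattern in this commit is swapping raw tsk->pid and sys_getppid() accesses for the namespace-aware task PID helpers, so that audit records always carry PIDs as seen from the initial PID namespace, where auditd runs. A minimal sketch of the distinction, using only the standard helpers from <linux/sched.h> that the diff itself calls (illustrative, not part of the commit):

	#include <linux/sched.h>

	/* Illustrative only: which PID number each helper reports for a
	 * task running inside a PID namespace. The audit changes above
	 * switch from the _vnr (virtual, caller-namespace) view to the
	 * _nr (global, init-namespace) view so records match what the
	 * audit daemon sees. */
	static void pid_view_example(struct task_struct *tsk)
	{
		pid_t global  = task_pid_nr(tsk);   /* init_pid_ns view */
		pid_t virt    = task_pid_vnr(tsk);  /* tsk's own ns view */
		pid_t parent  = task_ppid_nr(tsk);  /* parent, init-ns view */

		pr_debug("pid=%d vpid=%d ppid=%d\n", global, virt, parent);
	}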
diff --git a/kernel/audit.h b/kernel/audit.h
index 8df132214606..7bb65730c890 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -106,6 +106,11 @@ struct audit_names {
 	bool	should_free;
 };
 
+struct audit_proctitle {
+	int	len;	/* length of the cmdline field. */
+	char	*value;	/* the cmdline field */
+};
+
 /* The per-task audit context. */
 struct audit_context {
 	int	dummy;	/* must be the first element */
@@ -202,6 +207,7 @@ struct audit_context {
 		} execve;
 	};
 	int fds[2];
+	struct audit_proctitle proctitle;
 
 #if AUDIT_DEBUG
 	int	put_count;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 92062fd6cc8c..8e9bc9c3dbb7 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -19,6 +19,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/audit.h>
 #include <linux/kthread.h>
@@ -226,7 +228,7 @@ static int audit_match_signal(struct audit_entry *entry)
 #endif
 
 /* Common user-space to kernel rule translation. */
-static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
+static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *rule)
 {
 	unsigned listnr;
 	struct audit_entry *entry;
@@ -249,7 +251,7 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule
 		;
 	}
 	if (unlikely(rule->action == AUDIT_POSSIBLE)) {
-		printk(KERN_ERR "AUDIT_POSSIBLE is deprecated\n");
+		pr_err("AUDIT_POSSIBLE is deprecated\n");
 		goto exit_err;
 	}
 	if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS)
@@ -403,7 +405,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 	int i;
 	char *str;
 
-	entry = audit_to_entry_common((struct audit_rule *)data);
+	entry = audit_to_entry_common(data);
 	if (IS_ERR(entry))
 		goto exit_nofree;
 
@@ -431,6 +433,19 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 			f->val = 0;
 		}
 
+		if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
+			struct pid *pid;
+			rcu_read_lock();
+			pid = find_vpid(f->val);
+			if (!pid) {
+				rcu_read_unlock();
+				err = -ESRCH;
+				goto exit_free;
+			}
+			f->val = pid_nr(pid);
+			rcu_read_unlock();
+		}
+
 		err = audit_field_valid(entry, f);
 		if (err)
 			goto exit_free;
@@ -479,8 +494,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 			/* Keep currently invalid fields around in case they
 			 * become valid after a policy reload. */
 			if (err == -EINVAL) {
-				printk(KERN_WARNING "audit rule for LSM "
-				       "\'%s\' is invalid\n", str);
+				pr_warn("audit rule for LSM \'%s\' is invalid\n",
+					str);
 				err = 0;
 			}
 			if (err) {
@@ -709,8 +724,8 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
 		/* Keep currently invalid fields around in case they
 		 * become valid after a policy reload. */
 		if (ret == -EINVAL) {
-			printk(KERN_WARNING "audit rule for LSM \'%s\' is "
-			       "invalid\n", df->lsm_str);
+			pr_warn("audit rule for LSM \'%s\' is invalid\n",
+				df->lsm_str);
 			ret = 0;
 		}
 
@@ -1240,12 +1255,14 @@ static int audit_filter_user_rules(struct audit_krule *rule, int type,
 
 	for (i = 0; i < rule->field_count; i++) {
 		struct audit_field *f = &rule->fields[i];
+		pid_t pid;
 		int result = 0;
 		u32 sid;
 
 		switch (f->type) {
 		case AUDIT_PID:
-			result = audit_comparator(task_pid_vnr(current), f->op, f->val);
+			pid = task_pid_nr(current);
+			result = audit_comparator(pid, f->op, f->val);
 			break;
 		case AUDIT_UID:
 			result = audit_uid_comparator(current_uid(), f->op, f->uid);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7aef2f4b6c64..f251a5e8d17a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -42,6 +42,8 @@
  * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <asm/types.h>
 #include <linux/atomic.h>
@@ -68,6 +70,7 @@
 #include <linux/capability.h>
 #include <linux/fs_struct.h>
 #include <linux/compat.h>
+#include <linux/ctype.h>
 
 #include "audit.h"
 
@@ -79,6 +82,9 @@
 /* no execve audit message should be longer than this (userspace limits) */
 #define MAX_EXECVE_AUDIT_LEN 7500
 
+/* max length to print of cmdline/proctitle value during audit */
+#define MAX_PROCTITLE_AUDIT_LEN 128
+
 /* number of audit rules */
 int audit_n_rules;
 
@@ -451,15 +457,17 @@ static int audit_filter_rules(struct task_struct *tsk,
 		struct audit_field *f = &rule->fields[i];
 		struct audit_names *n;
 		int result = 0;
+		pid_t pid;
 
 		switch (f->type) {
 		case AUDIT_PID:
-			result = audit_comparator(tsk->pid, f->op, f->val);
+			pid = task_pid_nr(tsk);
+			result = audit_comparator(pid, f->op, f->val);
 			break;
 		case AUDIT_PPID:
 			if (ctx) {
 				if (!ctx->ppid)
-					ctx->ppid = sys_getppid();
+					ctx->ppid = task_ppid_nr(tsk);
 				result = audit_comparator(ctx->ppid, f->op, f->val);
 			}
 			break;
@@ -805,7 +813,8 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
 	rcu_read_unlock();
 }
 
-static inline struct audit_context *audit_get_context(struct task_struct *tsk,
+/* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */
+static inline struct audit_context *audit_take_context(struct task_struct *tsk,
 						      int return_valid,
 						      long return_code)
 {
@@ -842,6 +851,13 @@ static inline struct audit_context *audit_take_context(struct task_struct *tsk,
 	return context;
 }
 
+static inline void audit_proctitle_free(struct audit_context *context)
+{
+	kfree(context->proctitle.value);
+	context->proctitle.value = NULL;
+	context->proctitle.len = 0;
+}
+
 static inline void audit_free_names(struct audit_context *context)
 {
 	struct audit_names *n, *next;
@@ -850,16 +866,15 @@ static inline void audit_free_names(struct audit_context *context)
 	if (context->put_count + context->ino_count != context->name_count) {
 		int i = 0;
 
-		printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
-		       " name_count=%d put_count=%d"
-		       " ino_count=%d [NOT freeing]\n",
-		       __FILE__, __LINE__,
+		pr_err("%s:%d(:%d): major=%d in_syscall=%d"
+		       " name_count=%d put_count=%d ino_count=%d"
+		       " [NOT freeing]\n", __FILE__, __LINE__,
 		       context->serial, context->major, context->in_syscall,
 		       context->name_count, context->put_count,
 		       context->ino_count);
 		list_for_each_entry(n, &context->names_list, list) {
-			printk(KERN_ERR "names[%d] = %p = %s\n", i++,
-			       n->name, n->name->name ?: "(null)");
+			pr_err("names[%d] = %p = %s\n", i++, n->name,
+			       n->name->name ?: "(null)");
 		}
 		dump_stack();
 		return;
@@ -955,6 +970,7 @@ static inline void audit_free_context(struct audit_context *context)
 	audit_free_aux(context);
 	kfree(context->filterkey);
 	kfree(context->sockaddr);
+	audit_proctitle_free(context);
 	kfree(context);
 }
 
@@ -1157,7 +1173,7 @@ static void audit_log_execve_info(struct audit_context *context,
 	 */
 	buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
 	if (!buf) {
-		audit_panic("out of memory for argv string\n");
+		audit_panic("out of memory for argv string");
 		return;
 	}
 
@@ -1271,6 +1287,59 @@ static void show_special(struct audit_context *context, int *call_panic)
 	audit_log_end(ab);
 }
 
+static inline int audit_proctitle_rtrim(char *proctitle, int len)
+{
+	char *end = proctitle + len - 1;
+	while (end > proctitle && !isprint(*end))
+		end--;
+
+	/* catch the case where proctitle is only 1 non-print character */
+	len = end - proctitle + 1;
+	len -= isprint(proctitle[len-1]) == 0;
+	return len;
+}
+
+static void audit_log_proctitle(struct task_struct *tsk,
+				struct audit_context *context)
+{
+	int res;
+	char *buf;
+	char *msg = "(null)";
+	int len = strlen(msg);
+	struct audit_buffer *ab;
+
+	ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
+	if (!ab)
+		return;	/* audit_panic or being filtered */
+
+	audit_log_format(ab, "proctitle=");
+
+	/* Not cached */
+	if (!context->proctitle.value) {
+		buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL);
+		if (!buf)
+			goto out;
+		/* Historically called this from procfs naming */
+		res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN);
+		if (res == 0) {
+			kfree(buf);
+			goto out;
+		}
+		res = audit_proctitle_rtrim(buf, res);
+		if (res == 0) {
+			kfree(buf);
+			goto out;
+		}
+		context->proctitle.value = buf;
+		context->proctitle.len = res;
+	}
+	msg = context->proctitle.value;
+	len = context->proctitle.len;
+out:
+	audit_log_n_untrustedstring(ab, msg, len);
+	audit_log_end(ab);
+}
+
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
 	int i, call_panic = 0;
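The new audit_proctitle_rtrim() above trims trailing non-printable bytes, since get_cmdline() copies argv strings that are NUL-separated and usually NUL-terminated. The same logic can be exercised standalone; a userspace sketch (not part of the commit), with <ctype.h>'s isprint() standing in for the kernel's:

	#include <ctype.h>
	#include <stdio.h>

	/* Mirror of audit_proctitle_rtrim(): drop trailing non-printable
	 * bytes, including the corner case of a buffer holding a single
	 * non-printable character (result length 0). */
	static int proctitle_rtrim(char *proctitle, int len)
	{
		char *end = proctitle + len - 1;

		while (end > proctitle && !isprint((unsigned char)*end))
			end--;
		len = end - proctitle + 1;
		len -= isprint((unsigned char)proctitle[len - 1]) == 0;
		return len;
	}

	int main(void)
	{
		char buf[] = { 'l', 's', '\0', '-', 'l', '\0' };

		/* Trailing NUL is trimmed: prints 5. The embedded NUL stays
		 * and is later hex-encoded by audit_log_n_untrustedstring(). */
		printf("%d\n", proctitle_rtrim(buf, (int)sizeof(buf)));
		return 0;
	}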
@@ -1388,6 +1457,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 		audit_log_name(context, n, NULL, i++, &call_panic);
 	}
 
+	audit_log_proctitle(tsk, context);
+
 	/* Send end of event record to help user space know we are finished */
 	ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
 	if (ab)
@@ -1406,7 +1477,7 @@ void __audit_free(struct task_struct *tsk)
 {
 	struct audit_context *context;
 
-	context = audit_get_context(tsk, 0, 0);
+	context = audit_take_context(tsk, 0, 0);
 	if (!context)
 		return;
 
@@ -1500,7 +1571,7 @@ void __audit_syscall_exit(int success, long return_code)
 	else
 		success = AUDITSC_FAILURE;
 
-	context = audit_get_context(tsk, success, return_code);
+	context = audit_take_context(tsk, success, return_code);
 	if (!context)
 		return;
 
@@ -1550,7 +1621,7 @@ static inline void handle_one(const struct inode *inode)
 	if (likely(put_tree_ref(context, chunk)))
 		return;
 	if (unlikely(!grow_tree_refs(context))) {
-		printk(KERN_WARNING "out of memory, audit has lost a tree reference\n");
+		pr_warn("out of memory, audit has lost a tree reference\n");
 		audit_set_auditable(context);
 		audit_put_chunk(chunk);
 		unroll_tree_refs(context, p, count);
@@ -1609,8 +1680,7 @@ retry:
 		goto retry;
 	}
 	/* too bad */
-	printk(KERN_WARNING
-	       "out of memory, audit has lost a tree reference\n");
+	pr_warn("out of memory, audit has lost a tree reference\n");
 	unroll_tree_refs(context, p, count);
 	audit_set_auditable(context);
 	return;
@@ -1682,7 +1752,7 @@ void __audit_getname(struct filename *name)
 
 	if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
-		printk(KERN_ERR "%s:%d(:%d): ignoring getname(%p)\n",
+		pr_err("%s:%d(:%d): ignoring getname(%p)\n",
 		       __FILE__, __LINE__, context->serial, name);
 		dump_stack();
 #endif
@@ -1721,15 +1791,15 @@ void audit_putname(struct filename *name)
 	BUG_ON(!context);
 	if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
-		printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
+		pr_err("%s:%d(:%d): final_putname(%p)\n",
 		       __FILE__, __LINE__, context->serial, name);
 		if (context->name_count) {
 			struct audit_names *n;
 			int i = 0;
 
 			list_for_each_entry(n, &context->names_list, list)
-				printk(KERN_ERR "name[%d] = %p = %s\n", i++,
-				       n->name, n->name->name ?: "(null)");
+				pr_err("name[%d] = %p = %s\n", i++, n->name,
+				       n->name->name ?: "(null)");
 		}
 #endif
 		final_putname(name);
@@ -1738,9 +1808,8 @@ void audit_putname(struct filename *name)
 	else {
 		++context->put_count;
 		if (context->put_count > context->name_count) {
-			printk(KERN_ERR "%s:%d(:%d): major=%d"
-			       " in_syscall=%d putname(%p) name_count=%d"
-			       " put_count=%d\n",
+			pr_err("%s:%d(:%d): major=%d in_syscall=%d putname(%p)"
+			       " name_count=%d put_count=%d\n",
 			       __FILE__, __LINE__,
 			       context->serial, context->major,
 			       context->in_syscall, name->name,
@@ -1981,12 +2050,10 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
 	if (!ab)
 		return;
-	audit_log_format(ab, "pid=%d uid=%u"
-			 " old-auid=%u new-auid=%u old-ses=%u new-ses=%u"
-			 " res=%d",
-			 current->pid, uid,
-			 oldloginuid, loginuid, oldsessionid, sessionid,
-			 !rc);
+	audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
+	audit_log_task_context(ab);
+	audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d",
+			 oldloginuid, loginuid, oldsessionid, sessionid, !rc);
 	audit_log_end(ab);
 }
 
@@ -2208,7 +2275,7 @@ void __audit_ptrace(struct task_struct *t)
 {
 	struct audit_context *context = current->audit_context;
 
-	context->target_pid = t->pid;
+	context->target_pid = task_pid_nr(t);
 	context->target_auid = audit_get_loginuid(t);
 	context->target_uid = task_uid(t);
 	context->target_sessionid = audit_get_sessionid(t);
@@ -2233,7 +2300,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 
 	if (audit_pid && t->tgid == audit_pid) {
 		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
-			audit_sig_pid = tsk->pid;
+			audit_sig_pid = task_pid_nr(tsk);
 			if (uid_valid(tsk->loginuid))
 				audit_sig_uid = tsk->loginuid;
 			else
@@ -2247,7 +2314,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 	/* optimize the common case by putting first signal recipient directly
 	 * in audit_context */
 	if (!ctx->target_pid) {
-		ctx->target_pid = t->tgid;
+		ctx->target_pid = task_tgid_nr(t);
 		ctx->target_auid = audit_get_loginuid(t);
 		ctx->target_uid = t_uid;
 		ctx->target_sessionid = audit_get_sessionid(t);
@@ -2268,7 +2335,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 	}
 	BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS);
 
-	axp->target_pid[axp->pid_count] = t->tgid;
+	axp->target_pid[axp->pid_count] = task_tgid_nr(t);
 	axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
 	axp->target_uid[axp->pid_count] = t_uid;
 	axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
@@ -2368,7 +2435,7 @@ static void audit_log_task(struct audit_buffer *ab)
 			 from_kgid(&init_user_ns, gid),
 			 sessionid);
 	audit_log_task_context(ab);
-	audit_log_format(ab, " pid=%d comm=", current->pid);
+	audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
 	audit_log_untrustedstring(ab, current->comm);
 	if (mm) {
 		down_read(&mm->mmap_sem);
diff --git a/kernel/futex.c b/kernel/futex.c
index 67dacaf93e56..5f589279e462 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -70,7 +70,10 @@
 #include "locking/rtmutex_common.h"
 
 /*
- * Basic futex operation and ordering guarantees:
+ * READ this before attempting to hack on futexes!
+ *
+ * Basic futex operation and ordering guarantees
+ * =============================================
  *
  * The waiter reads the futex value in user space and calls
  * futex_wait(). This function computes the hash bucket and acquires
@@ -119,7 +122,7 @@
 *   sys_futex(WAIT, futex, val);
 *     futex_wait(futex, val);
 *
- *   waiters++;
+ *   waiters++; (a)
 *   mb(); (A) <-- paired with -.
 *                              |
 *   lock(hash_bucket(futex));  |
@@ -135,14 +138,14 @@
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
- *                                           wake_waiters(futex);
- *                                           unlock(hash_bucket(futex));
+ *   else                                    wake_waiters(futex);
+ *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
- * Where (A) orders the waiters increment and the futex value read -- this
- * is guaranteed by the head counter in the hb spinlock; and where (B)
- * orders the write to futex and the waiters read -- this is done by the
- * barriers in get_futex_key_refs(), through either ihold or atomic_inc,
- * depending on the futex type.
+ * Where (A) orders the waiters increment and the futex value read through
+ * atomic operations (see hb_waiters_inc) and where (B) orders the write
+ * to futex and the waiters read -- this is done by the barriers in
+ * get_futex_key_refs(), through either ihold or atomic_inc, depending on the
+ * futex type.
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
@@ -155,6 +158,17 @@
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
+ *
+ * Note that a new waiter is accounted for in (a) even when it is possible that
+ * the wait call can return error, in which case we backtrack from it in (b).
+ * Refer to the comment in queue_lock().
+ *
+ * Similarly, in order to account for waiters being requeued on another
+ * address we always increment the waiters for the destination bucket before
+ * acquiring the lock. It then decrements them again after releasing it -
+ * the code that actually moves the futex(es) between hash buckets (requeue_futex)
+ * will do the additional required waiter count housekeeping. This is done for
+ * double_lock_hb() and double_unlock_hb(), respectively.
 */
 
 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
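For context, the (a)/(b) accounting the new comment text refers to is plain per-bucket atomic bookkeeping. A sketch of what hb_waiters_inc()/hb_waiters_dec() amount to, assuming an atomic_t waiters field in struct futex_hash_bucket (SMP-only, since the count exists just to let wakers skip empty buckets):

	/* Sketch under the stated assumption; the barrier after the
	 * increment is the mb() marked (A) in the comment above, pairing
	 * with (B) in get_futex_key_refs(). */
	static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
	{
	#ifdef CONFIG_SMP
		atomic_inc(&hb->waiters);
		smp_mb__after_atomic_inc();	/* full barrier (A) */
	#endif
	}

	static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
	{
	#ifdef CONFIG_SMP
		atomic_dec(&hb->waiters);
	#endif
	}

	static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
	{
	#ifdef CONFIG_SMP
		return atomic_read(&hb->waiters);
	#else
		return 1;	/* UP: always take the slow path */
	#endif
	}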
@@ -1452,6 +1466,7 @@ retry:
 	hb2 = hash_futex(&key2);
 
 retry_private:
+	hb_waiters_inc(hb2);
 	double_lock_hb(hb1, hb2);
 
 	if (likely(cmpval != NULL)) {
@@ -1461,6 +1476,7 @@ retry_private:
 
 		if (unlikely(ret)) {
 			double_unlock_hb(hb1, hb2);
+			hb_waiters_dec(hb2);
 
 			ret = get_user(curval, uaddr1);
 			if (ret)
@@ -1510,6 +1526,7 @@ retry_private:
 			break;
 		case -EFAULT:
 			double_unlock_hb(hb1, hb2);
+			hb_waiters_dec(hb2);
 			put_futex_key(&key2);
 			put_futex_key(&key1);
 			ret = fault_in_user_writeable(uaddr2);
@@ -1519,6 +1536,7 @@ retry_private:
 		case -EAGAIN:
 			/* The owner was exiting, try again. */
 			double_unlock_hb(hb1, hb2);
+			hb_waiters_dec(hb2);
 			put_futex_key(&key2);
 			put_futex_key(&key1);
 			cond_resched();
@@ -1594,6 +1612,7 @@ retry_private:
 
 out_unlock:
 	double_unlock_hb(hb1, hb2);
+	hb_waiters_dec(hb2);
 
 	/*
 	 * drop_futex_key_refs() must be called outside the spinlocks. During
diff --git a/kernel/relay.c b/kernel/relay.c
index 52d6a6f56261..5a56d3c8dc03 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1195,8 +1195,6 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
 
 static const struct pipe_buf_operations relay_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = relay_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -1253,7 +1251,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
 	subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
 	pidx = (read_start / PAGE_SIZE) % subbuf_pages;
 	poff = read_start & ~PAGE_MASK;
-	nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers);
+	nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max);
 
 	for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
 		unsigned int this_len, this_end, private;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index fd609bd9d6dd..d8d046c0726a 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -71,7 +71,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
 	struct pt_regs *regs = task_pt_regs(task);
 
 	sd->nr = syscall_get_nr(task, regs);
-	sd->arch = syscall_get_arch(task, regs);
+	sd->arch = syscall_get_arch();
 
 	/* Unroll syscall_get_args to help gcc on arm. */
 	syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
@@ -348,7 +348,7 @@ static void seccomp_send_sigsys(int syscall, int reason)
 	info.si_code = SYS_SECCOMP;
 	info.si_call_addr = (void __user *)KSTK_EIP(current);
 	info.si_errno = reason;
-	info.si_arch = syscall_get_arch(current, task_pt_regs(current));
+	info.si_arch = syscall_get_arch();
 	info.si_syscall = syscall;
 	force_sig_info(SIGSYS, &info, current);
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9be67c5e5b0f..737b0efa1a62 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3611,6 +3611,8 @@ static const char readme_msg[] =
 #ifdef CONFIG_TRACER_SNAPSHOT
 	"\t\t snapshot\n"
 #endif
+	"\t\t dump\n"
+	"\t\t cpudump\n"
 	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
 	"\t          echo do_trap:traceoff:3 > set_ftrace_filter\n"
 	"\t The first one will disable tracing every time do_fault is hit\n"
@@ -4390,8 +4392,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 
 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = generic_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -4486,7 +4486,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
-	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
 		spd.pages[i] = alloc_page(GFP_KERNEL);
 		if (!spd.pages[i])
 			break;
@@ -5279,8 +5279,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 /* Pipe buffer operations for a buffer. */
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = buffer_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -5356,7 +5354,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	trace_access_lock(iter->cpu_file);
 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
 
-	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
+	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 83a4378dc5e0..3ddfd8f62c05 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -223,24 +223,25 @@ int ftrace_event_reg(struct ftrace_event_call *call,
 {
 	struct ftrace_event_file *file = data;
 
+	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return tracepoint_probe_register(call->name,
+		return tracepoint_probe_register(call->tp,
 						 call->class->probe,
 						 file);
 	case TRACE_REG_UNREGISTER:
-		tracepoint_probe_unregister(call->name,
+		tracepoint_probe_unregister(call->tp,
 					    call->class->probe,
 					    file);
 		return 0;
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return tracepoint_probe_register(call->name,
+		return tracepoint_probe_register(call->tp,
 						 call->class->perf_probe,
 						 call);
 	case TRACE_REG_PERF_UNREGISTER:
-		tracepoint_probe_unregister(call->name,
+		tracepoint_probe_unregister(call->tp,
 					    call->class->perf_probe,
 					    call);
 		return 0;
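The call->name to call->tp switch works because the tracepoint rework replaces the event name with a union; which member is valid is flagged by TRACE_EVENT_FL_TRACEPOINT (hence the WARN_ON above). A sketch of the layout and the ftrace_event_name() accessor the rest of this diff converts to -- the authoritative definitions live in include/linux/ftrace_event.h, so treat this as illustrative:

	struct ftrace_event_call {
		/* ... */
		union {
			char			*name;	/* static name */
			struct tracepoint	*tp;	/* if FL_TRACEPOINT */
		};
		int			flags;
		/* ... */
	};

	static inline const char *
	ftrace_event_name(struct ftrace_event_call *call)
	{
		if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
			return call->tp ? call->tp->name : NULL;

		return call->name;
	}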
@@ -352,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 			if (ret) {
 				tracing_stop_cmdline_record();
 				pr_info("event trace: Could not enable event "
-					"%s\n", call->name);
+					"%s\n", ftrace_event_name(call));
 				break;
 			}
 			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
@@ -481,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
+	const char *name;
 	int ret = -EINVAL;
 
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
+		name = ftrace_event_name(call);
 
-		if (!call->name || !call->class || !call->class->reg)
+		if (!name || !call->class || !call->class->reg)
 			continue;
 
 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
 			continue;
 
 		if (match &&
-		    strcmp(match, call->name) != 0 &&
+		    strcmp(match, name) != 0 &&
 		    strcmp(match, call->class->system) != 0)
 			continue;
 
 		if (sub && strcmp(sub, call->class->system) != 0)
 			continue;
 
-		if (event && strcmp(event, call->name) != 0)
+		if (event && strcmp(event, name) != 0)
 			continue;
 
 		ftrace_event_enable_disable(file, set);
@@ -699,7 +702,7 @@ static int t_show(struct seq_file *m, void *v)
 
 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
 		seq_printf(m, "%s:", call->class->system);
-	seq_printf(m, "%s\n", call->name);
+	seq_printf(m, "%s\n", ftrace_event_name(call));
 
 	return 0;
 }
@@ -792,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 	mutex_lock(&event_mutex);
 	list_for_each_entry(file, &tr->events, list) {
 		call = file->event_call;
-		if (!call->name || !call->class || !call->class->reg)
+		if (!ftrace_event_name(call) || !call->class || !call->class->reg)
 			continue;
 
 		if (system && strcmp(call->class->system, system->name) != 0)
@@ -907,7 +910,7 @@ static int f_show(struct seq_file *m, void *v)
 
 	switch ((unsigned long)v) {
 	case FORMAT_HEADER:
-		seq_printf(m, "name: %s\n", call->name);
+		seq_printf(m, "name: %s\n", ftrace_event_name(call));
 		seq_printf(m, "ID: %d\n", call->event.type);
 		seq_printf(m, "format:\n");
 		return 0;
@@ -1527,6 +1530,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 	struct trace_array *tr = file->tr;
 	struct list_head *head;
 	struct dentry *d_events;
+	const char *name;
 	int ret;
 
 	/*
@@ -1540,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 	} else
 		d_events = parent;
 
-	file->dir = debugfs_create_dir(call->name, d_events);
+	name = ftrace_event_name(call);
+	file->dir = debugfs_create_dir(name, d_events);
 	if (!file->dir) {
 		pr_warning("Could not create debugfs '%s' directory\n",
-			   call->name);
+			   name);
 		return -1;
 	}
 
@@ -1567,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 		ret = call->class->define_fields(call);
 		if (ret < 0) {
 			pr_warning("Could not initialize trace point"
-				   " events/%s\n", call->name);
+				   " events/%s\n", name);
 			return -1;
 		}
 	}
@@ -1631,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call)
 static int event_init(struct ftrace_event_call *call)
 {
 	int ret = 0;
+	const char *name;
 
-	if (WARN_ON(!call->name))
+	name = ftrace_event_name(call);
+	if (WARN_ON(!name))
 		return -EINVAL;
 
 	if (call->class->raw_init) {
 		ret = call->class->raw_init(call);
 		if (ret < 0 && ret != -ENOSYS)
 			pr_warn("Could not initialize trace events/%s\n",
-				call->name);
+				name);
 	}
 
 	return ret;
@@ -1885,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr)
 		ret = __trace_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
-				   call->name);
+				   ftrace_event_name(call));
 	}
 }
 
@@ -1894,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
+	const char *name;
 
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
+		name = ftrace_event_name(call);
 
-		if (!call->name || !call->class || !call->class->reg)
+		if (!name || !call->class || !call->class->reg)
 			continue;
 
 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
 			continue;
 
-		if (strcmp(event, call->name) == 0 &&
+		if (strcmp(event, name) == 0 &&
 		    strcmp(system, call->class->system) == 0)
 			return file;
 	}
@@ -1973,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip,
 	seq_printf(m, "%s:%s:%s",
 		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
 		   data->file->event_call->class->system,
-		   data->file->event_call->name);
+		   ftrace_event_name(data->file->event_call));
 
 	if (data->count == -1)
 		seq_printf(m, ":unlimited\n");
@@ -2193,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
 		ret = event_create_dir(tr->event_dir, file);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
-				   file->event_call->name);
+				   ftrace_event_name(file->event_call));
 	}
 }
 
@@ -2217,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr)
 		ret = __trace_early_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create early event %s\n",
-				   call->name);
+				   ftrace_event_name(call));
 	}
 }
 
@@ -2549,7 +2558,7 @@ static __init void event_trace_self_tests(void)
 			continue;
 #endif
 
-		pr_info("Testing event %s: ", call->name);
+		pr_info("Testing event %s: ", ftrace_event_name(call));
 
 		/*
 		 * If an event is already enabled, someone is using
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 8efbb69b04f0..925f537f07d1 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
 	seq_printf(m, "%s:%s:%s",
 		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
 		   enable_data->file->event_call->class->system,
-		   enable_data->file->event_call->name);
+		   ftrace_event_name(enable_data->file->event_call));
 
 	if (data->count == -1)
 		seq_puts(m, ":unlimited");
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index ee0a5098ac43..d4ddde28a81a 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -173,9 +173,11 @@ struct ftrace_event_class __refdata event_class_ftrace_##call = { \
 };								\
 								\
 struct ftrace_event_call __used event_##call = {		\
-	.name = #call,						\
-	.event.type = etype,					\
 	.class = &event_class_ftrace_##call,			\
+	{							\
+		.name = #call,					\
+	},							\
+	.event.type = etype,					\
 	.print_fmt = print,					\
 	.flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
 };								\
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d021d21dd150..903ae28962be 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -341,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, | |||
341 | struct trace_kprobe *tk; | 341 | struct trace_kprobe *tk; |
342 | 342 | ||
343 | list_for_each_entry(tk, &probe_list, list) | 343 | list_for_each_entry(tk, &probe_list, list) |
344 | if (strcmp(tk->tp.call.name, event) == 0 && | 344 | if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 && |
345 | strcmp(tk->tp.call.class->system, group) == 0) | 345 | strcmp(tk->tp.call.class->system, group) == 0) |
346 | return tk; | 346 | return tk; |
347 | return NULL; | 347 | return NULL; |
@@ -516,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk) | |||
516 | mutex_lock(&probe_lock); | 516 | mutex_lock(&probe_lock); |
517 | 517 | ||
518 | /* Delete old (same name) event if it exists */ | 518 | /* Delete old (same name) event if it exists */ |
519 | old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); | 519 | old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call), |
520 | tk->tp.call.class->system); | ||
520 | if (old_tk) { | 521 | if (old_tk) { |
521 | ret = unregister_trace_kprobe(old_tk); | 522 | ret = unregister_trace_kprobe(old_tk); |
522 | if (ret < 0) | 523 | if (ret < 0) |
@@ -564,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, | |||
564 | if (ret) | 565 | if (ret) |
565 | pr_warning("Failed to re-register probe %s on" | 566 | pr_warning("Failed to re-register probe %s on" |
566 | "%s: %d\n", | 567 | "%s: %d\n", |
567 | tk->tp.call.name, mod->name, ret); | 568 | ftrace_event_name(&tk->tp.call), |
569 | mod->name, ret); | ||
568 | } | 570 | } |
569 | } | 571 | } |
570 | mutex_unlock(&probe_lock); | 572 | mutex_unlock(&probe_lock); |
@@ -818,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
818 | int i; | 820 | int i; |
819 | 821 | ||
820 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); | 822 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); |
821 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); | 823 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, |
824 | ftrace_event_name(&tk->tp.call)); | ||
822 | 825 | ||
823 | if (!tk->symbol) | 826 | if (!tk->symbol) |
824 | seq_printf(m, " 0x%p", tk->rp.kp.addr); | 827 | seq_printf(m, " 0x%p", tk->rp.kp.addr); |
@@ -876,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
876 | { | 879 | { |
877 | struct trace_kprobe *tk = v; | 880 | struct trace_kprobe *tk = v; |
878 | 881 | ||
879 | seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, | 882 | seq_printf(m, " %-44s %15lu %15lu\n", |
883 | ftrace_event_name(&tk->tp.call), tk->nhit, | ||
880 | tk->rp.kp.nmissed); | 884 | tk->rp.kp.nmissed); |
881 | 885 | ||
882 | return 0; | 886 | return 0; |
@@ -1011,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
1011 | field = (struct kprobe_trace_entry_head *)iter->ent; | 1015 | field = (struct kprobe_trace_entry_head *)iter->ent; |
1012 | tp = container_of(event, struct trace_probe, call.event); | 1016 | tp = container_of(event, struct trace_probe, call.event); |
1013 | 1017 | ||
1014 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | 1018 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) |
1015 | goto partial; | 1019 | goto partial; |
1016 | 1020 | ||
1017 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | 1021 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) |
@@ -1047,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
1047 | field = (struct kretprobe_trace_entry_head *)iter->ent; | 1051 | field = (struct kretprobe_trace_entry_head *)iter->ent; |
1048 | tp = container_of(event, struct trace_probe, call.event); | 1052 | tp = container_of(event, struct trace_probe, call.event); |
1049 | 1053 | ||
1050 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | 1054 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) |
1051 | goto partial; | 1055 | goto partial; |
1052 | 1056 | ||
1053 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | 1057 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) |
@@ -1286,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk) | |||
1286 | call->data = tk; | 1290 | call->data = tk; |
1287 | ret = trace_add_event_call(call); | 1291 | ret = trace_add_event_call(call); |
1288 | if (ret) { | 1292 | if (ret) { |
1289 | pr_info("Failed to register kprobe event: %s\n", call->name); | 1293 | pr_info("Failed to register kprobe event: %s\n", |
1294 | ftrace_event_name(call)); | ||
1290 | kfree(call->print_fmt); | 1295 | kfree(call->print_fmt); |
1291 | unregister_ftrace_event(&call->event); | 1296 | unregister_ftrace_event(&call->event); |
1292 | } | 1297 | } |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ca0e79e2abaa..a436de18aa99 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
431 | } | 431 | } |
432 | 432 | ||
433 | trace_seq_init(p); | 433 | trace_seq_init(p); |
434 | ret = trace_seq_printf(s, "%s: ", event->name); | 434 | ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event)); |
435 | if (!ret) | 435 | if (!ret) |
436 | return TRACE_TYPE_PARTIAL_LINE; | 436 | return TRACE_TYPE_PARTIAL_LINE; |
437 | 437 | ||
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index e4473367e7a4..930e51462dc8 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -294,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou | |||
294 | struct trace_uprobe *tu; | 294 | struct trace_uprobe *tu; |
295 | 295 | ||
296 | list_for_each_entry(tu, &uprobe_list, list) | 296 | list_for_each_entry(tu, &uprobe_list, list) |
297 | if (strcmp(tu->tp.call.name, event) == 0 && | 297 | if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 && |
298 | strcmp(tu->tp.call.class->system, group) == 0) | 298 | strcmp(tu->tp.call.class->system, group) == 0) |
299 | return tu; | 299 | return tu; |
300 | 300 | ||
@@ -324,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu) | |||
324 | mutex_lock(&uprobe_lock); | 324 | mutex_lock(&uprobe_lock); |
325 | 325 | ||
326 | /* register as an event */ | 326 | /* register as an event */ |
327 | old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); | 327 | old_tu = find_probe_event(ftrace_event_name(&tu->tp.call), |
328 | tu->tp.call.class->system); | ||
328 | if (old_tu) { | 329 | if (old_tu) { |
329 | /* delete old event */ | 330 | /* delete old event */ |
330 | ret = unregister_trace_uprobe(old_tu); | 331 | ret = unregister_trace_uprobe(old_tu); |
@@ -599,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
599 | char c = is_ret_probe(tu) ? 'r' : 'p'; | 600 | char c = is_ret_probe(tu) ? 'r' : 'p'; |
600 | int i; | 601 | int i; |
601 | 602 | ||
602 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); | 603 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, |
604 | ftrace_event_name(&tu->tp.call)); | ||
603 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); | 605 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); |
604 | 606 | ||
605 | for (i = 0; i < tu->tp.nr_args; i++) | 607 | for (i = 0; i < tu->tp.nr_args; i++) |
@@ -649,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
649 | { | 651 | { |
650 | struct trace_uprobe *tu = v; | 652 | struct trace_uprobe *tu = v; |
651 | 653 | ||
652 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); | 654 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, |
655 | ftrace_event_name(&tu->tp.call), tu->nhit); | ||
653 | return 0; | 656 | return 0; |
654 | } | 657 | } |
655 | 658 | ||
@@ -844,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
844 | tu = container_of(event, struct trace_uprobe, tp.call.event); | 847 | tu = container_of(event, struct trace_uprobe, tp.call.event); |
845 | 848 | ||
846 | if (is_ret_probe(tu)) { | 849 | if (is_ret_probe(tu)) { |
847 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, | 850 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", |
851 | ftrace_event_name(&tu->tp.call), | ||
848 | entry->vaddr[1], entry->vaddr[0])) | 852 | entry->vaddr[1], entry->vaddr[0])) |
849 | goto partial; | 853 | goto partial; |
850 | data = DATAOF_TRACE_ENTRY(entry, true); | 854 | data = DATAOF_TRACE_ENTRY(entry, true); |
851 | } else { | 855 | } else { |
852 | if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, | 856 | if (!trace_seq_printf(s, "%s: (0x%lx)", |
857 | ftrace_event_name(&tu->tp.call), | ||
853 | entry->vaddr[0])) | 858 | entry->vaddr[0])) |
854 | goto partial; | 859 | goto partial; |
855 | data = DATAOF_TRACE_ENTRY(entry, false); | 860 | data = DATAOF_TRACE_ENTRY(entry, false); |
@@ -1275,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu) | |||
1275 | ret = trace_add_event_call(call); | 1280 | ret = trace_add_event_call(call); |
1276 | 1281 | ||
1277 | if (ret) { | 1282 | if (ret) { |
1278 | pr_info("Failed to register uprobe event: %s\n", call->name); | 1283 | pr_info("Failed to register uprobe event: %s\n", |
1284 | ftrace_event_name(call)); | ||
1279 | kfree(call->print_fmt); | 1285 | kfree(call->print_fmt); |
1280 | unregister_ftrace_event(&call->event); | 1286 | unregister_ftrace_event(&call->event); |
1281 | } | 1287 | } |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index fb0a38a26555..ac5b23cf7212 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Mathieu Desnoyers | 2 | * Copyright (C) 2008-2014 Mathieu Desnoyers |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
@@ -33,39 +33,27 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[]; | |||
33 | /* Set to 1 to enable tracepoint debug output */ | 33 | /* Set to 1 to enable tracepoint debug output */ |
34 | static const int tracepoint_debug; | 34 | static const int tracepoint_debug; |
35 | 35 | ||
36 | #ifdef CONFIG_MODULES | ||
36 | /* | 37 | /* |
37 | * Tracepoints mutex protects the builtin and module tracepoints and the hash | 38 | * Tracepoint module list mutex protects the local module list. |
38 | * table, as well as the local module list. | ||
39 | */ | 39 | */ |
40 | static DEFINE_MUTEX(tracepoints_mutex); | 40 | static DEFINE_MUTEX(tracepoint_module_list_mutex); |
41 | 41 | ||
42 | #ifdef CONFIG_MODULES | 42 | /* Local list of struct tp_module */ |
43 | /* Local list of struct module */ | ||
44 | static LIST_HEAD(tracepoint_module_list); | 43 | static LIST_HEAD(tracepoint_module_list); |
45 | #endif /* CONFIG_MODULES */ | 44 | #endif /* CONFIG_MODULES */ |
46 | 45 | ||
47 | /* | 46 | /* |
48 | * Tracepoint hash table, containing the active tracepoints. | 47 | * tracepoints_mutex protects the builtin and module tracepoints. |
49 | * Protected by tracepoints_mutex. | 48 | * tracepoints_mutex nests inside tracepoint_module_list_mutex. |
50 | */ | 49 | */ |
51 | #define TRACEPOINT_HASH_BITS 6 | 50 | static DEFINE_MUTEX(tracepoints_mutex); |
52 | #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) | ||
53 | static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; | ||
54 | 51 | ||
55 | /* | 52 | /* |
56 | * Note about RCU : | 53 | * Note about RCU : |
57 | * It is used to delay the free of multiple probes array until a quiescent | 54 | * It is used to delay the free of multiple probes array until a quiescent |
58 | * state is reached. | 55 | * state is reached. |
59 | * Tracepoint entries modifications are protected by the tracepoints_mutex. | ||
60 | */ | 56 | */ |
61 | struct tracepoint_entry { | ||
62 | struct hlist_node hlist; | ||
63 | struct tracepoint_func *funcs; | ||
64 | int refcount; /* Number of times armed. 0 if disarmed. */ | ||
65 | int enabled; /* Tracepoint enabled */ | ||
66 | char name[0]; | ||
67 | }; | ||
68 | |||
69 | struct tp_probes { | 57 | struct tp_probes { |
70 | struct rcu_head rcu; | 58 | struct rcu_head rcu; |
71 | struct tracepoint_func probes[0]; | 59 | struct tracepoint_func probes[0]; |
@@ -92,34 +80,33 @@ static inline void release_probes(struct tracepoint_func *old) | |||
92 | } | 80 | } |
93 | } | 81 | } |
94 | 82 | ||
95 | static void debug_print_probes(struct tracepoint_entry *entry) | 83 | static void debug_print_probes(struct tracepoint_func *funcs) |
96 | { | 84 | { |
97 | int i; | 85 | int i; |
98 | 86 | ||
99 | if (!tracepoint_debug || !entry->funcs) | 87 | if (!tracepoint_debug || !funcs) |
100 | return; | 88 | return; |
101 | 89 | ||
102 | for (i = 0; entry->funcs[i].func; i++) | 90 | for (i = 0; funcs[i].func; i++) |
103 | printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func); | 91 | printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func); |
104 | } | 92 | } |
105 | 93 | ||
106 | static struct tracepoint_func * | 94 | static struct tracepoint_func *func_add(struct tracepoint_func **funcs, |
107 | tracepoint_entry_add_probe(struct tracepoint_entry *entry, | 95 | struct tracepoint_func *tp_func) |
108 | void *probe, void *data) | ||
109 | { | 96 | { |
110 | int nr_probes = 0; | 97 | int nr_probes = 0; |
111 | struct tracepoint_func *old, *new; | 98 | struct tracepoint_func *old, *new; |
112 | 99 | ||
113 | if (WARN_ON(!probe)) | 100 | if (WARN_ON(!tp_func->func)) |
114 | return ERR_PTR(-EINVAL); | 101 | return ERR_PTR(-EINVAL); |
115 | 102 | ||
116 | debug_print_probes(entry); | 103 | debug_print_probes(*funcs); |
117 | old = entry->funcs; | 104 | old = *funcs; |
118 | if (old) { | 105 | if (old) { |
119 | /* (N -> N+1), (N != 0, 1) probes */ | 106 | /* (N -> N+1), (N != 0, 1) probes */ |
120 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) | 107 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) |
121 | if (old[nr_probes].func == probe && | 108 | if (old[nr_probes].func == tp_func->func && |
122 | old[nr_probes].data == data) | 109 | old[nr_probes].data == tp_func->data) |
123 | return ERR_PTR(-EEXIST); | 110 | return ERR_PTR(-EEXIST); |
124 | } | 111 | } |
125 | /* + 2 : one for new probe, one for NULL func */ | 112 | /* + 2 : one for new probe, one for NULL func */ |
@@ -128,33 +115,30 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, | |||
128 | return ERR_PTR(-ENOMEM); | 115 | return ERR_PTR(-ENOMEM); |
129 | if (old) | 116 | if (old) |
130 | memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); | 117 | memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); |
131 | new[nr_probes].func = probe; | 118 | new[nr_probes] = *tp_func; |
132 | new[nr_probes].data = data; | ||
133 | new[nr_probes + 1].func = NULL; | 119 | new[nr_probes + 1].func = NULL; |
134 | entry->refcount = nr_probes + 1; | 120 | *funcs = new; |
135 | entry->funcs = new; | 121 | debug_print_probes(*funcs); |
136 | debug_print_probes(entry); | ||
137 | return old; | 122 | return old; |
138 | } | 123 | } |
139 | 124 | ||
140 | static void * | 125 | static void *func_remove(struct tracepoint_func **funcs, |
141 | tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | 126 | struct tracepoint_func *tp_func) |
142 | void *probe, void *data) | ||
143 | { | 127 | { |
144 | int nr_probes = 0, nr_del = 0, i; | 128 | int nr_probes = 0, nr_del = 0, i; |
145 | struct tracepoint_func *old, *new; | 129 | struct tracepoint_func *old, *new; |
146 | 130 | ||
147 | old = entry->funcs; | 131 | old = *funcs; |
148 | 132 | ||
149 | if (!old) | 133 | if (!old) |
150 | return ERR_PTR(-ENOENT); | 134 | return ERR_PTR(-ENOENT); |
151 | 135 | ||
152 | debug_print_probes(entry); | 136 | debug_print_probes(*funcs); |
153 | /* (N -> M), (N > 1, M >= 0) probes */ | 137 | /* (N -> M), (N > 1, M >= 0) probes */ |
154 | if (probe) { | 138 | if (tp_func->func) { |
155 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) { | 139 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) { |
156 | if (old[nr_probes].func == probe && | 140 | if (old[nr_probes].func == tp_func->func && |
157 | old[nr_probes].data == data) | 141 | old[nr_probes].data == tp_func->data) |
158 | nr_del++; | 142 | nr_del++; |
159 | } | 143 | } |
160 | } | 144 | } |
@@ -165,9 +149,8 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | |||
165 | */ | 149 | */ |
166 | if (nr_probes - nr_del == 0) { | 150 | if (nr_probes - nr_del == 0) { |
167 | /* N -> 0, (N > 1) */ | 151 | /* N -> 0, (N > 1) */ |
168 | entry->funcs = NULL; | 152 | *funcs = NULL; |
169 | entry->refcount = 0; | 153 | debug_print_probes(*funcs); |
170 | debug_print_probes(entry); | ||
171 | return old; | 154 | return old; |
172 | } else { | 155 | } else { |
173 | int j = 0; | 156 | int j = 0; |
@@ -177,91 +160,35 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | |||
177 | if (new == NULL) | 160 | if (new == NULL) |
178 | return ERR_PTR(-ENOMEM); | 161 | return ERR_PTR(-ENOMEM); |
179 | for (i = 0; old[i].func; i++) | 162 | for (i = 0; old[i].func; i++) |
180 | if (old[i].func != probe || old[i].data != data) | 163 | if (old[i].func != tp_func->func |
164 | || old[i].data != tp_func->data) | ||
181 | new[j++] = old[i]; | 165 | new[j++] = old[i]; |
182 | new[nr_probes - nr_del].func = NULL; | 166 | new[nr_probes - nr_del].func = NULL; |
183 | entry->refcount = nr_probes - nr_del; | 167 | *funcs = new; |
184 | entry->funcs = new; | ||
185 | } | 168 | } |
186 | debug_print_probes(entry); | 169 | debug_print_probes(*funcs); |
187 | return old; | 170 | return old; |
188 | } | 171 | } |
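To make the func_add()/func_remove() invariants concrete: the probe array is terminated by a NULL .func entry, a duplicate (func, data) pair is rejected, and the old array is always handed back so the caller can free it after an RCU grace period. A minimal userspace sketch of the add path under those assumptions (malloc/free stand in for kmalloc/kfree, NULL for ERR_PTR; illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tracepoint_func {
	void *func;
	void *data;
};

/* Mirrors func_add(): copy-and-extend, never modify the live array. */
static struct tracepoint_func *demo_func_add(struct tracepoint_func **funcs,
					     struct tracepoint_func *tp_func)
{
	struct tracepoint_func *old = *funcs, *new;
	int nr_probes = 0;

	if (old)
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				return NULL;	/* -EEXIST in the kernel */

	/* + 2: one slot for the new probe, one for the NULL terminator */
	new = malloc((nr_probes + 2) * sizeof(*new));
	if (!new)
		return NULL;			/* -ENOMEM in the kernel */
	if (old)
		memcpy(new, old, nr_probes * sizeof(*new));
	new[nr_probes] = *tp_func;
	new[nr_probes + 1].func = NULL;
	*funcs = new;
	return old;	/* the kernel frees this via call_rcu_sched() */
}

static void probe_a(void) { puts("probe_a"); }
static void probe_b(void) { puts("probe_b"); }

int main(void)
{
	struct tracepoint_func *funcs = NULL;
	struct tracepoint_func a = { probe_a, NULL }, b = { probe_b, NULL };
	int i;

	free(demo_func_add(&funcs, &a));
	free(demo_func_add(&funcs, &b));
	for (i = 0; funcs[i].func; i++)	/* walk to the NULL terminator */
		((void (*)(void))funcs[i].func)();
	free(funcs);
	return 0;
}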
189 | 172 | ||
190 | /* | 173 | /* |
191 | * Get tracepoint if the tracepoint is present in the tracepoint hash table. | 174 | * Add the probe function to a tracepoint. |
192 | * Must be called with tracepoints_mutex held. | ||
193 | * Returns NULL if not present. | ||
194 | */ | 175 | */ |
195 | static struct tracepoint_entry *get_tracepoint(const char *name) | 176 | static int tracepoint_add_func(struct tracepoint *tp, |
177 | struct tracepoint_func *func) | ||
196 | { | 178 | { |
197 | struct hlist_head *head; | 179 | struct tracepoint_func *old, *tp_funcs; |
198 | struct tracepoint_entry *e; | ||
199 | u32 hash = jhash(name, strlen(name), 0); | ||
200 | |||
201 | head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; | ||
202 | hlist_for_each_entry(e, head, hlist) { | ||
203 | if (!strcmp(name, e->name)) | ||
204 | return e; | ||
205 | } | ||
206 | return NULL; | ||
207 | } | ||
208 | 180 | ||
209 | /* | 181 | if (tp->regfunc && !static_key_enabled(&tp->key)) |
210 | * Add the tracepoint to the tracepoint hash table. Must be called with | 182 | tp->regfunc(); |
211 | * tracepoints_mutex held. | ||
212 | */ | ||
213 | static struct tracepoint_entry *add_tracepoint(const char *name) | ||
214 | { | ||
215 | struct hlist_head *head; | ||
216 | struct tracepoint_entry *e; | ||
217 | size_t name_len = strlen(name) + 1; | ||
218 | u32 hash = jhash(name, name_len-1, 0); | ||
219 | |||
220 | head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; | ||
221 | hlist_for_each_entry(e, head, hlist) { | ||
222 | if (!strcmp(name, e->name)) { | ||
223 | printk(KERN_NOTICE | ||
224 | "tracepoint %s busy\n", name); | ||
225 | return ERR_PTR(-EEXIST); /* Already there */ | ||
226 | } | ||
227 | } | ||
228 | /* | ||
229 | * Using kmalloc here to allocate a variable length element. Could | ||
230 | * cause some memory fragmentation if overused. | ||
231 | */ | ||
232 | e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL); | ||
233 | if (!e) | ||
234 | return ERR_PTR(-ENOMEM); | ||
235 | memcpy(&e->name[0], name, name_len); | ||
236 | e->funcs = NULL; | ||
237 | e->refcount = 0; | ||
238 | e->enabled = 0; | ||
239 | hlist_add_head(&e->hlist, head); | ||
240 | return e; | ||
241 | } | ||
242 | 183 | ||
243 | /* | 184 | tp_funcs = rcu_dereference_protected(tp->funcs, |
244 | * Remove the tracepoint from the tracepoint hash table. Must be called with | 185 | lockdep_is_held(&tracepoints_mutex)); |
245 | * mutex_lock held. | 186 | old = func_add(&tp_funcs, func); |
246 | */ | 187 | if (IS_ERR(old)) { |
247 | static inline void remove_tracepoint(struct tracepoint_entry *e) | 188 | WARN_ON_ONCE(1); |
248 | { | 189 | return PTR_ERR(old); |
249 | hlist_del(&e->hlist); | 190 | } |
250 | kfree(e); | 191 | release_probes(old); |
251 | } | ||
252 | |||
253 | /* | ||
254 | * Sets the probe callback corresponding to one tracepoint. | ||
255 | */ | ||
256 | static void set_tracepoint(struct tracepoint_entry **entry, | ||
257 | struct tracepoint *elem, int active) | ||
258 | { | ||
259 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); | ||
260 | |||
261 | if (elem->regfunc && !static_key_enabled(&elem->key) && active) | ||
262 | elem->regfunc(); | ||
263 | else if (elem->unregfunc && static_key_enabled(&elem->key) && !active) | ||
264 | elem->unregfunc(); | ||
265 | 192 | ||
266 | /* | 193 | /* |
267 | * rcu_assign_pointer has a smp_wmb() which makes sure that the new | 194 | * rcu_assign_pointer has a smp_wmb() which makes sure that the new |
@@ -270,193 +197,90 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
270 | * include/linux/tracepoints.h. A matching smp_read_barrier_depends() | 197 | * include/linux/tracepoints.h. A matching smp_read_barrier_depends() |
271 | * is used. | 198 | * is used. |
272 | */ | 199 | */ |
273 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); | 200 | rcu_assign_pointer(tp->funcs, tp_funcs); |
274 | if (active && !static_key_enabled(&elem->key)) | 201 | if (!static_key_enabled(&tp->key)) |
275 | static_key_slow_inc(&elem->key); | 202 | static_key_slow_inc(&tp->key); |
276 | else if (!active && static_key_enabled(&elem->key)) | 203 | return 0; |
277 | static_key_slow_dec(&elem->key); | ||
278 | } | 204 | } |
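For context on the ordering comment above: the array published with rcu_assign_pointer() is walked lock-free at every tracepoint call site. The body of the __DO_TRACE() macro in include/linux/tracepoint.h looks roughly like the sketch below (simplified; the real macro also takes a condition plus pre/post-RCU hooks, and proto/args come from TP_PROTO()/TP_ARGS()):

	struct tracepoint_func *it_func_ptr;
	void *it_func, *__data;

	rcu_read_lock_sched_notrace();
	it_func_ptr = rcu_dereference_sched(tp->funcs);	/* pairs with rcu_assign_pointer() */
	if (it_func_ptr) {
		do {
			it_func = (it_func_ptr)->func;
			__data = (it_func_ptr)->data;
			((void (*)(proto))(it_func))(args);
		} while ((++it_func_ptr)->func);	/* NULL .func terminates */
	}
	rcu_read_unlock_sched_notrace();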
279 | 205 | ||
280 | /* | 206 | /* |
281 | * Disable a tracepoint and its probe callback. | 207 | * Remove a probe function from a tracepoint. |
282 | * Note: only waiting an RCU period after setting elem->call to the empty | 208 | * Note: only waiting an RCU period after setting elem->call to the empty |
283 | * function ensures that the original callback is not used anymore. This is | 209 | * function ensures that the original callback is not used anymore. This is |
284 | * ensured by preempt_disable around the call site. | 210 | * ensured by preempt_disable around the call site. |
285 | */ | 211 | */ |
286 | static void disable_tracepoint(struct tracepoint *elem) | 212 | static int tracepoint_remove_func(struct tracepoint *tp, |
213 | struct tracepoint_func *func) | ||
287 | { | 214 | { |
288 | if (elem->unregfunc && static_key_enabled(&elem->key)) | 215 | struct tracepoint_func *old, *tp_funcs; |
289 | elem->unregfunc(); | ||
290 | |||
291 | if (static_key_enabled(&elem->key)) | ||
292 | static_key_slow_dec(&elem->key); | ||
293 | rcu_assign_pointer(elem->funcs, NULL); | ||
294 | } | ||
295 | 216 | ||
296 | /** | 217 | tp_funcs = rcu_dereference_protected(tp->funcs, |
297 | * tracepoint_update_probe_range - Update a probe range | 218 | lockdep_is_held(&tracepoints_mutex)); |
298 | * @begin: beginning of the range | 219 | old = func_remove(&tp_funcs, func); |
299 | * @end: end of the range | 220 | if (IS_ERR(old)) { |
300 | * | 221 | WARN_ON_ONCE(1); |
301 | * Updates the probe callback corresponding to a range of tracepoints. | 222 | return PTR_ERR(old); |
302 | * Called with tracepoints_mutex held. | ||
303 | */ | ||
304 | static void tracepoint_update_probe_range(struct tracepoint * const *begin, | ||
305 | struct tracepoint * const *end) | ||
306 | { | ||
307 | struct tracepoint * const *iter; | ||
308 | struct tracepoint_entry *mark_entry; | ||
309 | |||
310 | if (!begin) | ||
311 | return; | ||
312 | |||
313 | for (iter = begin; iter < end; iter++) { | ||
314 | mark_entry = get_tracepoint((*iter)->name); | ||
315 | if (mark_entry) { | ||
316 | set_tracepoint(&mark_entry, *iter, | ||
317 | !!mark_entry->refcount); | ||
318 | mark_entry->enabled = !!mark_entry->refcount; | ||
319 | } else { | ||
320 | disable_tracepoint(*iter); | ||
321 | } | ||
322 | } | 223 | } |
323 | } | 224 | release_probes(old); |
324 | |||
325 | #ifdef CONFIG_MODULES | ||
326 | void module_update_tracepoints(void) | ||
327 | { | ||
328 | struct tp_module *tp_mod; | ||
329 | |||
330 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) | ||
331 | tracepoint_update_probe_range(tp_mod->tracepoints_ptrs, | ||
332 | tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints); | ||
333 | } | ||
334 | #else /* CONFIG_MODULES */ | ||
335 | void module_update_tracepoints(void) | ||
336 | { | ||
337 | } | ||
338 | #endif /* CONFIG_MODULES */ | ||
339 | 225 | ||
226 | if (!tp_funcs) { | ||
227 | /* Removed last function */ | ||
228 | if (tp->unregfunc && static_key_enabled(&tp->key)) | ||
229 | tp->unregfunc(); | ||
340 | 230 | ||
341 | /* | 231 | if (static_key_enabled(&tp->key)) |
342 | * Update probes, removing the faulty probes. | 232 | static_key_slow_dec(&tp->key); |
343 | * Called with tracepoints_mutex held. | ||
344 | */ | ||
345 | static void tracepoint_update_probes(void) | ||
346 | { | ||
347 | /* Core kernel tracepoints */ | ||
348 | tracepoint_update_probe_range(__start___tracepoints_ptrs, | ||
349 | __stop___tracepoints_ptrs); | ||
350 | /* tracepoints in modules. */ | ||
351 | module_update_tracepoints(); | ||
352 | } | ||
353 | |||
354 | static struct tracepoint_func * | ||
355 | tracepoint_add_probe(const char *name, void *probe, void *data) | ||
356 | { | ||
357 | struct tracepoint_entry *entry; | ||
358 | struct tracepoint_func *old; | ||
359 | |||
360 | entry = get_tracepoint(name); | ||
361 | if (!entry) { | ||
362 | entry = add_tracepoint(name); | ||
363 | if (IS_ERR(entry)) | ||
364 | return (struct tracepoint_func *)entry; | ||
365 | } | 233 | } |
366 | old = tracepoint_entry_add_probe(entry, probe, data); | 234 | rcu_assign_pointer(tp->funcs, tp_funcs); |
367 | if (IS_ERR(old) && !entry->refcount) | 235 | return 0; |
368 | remove_tracepoint(entry); | ||
369 | return old; | ||
370 | } | 236 | } |
371 | 237 | ||
372 | /** | 238 | /** |
373 | * tracepoint_probe_register - Connect a probe to a tracepoint | 239 | * tracepoint_probe_register - Connect a probe to a tracepoint |
374 | * @name: tracepoint name | 240 | * @tp: tracepoint |
375 | * @probe: probe handler | 241 | * @probe: probe handler |
376 | * @data: probe private data | ||
377 | * | ||
378 | * Returns: | ||
379 | * - 0 if the probe was successfully registered, and tracepoint | ||
380 | * callsites are currently loaded for that probe, | ||
381 | * - -ENODEV if the probe was successfully registered, but no tracepoint | ||
382 | * callsite is currently loaded for that probe, | ||
383 | * - other negative error value on error. | ||
384 | * | ||
385 | * When tracepoint_probe_register() returns either 0 or -ENODEV, | ||
386 | * parameters @name, @probe, and @data may be used by the tracepoint | ||
387 | * infrastructure until the probe is unregistered. | ||
388 | * | 242 | * |
389 | * The probe address must at least be aligned on the architecture pointer size. | 243 | * Returns 0 if ok, error value on error. |
244 | * Note: if @tp is within a module, the caller is responsible for | ||
245 | * unregistering the probe before the module is gone. This can be | ||
246 | * performed either with a tracepoint module going notifier, or from | ||
247 | * within module exit functions. | ||
390 | */ | 248 | */ |
391 | int tracepoint_probe_register(const char *name, void *probe, void *data) | 249 | int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data) |
392 | { | 250 | { |
393 | struct tracepoint_func *old; | 251 | struct tracepoint_func tp_func; |
394 | struct tracepoint_entry *entry; | 252 | int ret; |
395 | int ret = 0; | ||
396 | 253 | ||
397 | mutex_lock(&tracepoints_mutex); | 254 | mutex_lock(&tracepoints_mutex); |
398 | old = tracepoint_add_probe(name, probe, data); | 255 | tp_func.func = probe; |
399 | if (IS_ERR(old)) { | 256 | tp_func.data = data; |
400 | mutex_unlock(&tracepoints_mutex); | 257 | ret = tracepoint_add_func(tp, &tp_func); |
401 | return PTR_ERR(old); | ||
402 | } | ||
403 | tracepoint_update_probes(); /* may update entry */ | ||
404 | entry = get_tracepoint(name); | ||
405 | /* Make sure the entry was enabled */ | ||
406 | if (!entry || !entry->enabled) | ||
407 | ret = -ENODEV; | ||
408 | mutex_unlock(&tracepoints_mutex); | 258 | mutex_unlock(&tracepoints_mutex); |
409 | release_probes(old); | ||
410 | return ret; | 259 | return ret; |
411 | } | 260 | } |
412 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); | 261 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); |
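With the name hash gone, callers hand in the tracepoint struct itself; the register_trace_##name() wrappers generated by DECLARE_TRACE() now expand to exactly this call. A hedged sketch of a module wiring a probe straight to a built-in tracepoint, assuming sched_switch keeps its 3.15 prototype and its tracepoint struct is exported with EXPORT_TRACEPOINT_SYMBOL_GPL() (illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/* Probe signature: the registered data pointer arrives first. */
static void demo_probe(void *data, struct task_struct *prev,
		       struct task_struct *next)
{
	trace_printk("switch %d -> %d\n", prev->pid, next->pid);
}

static int __init demo_init(void)
{
	/* __tracepoint_sched_switch is the struct emitted by DEFINE_TRACE() */
	return tracepoint_probe_register(&__tracepoint_sched_switch,
					 demo_probe, NULL);
}

static void __exit demo_exit(void)
{
	tracepoint_probe_unregister(&__tracepoint_sched_switch,
				    demo_probe, NULL);
	tracepoint_synchronize_unregister();	/* drain in-flight probes */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");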
413 | 262 | ||
414 | static struct tracepoint_func * | ||
415 | tracepoint_remove_probe(const char *name, void *probe, void *data) | ||
416 | { | ||
417 | struct tracepoint_entry *entry; | ||
418 | struct tracepoint_func *old; | ||
419 | |||
420 | entry = get_tracepoint(name); | ||
421 | if (!entry) | ||
422 | return ERR_PTR(-ENOENT); | ||
423 | old = tracepoint_entry_remove_probe(entry, probe, data); | ||
424 | if (IS_ERR(old)) | ||
425 | return old; | ||
426 | if (!entry->refcount) | ||
427 | remove_tracepoint(entry); | ||
428 | return old; | ||
429 | } | ||
430 | |||
431 | /** | 263 | /** |
432 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint | 264 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint |
433 | * @name: tracepoint name | 265 | * @tp: tracepoint |
434 | * @probe: probe function pointer | 266 | * @probe: probe function pointer |
435 | * @data: probe private data | ||
436 | * | 267 | * |
437 | * We do not need to call a synchronize_sched to make sure the probes have | 268 | * Returns 0 if ok, error value on error. |
438 | * finished running before doing a module unload, because the module unload | ||
439 | * itself uses stop_machine(), which insures that every preempt disabled section | ||
440 | * have finished. | ||
441 | */ | 269 | */ |
442 | int tracepoint_probe_unregister(const char *name, void *probe, void *data) | 270 | int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) |
443 | { | 271 | { |
444 | struct tracepoint_func *old; | 272 | struct tracepoint_func tp_func; |
273 | int ret; | ||
445 | 274 | ||
446 | mutex_lock(&tracepoints_mutex); | 275 | mutex_lock(&tracepoints_mutex); |
447 | old = tracepoint_remove_probe(name, probe, data); | 276 | tp_func.func = probe; |
448 | if (IS_ERR(old)) { | 277 | tp_func.data = data; |
449 | mutex_unlock(&tracepoints_mutex); | 278 | ret = tracepoint_remove_func(tp, &tp_func); |
450 | return PTR_ERR(old); | ||
451 | } | ||
452 | tracepoint_update_probes(); /* may update entry */ | ||
453 | mutex_unlock(&tracepoints_mutex); | 279 | mutex_unlock(&tracepoints_mutex); |
454 | release_probes(old); | 280 | return ret; |
455 | return 0; | ||
456 | } | 281 | } |
457 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | 282 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); |
458 | 283 | ||
459 | |||
460 | #ifdef CONFIG_MODULES | 284 | #ifdef CONFIG_MODULES |
461 | bool trace_module_has_bad_taint(struct module *mod) | 285 | bool trace_module_has_bad_taint(struct module *mod) |
462 | { | 286 | { |
@@ -464,6 +288,74 @@ bool trace_module_has_bad_taint(struct module *mod) | |||
464 | (1 << TAINT_UNSIGNED_MODULE)); | 288 | (1 << TAINT_UNSIGNED_MODULE)); |
465 | } | 289 | } |
466 | 290 | ||
291 | static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list); | ||
292 | |||
293 | /** | ||
294 | * register_tracepoint_module_notifier - register tracepoint coming/going notifier ||
295 | * @nb: notifier block | ||
296 | * | ||
297 | * Notifiers registered with this function are called on module | ||
298 | * coming/going with the tracepoint_module_list_mutex held. | ||
299 | * The notifier block callback should expect a "struct tp_module" data | ||
300 | * pointer. | ||
301 | */ | ||
302 | int register_tracepoint_module_notifier(struct notifier_block *nb) | ||
303 | { | ||
304 | struct tp_module *tp_mod; | ||
305 | int ret; | ||
306 | |||
307 | mutex_lock(&tracepoint_module_list_mutex); | ||
308 | ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb); | ||
309 | if (ret) | ||
310 | goto end; | ||
311 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) | ||
312 | (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod); | ||
313 | end: | ||
314 | mutex_unlock(&tracepoint_module_list_mutex); | ||
315 | return ret; | ||
316 | } | ||
317 | EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier); | ||
318 | |||
319 | /** | ||
320 | * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier ||
321 | * @nb: notifier block | ||
322 | * | ||
323 | * The notifier block callback should expect a "struct tp_module" data | ||
324 | * pointer. | ||
325 | */ | ||
326 | int unregister_tracepoint_module_notifier(struct notifier_block *nb) | ||
327 | { | ||
328 | struct tp_module *tp_mod; | ||
329 | int ret; | ||
330 | |||
331 | mutex_lock(&tracepoint_module_list_mutex); | ||
332 | ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb); | ||
333 | if (ret) | ||
334 | goto end; | ||
335 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) | ||
336 | (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod); | ||
337 | end: | ||
338 | mutex_unlock(&tracepoint_module_list_mutex); | ||
339 | return ret; | ||
340 | |||
341 | } | ||
342 | EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier); | ||
343 | |||
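Tracers that used to scan tracepoint_module_list themselves now subscribe to the coming/going chain; registration also replays MODULE_STATE_COMING for modules already loaded, so late-registering tracers see the full set. A hedged sketch of a consumer (the tp_mod->mod field follows this patch; the rest is illustrative):

#include <linux/module.h>
#include <linux/tracepoint.h>

static int demo_tp_module_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct tp_module *tp_mod = data;

	switch (action) {
	case MODULE_STATE_COMING:
		pr_info("tracepoints arriving with %s\n", tp_mod->mod->name);
		break;
	case MODULE_STATE_GOING:
		/* last chance to unregister probes aimed at this module */
		pr_info("tracepoints leaving with %s\n", tp_mod->mod->name);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_tp_nb = {
	.notifier_call = demo_tp_module_notify,
};

static int __init demo_init(void)
{
	return register_tracepoint_module_notifier(&demo_tp_nb);
}

static void __exit demo_exit(void)
{
	unregister_tracepoint_module_notifier(&demo_tp_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");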
344 | /* | ||
345 | * Ensure the tracer unregistered the module's probes before the module | ||
346 | * teardown is performed. Prevents leaks of probe and data pointers. | ||
347 | */ | ||
348 | static void tp_module_going_check_quiescent(struct tracepoint * const *begin, | ||
349 | struct tracepoint * const *end) | ||
350 | { | ||
351 | struct tracepoint * const *iter; | ||
352 | |||
353 | if (!begin) | ||
354 | return; | ||
355 | for (iter = begin; iter < end; iter++) | ||
356 | WARN_ON_ONCE((*iter)->funcs); | ||
357 | } | ||
358 | |||
467 | static int tracepoint_module_coming(struct module *mod) | 359 | static int tracepoint_module_coming(struct module *mod) |
468 | { | 360 | { |
469 | struct tp_module *tp_mod; | 361 | struct tp_module *tp_mod; |
@@ -479,36 +371,41 @@ static int tracepoint_module_coming(struct module *mod) | |||
479 | */ | 371 | */ |
480 | if (trace_module_has_bad_taint(mod)) | 372 | if (trace_module_has_bad_taint(mod)) |
481 | return 0; | 373 | return 0; |
482 | mutex_lock(&tracepoints_mutex); | 374 | mutex_lock(&tracepoint_module_list_mutex); |
483 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); | 375 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); |
484 | if (!tp_mod) { | 376 | if (!tp_mod) { |
485 | ret = -ENOMEM; | 377 | ret = -ENOMEM; |
486 | goto end; | 378 | goto end; |
487 | } | 379 | } |
488 | tp_mod->num_tracepoints = mod->num_tracepoints; | 380 | tp_mod->mod = mod; |
489 | tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; | ||
490 | list_add_tail(&tp_mod->list, &tracepoint_module_list); | 381 | list_add_tail(&tp_mod->list, &tracepoint_module_list); |
491 | tracepoint_update_probe_range(mod->tracepoints_ptrs, | 382 | blocking_notifier_call_chain(&tracepoint_notify_list, |
492 | mod->tracepoints_ptrs + mod->num_tracepoints); | 383 | MODULE_STATE_COMING, tp_mod); |
493 | end: | 384 | end: |
494 | mutex_unlock(&tracepoints_mutex); | 385 | mutex_unlock(&tracepoint_module_list_mutex); |
495 | return ret; | 386 | return ret; |
496 | } | 387 | } |
497 | 388 | ||
498 | static int tracepoint_module_going(struct module *mod) | 389 | static void tracepoint_module_going(struct module *mod) |
499 | { | 390 | { |
500 | struct tp_module *pos; | 391 | struct tp_module *tp_mod; |
501 | 392 | ||
502 | if (!mod->num_tracepoints) | 393 | if (!mod->num_tracepoints) |
503 | return 0; | 394 | return; |
504 | 395 | ||
505 | mutex_lock(&tracepoints_mutex); | 396 | mutex_lock(&tracepoint_module_list_mutex); |
506 | tracepoint_update_probe_range(mod->tracepoints_ptrs, | 397 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) { |
507 | mod->tracepoints_ptrs + mod->num_tracepoints); | 398 | if (tp_mod->mod == mod) { |
508 | list_for_each_entry(pos, &tracepoint_module_list, list) { | 399 | blocking_notifier_call_chain(&tracepoint_notify_list, |
509 | if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) { | 400 | MODULE_STATE_GOING, tp_mod); |
510 | list_del(&pos->list); | 401 | list_del(&tp_mod->list); |
511 | kfree(pos); | 402 | kfree(tp_mod); |
403 | /* | ||
404 | * The going notifier has been called before checking for ||
405 | * quiescence. ||
406 | */ | ||
407 | tp_module_going_check_quiescent(mod->tracepoints_ptrs, | ||
408 | mod->tracepoints_ptrs + mod->num_tracepoints); | ||
512 | break; | 409 | break; |
513 | } | 410 | } |
514 | } | 411 | } |
@@ -518,12 +415,11 @@ static int tracepoint_module_going(struct module *mod) | |||
518 | * flag on "going", in case a module taints the kernel only after being | 415 | * flag on "going", in case a module taints the kernel only after being |
519 | * loaded. | 416 | * loaded. |
520 | */ | 417 | */ |
521 | mutex_unlock(&tracepoints_mutex); | 418 | mutex_unlock(&tracepoint_module_list_mutex); |
522 | return 0; | ||
523 | } | 419 | } |
524 | 420 | ||
525 | int tracepoint_module_notify(struct notifier_block *self, | 421 | static int tracepoint_module_notify(struct notifier_block *self, |
526 | unsigned long val, void *data) | 422 | unsigned long val, void *data) |
527 | { | 423 | { |
528 | struct module *mod = data; | 424 | struct module *mod = data; |
529 | int ret = 0; | 425 | int ret = 0; |
@@ -535,24 +431,58 @@ int tracepoint_module_notify(struct notifier_block *self, | |||
535 | case MODULE_STATE_LIVE: | 431 | case MODULE_STATE_LIVE: |
536 | break; | 432 | break; |
537 | case MODULE_STATE_GOING: | 433 | case MODULE_STATE_GOING: |
538 | ret = tracepoint_module_going(mod); | 434 | tracepoint_module_going(mod); |
435 | break; | ||
436 | case MODULE_STATE_UNFORMED: | ||
539 | break; | 437 | break; |
540 | } | 438 | } |
541 | return ret; | 439 | return ret; |
542 | } | 440 | } |
543 | 441 | ||
544 | struct notifier_block tracepoint_module_nb = { | 442 | static struct notifier_block tracepoint_module_nb = { |
545 | .notifier_call = tracepoint_module_notify, | 443 | .notifier_call = tracepoint_module_notify, |
546 | .priority = 0, | 444 | .priority = 0, |
547 | }; | 445 | }; |
548 | 446 | ||
549 | static int init_tracepoints(void) | 447 | static __init int init_tracepoints(void) |
550 | { | 448 | { |
551 | return register_module_notifier(&tracepoint_module_nb); | 449 | int ret; |
450 | |||
451 | ret = register_module_notifier(&tracepoint_module_nb); | ||
452 | if (ret) | ||
453 | pr_warning("Failed to register tracepoint module enter notifier\n"); | ||
454 | |||
455 | return ret; | ||
552 | } | 456 | } |
553 | __initcall(init_tracepoints); | 457 | __initcall(init_tracepoints); |
554 | #endif /* CONFIG_MODULES */ | 458 | #endif /* CONFIG_MODULES */ |
555 | 459 | ||
460 | static void for_each_tracepoint_range(struct tracepoint * const *begin, | ||
461 | struct tracepoint * const *end, | ||
462 | void (*fct)(struct tracepoint *tp, void *priv), | ||
463 | void *priv) | ||
464 | { | ||
465 | struct tracepoint * const *iter; | ||
466 | |||
467 | if (!begin) | ||
468 | return; | ||
469 | for (iter = begin; iter < end; iter++) | ||
470 | fct(*iter, priv); | ||
471 | } | ||
472 | |||
473 | /** | ||
474 | * for_each_kernel_tracepoint - iteration on all kernel tracepoints | ||
475 | * @fct: callback | ||
476 | * @priv: private data | ||
477 | */ | ||
478 | void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), | ||
479 | void *priv) | ||
480 | { | ||
481 | for_each_tracepoint_range(__start___tracepoints_ptrs, | ||
482 | __stop___tracepoints_ptrs, fct, priv); | ||
483 | } | ||
484 | EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint); | ||
485 | |||
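for_each_kernel_tracepoint() replaces the old hash-table walk as the way to enumerate core-kernel tracepoints (module tracepoints are reached through the notifier above instead). A minimal sketch of a caller, assuming struct tracepoint keeps its public name field:

#include <linux/printk.h>
#include <linux/tracepoint.h>

static void demo_count_tp(struct tracepoint *tp, void *priv)
{
	int *count = priv;

	pr_info("tracepoint: %s\n", tp->name);
	(*count)++;
}

static void demo_list_tracepoints(void)
{
	int count = 0;

	for_each_kernel_tracepoint(demo_count_tp, &count);
	pr_info("%d core kernel tracepoints\n", count);
}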
556 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS | 486 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
557 | 487 | ||
558 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | 488 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ |