Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c                         9
-rw-r--r--  kernel/kexec.c                       59
-rw-r--r--  kernel/ksysfs.c                      21
-rw-r--r--  kernel/module.c                      28
-rw-r--r--  kernel/panic.c                        3
-rw-r--r--  kernel/pid.c                         12
-rw-r--r--  kernel/printk.c                     119
-rw-r--r--  kernel/relay.c                        2
-rw-r--r--  kernel/signal.c                      38
-rw-r--r--  kernel/time/timecompare.c             2
-rw-r--r--  kernel/trace/ftrace.c                30
-rw-r--r--  kernel/trace/power-traces.c           2
-rw-r--r--  kernel/trace/ring_buffer.c           29
-rw-r--r--  kernel/trace/trace.c                186
-rw-r--r--  kernel/trace/trace.h                 23
-rw-r--r--  kernel/trace/trace_event_profile.c    6
-rw-r--r--  kernel/trace/trace_events.c          41
-rw-r--r--  kernel/trace/trace_export.c           4
-rw-r--r--  kernel/trace/trace_irqsoff.c          2
-rw-r--r--  kernel/trace/trace_kprobe.c           9
-rw-r--r--  kernel/trace/trace_ksym.c            56
-rw-r--r--  kernel/trace/trace_syscalls.c        18
22 files changed, 443 insertions(+), 256 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 9bd91447e052..202a0ba63d3c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1127,6 +1127,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	p->memcg_batch.do_batch = 0;
+	p->memcg_batch.memcg = NULL;
+#endif
 
 	p->bts = NULL;
 
@@ -1206,9 +1210,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->sas_ss_sp = p->sas_ss_size = 0;
 
 	/*
-	 * Syscall tracing should be turned off in the child regardless
-	 * of CLONE_PTRACE.
+	 * Syscall tracing and stepping should be turned off in the
+	 * child regardless of CLONE_PTRACE.
 	 */
+	user_disable_single_step(p);
 	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
 #ifdef TIF_SYSCALL_EMU
 	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index f336e2107f98..433e9fcc1fc5 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -31,6 +31,7 @@
 #include <linux/cpu.h>
 #include <linux/console.h>
 #include <linux/vmalloc.h>
+#include <linux/swap.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -1082,6 +1083,64 @@ void crash_kexec(struct pt_regs *regs)
 	}
 }
 
+size_t crash_get_memory_size(void)
+{
+	size_t size;
+	mutex_lock(&kexec_mutex);
+	size = crashk_res.end - crashk_res.start + 1;
+	mutex_unlock(&kexec_mutex);
+	return size;
+}
+
+static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+		free_page((unsigned long)__va(addr));
+		totalram_pages++;
+	}
+}
+
+int crash_shrink_memory(unsigned long new_size)
+{
+	int ret = 0;
+	unsigned long start, end;
+
+	mutex_lock(&kexec_mutex);
+
+	if (kexec_crash_image) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+	start = crashk_res.start;
+	end = crashk_res.end;
+
+	if (new_size >= end - start + 1) {
+		ret = -EINVAL;
+		if (new_size == end - start + 1)
+			ret = 0;
+		goto unlock;
+	}
+
+	start = roundup(start, PAGE_SIZE);
+	end = roundup(start + new_size, PAGE_SIZE);
+
+	free_reserved_phys_range(end, crashk_res.end);
+
+	if (start == end) {
+		crashk_res.end = end;
+		release_resource(&crashk_res);
+	} else
+		crashk_res.end = end - 1;
+
+unlock:
+	mutex_unlock(&kexec_mutex);
+	return ret;
+}
+
 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
 			    size_t data_len)
 {
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 528dd78e7e7e..3feaf5a74514 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -100,6 +100,26 @@ static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
 }
 KERNEL_ATTR_RO(kexec_crash_loaded);
 
+static ssize_t kexec_crash_size_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%zu\n", crash_get_memory_size());
+}
+static ssize_t kexec_crash_size_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t count)
+{
+	unsigned long cnt;
+	int ret;
+
+	if (strict_strtoul(buf, 0, &cnt))
+		return -EINVAL;
+
+	ret = crash_shrink_memory(cnt);
+	return ret < 0 ? ret : count;
+}
+KERNEL_ATTR_RW(kexec_crash_size);
+
 static ssize_t vmcoreinfo_show(struct kobject *kobj,
 			       struct kobj_attribute *attr, char *buf)
 {
@@ -147,6 +167,7 @@ static struct attribute * kernel_attrs[] = {
 #ifdef CONFIG_KEXEC
 	&kexec_loaded_attr.attr,
 	&kexec_crash_loaded_attr.attr,
+	&kexec_crash_size_attr.attr,
 	&vmcoreinfo_attr.attr,
 #endif
 	NULL
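
A minimal userspace sketch of driving the knob these two hunks add. The
path follows from kernel_kobj (/sys/kernel) plus the attribute name; the
program itself is hypothetical:

	/* Read the crashkernel reservation, then release it entirely.
	 * Per crash_shrink_memory() above, the write fails with -ENOENT
	 * while a crash kernel is loaded, and with -EINVAL if the new
	 * size would grow the reservation. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long size;
		FILE *f = fopen("/sys/kernel/kexec_crash_size", "r");

		if (!f || fscanf(f, "%lu", &size) != 1)
			return 1;
		fclose(f);
		printf("crashkernel reservation: %lu bytes\n", size);

		f = fopen("/sys/kernel/kexec_crash_size", "w");
		if (!f)
			return 1;
		fprintf(f, "0\n");	/* shrink the reservation to nothing */
		return fclose(f) ? 1 : 0;
	}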
diff --git a/kernel/module.c b/kernel/module.c
index 12afc5a3ddd3..a65dc787a27b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -880,11 +880,23 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
+/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
+static unsigned long maybe_relocated(unsigned long crc,
+				     const struct module *crc_owner)
+{
+#ifdef ARCH_RELOCATES_KCRCTAB
+	if (crc_owner == NULL)
+		return crc - (unsigned long)reloc_start;
+#endif
+	return crc;
+}
+
 static int check_version(Elf_Shdr *sechdrs,
 			 unsigned int versindex,
 			 const char *symname,
 			 struct module *mod,
-			 const unsigned long *crc)
+			 const unsigned long *crc,
+			 const struct module *crc_owner)
 {
 	unsigned int i, num_versions;
 	struct modversion_info *versions;
@@ -905,10 +917,10 @@ static int check_version(Elf_Shdr *sechdrs,
 		if (strcmp(versions[i].name, symname) != 0)
 			continue;
 
-		if (versions[i].crc == *crc)
+		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
 			return 1;
 		DEBUGP("Found checksum %lX vs module %lX\n",
-		       *crc, versions[i].crc);
+		       maybe_relocated(*crc, crc_owner), versions[i].crc);
 		goto bad_version;
 	}
 
@@ -931,7 +943,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 	if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
 			 &crc, true, false))
 		BUG();
-	return check_version(sechdrs, versindex, "module_layout", mod, crc);
+	return check_version(sechdrs, versindex, "module_layout", mod, crc,
+			     NULL);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -949,7 +962,8 @@ static inline int check_version(Elf_Shdr *sechdrs,
 				       unsigned int versindex,
 				       const char *symname,
 				       struct module *mod,
-				       const unsigned long *crc)
+				       const unsigned long *crc,
+				       const struct module *crc_owner)
 {
 	return 1;
 }
@@ -984,8 +998,8 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
 	/* use_module can fail due to OOM,
 	   or module initialization or unloading */
 	if (sym) {
-		if (!check_version(sechdrs, versindex, name, mod, crc) ||
-		    !use_module(mod, owner))
+		if (!check_version(sechdrs, versindex, name, mod, crc, owner)
+		    || !use_module(mod, owner))
 			sym = NULL;
 	}
 	return sym;
diff --git a/kernel/panic.c b/kernel/panic.c
index 96b45d0b4ba5..5827f7b97254 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -10,6 +10,7 @@
  */
 #include <linux/debug_locks.h>
 #include <linux/interrupt.h>
+#include <linux/kmsg_dump.h>
 #include <linux/kallsyms.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
@@ -74,6 +75,7 @@ NORET_TYPE void panic(const char * fmt, ...)
 	dump_stack();
 #endif
 
+	kmsg_dump(KMSG_DUMP_PANIC);
 	/*
 	 * If we have crashed and we have a crash kernel loaded let it handle
 	 * everything else.
@@ -339,6 +341,7 @@ void oops_exit(void)
 {
 	do_oops_enter_exit();
 	print_oops_end_marker();
+	kmsg_dump(KMSG_DUMP_OOPS);
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
diff --git a/kernel/pid.c b/kernel/pid.c
index d3f722d20f9c..2e17c9c92cbe 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -141,11 +141,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
 			 * installing it:
 			 */
 			spin_lock_irq(&pidmap_lock);
-			if (map->page)
-				kfree(page);
-			else
+			if (!map->page) {
 				map->page = page;
+				page = NULL;
+			}
 			spin_unlock_irq(&pidmap_lock);
+			kfree(page);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -268,12 +269,11 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 	for (type = 0; type < PIDTYPE_MAX; ++type)
 		INIT_HLIST_HEAD(&pid->tasks[type]);
 
+	upid = pid->numbers + ns->level;
 	spin_lock_irq(&pidmap_lock);
-	for (i = ns->level; i >= 0; i--) {
-		upid = &pid->numbers[i];
+	for ( ; upid >= pid->numbers; --upid)
 		hlist_add_head_rcu(&upid->pid_chain,
 				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
-	}
 	spin_unlock_irq(&pidmap_lock);
 
 out:
diff --git a/kernel/printk.c b/kernel/printk.c
index b5ac4d99c667..1ded8e7dd19b 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -34,6 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
 #include <linux/ratelimit.h>
+#include <linux/kmsg_dump.h>
 
 #include <asm/uaccess.h>
 
39 40
@@ -1405,4 +1406,122 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 	return false;
 }
 EXPORT_SYMBOL(printk_timed_ratelimit);
+
+static DEFINE_SPINLOCK(dump_list_lock);
+static LIST_HEAD(dump_list);
+
+/**
+ * kmsg_dump_register - register a kernel log dumper.
+ * @dump: pointer to the kmsg_dumper structure
+ *
+ * Adds a kernel log dumper to the system. The dump callback in the
+ * structure will be called when the kernel oopses or panics and must be
+ * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
+ */
+int kmsg_dump_register(struct kmsg_dumper *dumper)
+{
+	unsigned long flags;
+	int err = -EBUSY;
+
+	/* The dump callback needs to be set */
+	if (!dumper->dump)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dump_list_lock, flags);
+	/* Don't allow registering multiple times */
+	if (!dumper->registered) {
+		dumper->registered = 1;
+		list_add_tail(&dumper->list, &dump_list);
+		err = 0;
+	}
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_register);
+
+/**
+ * kmsg_dump_unregister - unregister a kmsg dumper.
+ * @dump: pointer to the kmsg_dumper structure
+ *
+ * Removes a dump device from the system. Returns zero on success and
+ * %-EINVAL otherwise.
+ */
+int kmsg_dump_unregister(struct kmsg_dumper *dumper)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+
+	spin_lock_irqsave(&dump_list_lock, flags);
+	if (dumper->registered) {
+		dumper->registered = 0;
+		list_del(&dumper->list);
+		err = 0;
+	}
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
+
+static const char const *kmsg_reasons[] = {
+	[KMSG_DUMP_OOPS]	= "oops",
+	[KMSG_DUMP_PANIC]	= "panic",
+};
+
+static const char *kmsg_to_str(enum kmsg_dump_reason reason)
+{
+	if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
+		return "unknown";
+
+	return kmsg_reasons[reason];
+}
+
+/**
+ * kmsg_dump - dump kernel log to kernel message dumpers.
+ * @reason: the reason (oops, panic etc) for dumping
+ *
+ * Iterate through each of the dump devices and call the oops/panic
+ * callbacks with the log buffer.
+ */
+void kmsg_dump(enum kmsg_dump_reason reason)
+{
+	unsigned long end;
+	unsigned chars;
+	struct kmsg_dumper *dumper;
+	const char *s1, *s2;
+	unsigned long l1, l2;
+	unsigned long flags;
+
+	/* Theoretically, the log could move on after we do this, but
+	   there's not a lot we can do about that. The new messages
+	   will overwrite the start of what we dump. */
+	spin_lock_irqsave(&logbuf_lock, flags);
+	end = log_end & LOG_BUF_MASK;
+	chars = logged_chars;
+	spin_unlock_irqrestore(&logbuf_lock, flags);
+
+	if (logged_chars > end) {
+		s1 = log_buf + log_buf_len - logged_chars + end;
+		l1 = logged_chars - end;
+
+		s2 = log_buf;
+		l2 = end;
+	} else {
+		s1 = "";
+		l1 = 0;
+
+		s2 = log_buf + end - logged_chars;
+		l2 = logged_chars;
+	}
+
+	if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
+		printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n",
+				kmsg_to_str(reason));
+		return;
+	}
+	list_for_each_entry(dumper, &dump_list, list)
+		dumper->dump(dumper, reason, s1, l1, s2, l2);
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+}
 #endif
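
A sketch of a module using the interface above. The callback signature is
taken from the dumper->dump(dumper, reason, s1, l1, s2, l2) invocation in
kmsg_dump(); everything else (names, the module itself) is illustrative:

	#include <linux/module.h>
	#include <linux/kmsg_dump.h>

	/* Called in oops/panic context with the two segments of the
	 * (possibly wrapped) log buffer; must not sleep. */
	static void example_dump(struct kmsg_dumper *dumper,
				 enum kmsg_dump_reason reason,
				 const char *s1, unsigned long l1,
				 const char *s2, unsigned long l2)
	{
		/* push s1[0..l1) then s2[0..l2) to flash, a NIC, etc. */
	}

	static struct kmsg_dumper example_dumper = {
		.dump = example_dump,
	};

	static int __init example_init(void)
	{
		return kmsg_dump_register(&example_dumper);
	}

	static void __exit example_exit(void)
	{
		kmsg_dump_unregister(&example_dumper);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");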
diff --git a/kernel/relay.c b/kernel/relay.c
index 760c26209a3c..c705a41b4ba3 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1198,7 +1198,7 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
 	relay_consume_bytes(rbuf, buf->private);
 }
 
-static struct pipe_buf_operations relay_pipe_buf_ops = {
+static const struct pipe_buf_operations relay_pipe_buf_ops = {
 	.can_merge = 0,
 	.map = generic_pipe_buf_map,
 	.unmap = generic_pipe_buf_unmap,
diff --git a/kernel/signal.c b/kernel/signal.c
index 6b982f2cf524..1814e68e4de3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -423,7 +423,7 @@ still_pending:
 	 */
 	info->si_signo = sig;
 	info->si_errno = 0;
-	info->si_code = 0;
+	info->si_code = SI_USER;
 	info->si_pid = 0;
 	info->si_uid = 0;
 	}
@@ -607,6 +607,17 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 	return 1;
 }
 
+static inline int is_si_special(const struct siginfo *info)
+{
+	return info <= SEND_SIG_FORCED;
+}
+
+static inline bool si_fromuser(const struct siginfo *info)
+{
+	return info == SEND_SIG_NOINFO ||
+		(!is_si_special(info) && SI_FROMUSER(info));
+}
+
 /*
  * Bad permissions for sending the signal
  * - the caller must hold at least the RCU read lock
@@ -621,7 +632,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
 	if (!valid_signal(sig))
 		return -EINVAL;
 
-	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+	if (!si_fromuser(info))
 		return 0;
 
 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
@@ -949,9 +960,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	int from_ancestor_ns = 0;
 
 #ifdef CONFIG_PID_NS
-	if (!is_si_special(info) && SI_FROMUSER(info) &&
-	    task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
-		from_ancestor_ns = 1;
+	from_ancestor_ns = si_fromuser(info) &&
+			   !task_pid_nr_ns(current, task_active_pid_ns(t));
 #endif
 
 	return __send_signal(sig, info, t, group, from_ancestor_ns);
@@ -1052,12 +1062,6 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 	return ret;
 }
 
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
-	force_sig_info(sig, SEND_SIG_FORCED, t);
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -1186,8 +1190,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
 		goto out_unlock;
 	}
 	pcred = __task_cred(p);
-	if ((info == SEND_SIG_NOINFO ||
-	     (!is_si_special(info) && SI_FROMUSER(info))) &&
+	if (si_fromuser(info) &&
 	    euid != pcred->suid && euid != pcred->uid &&
 	    uid != pcred->suid && uid != pcred->uid) {
 		ret = -EPERM;
@@ -1837,11 +1840,6 @@ relock:
 
 	for (;;) {
 		struct k_sigaction *ka;
-
-		if (unlikely(signal->group_stop_count > 0) &&
-		    do_signal_stop(0))
-			goto relock;
-
 		/*
 		 * Tracing can induce an artifical signal and choose sigaction.
 		 * The return value in @signr determines the default action,
@@ -1853,6 +1851,10 @@ relock:
 		if (unlikely(signr != 0))
 			ka = return_ka;
 		else {
+			if (unlikely(signal->group_stop_count > 0) &&
+			    do_signal_stop(0))
+				goto relock;
+
 			signr = dequeue_signal(current, &current->blocked,
 					       info);
 
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 96ff643a5a59..12f5c55090be 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -89,7 +89,7 @@ int timecompare_offset(struct timecompare *sync,
 		 * source time
 		 */
 		sample.offset =
-			ktime_to_ns(ktime_add(end, start)) / 2 -
+			(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
 			ts;
 
 		/* simple insertion sort based on duration */
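
Reading the arithmetic off the changed line: the estimated offset is still
the midpoint of the two host-clock readings minus the source timestamp,

	offset = (ktime_to_ns(end) + ktime_to_ns(start)) / 2 - ts

only the addition now happens on plain s64 nanosecond values instead of
going through ktime_add(), which (presumably the motivation here) would sum
two large absolute timestamps in the ktime representation itself.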
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e51a1bcb7bed..7968762c8167 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1724,7 +1724,7 @@ ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
 	return ftrace_match(str, regex, len, type);
 }
 
-static void ftrace_match_records(char *buff, int len, int enable)
+static int ftrace_match_records(char *buff, int len, int enable)
 {
 	unsigned int search_len;
 	struct ftrace_page *pg;
@@ -1733,6 +1733,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
 	char *search;
 	int type;
 	int not;
+	int found = 0;
 
 	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 	type = filter_parse_regex(buff, len, &search, &not);
@@ -1750,6 +1751,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
 				rec->flags &= ~flag;
 			else
 				rec->flags |= flag;
+			found = 1;
 		}
 		/*
 		 * Only enable filtering if we have a function that
@@ -1759,6 +1761,8 @@ static void ftrace_match_records(char *buff, int len, int enable)
 			ftrace_filtered = 1;
 	} while_for_each_ftrace_rec();
 	mutex_unlock(&ftrace_lock);
+
+	return found;
 }
 
 static int
@@ -1780,7 +1784,7 @@ ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
 	return 1;
 }
 
-static void ftrace_match_module_records(char *buff, char *mod, int enable)
+static int ftrace_match_module_records(char *buff, char *mod, int enable)
 {
 	unsigned search_len = 0;
 	struct ftrace_page *pg;
@@ -1789,6 +1793,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 	char *search = buff;
 	unsigned long flag;
 	int not = 0;
+	int found = 0;
 
 	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 
@@ -1819,12 +1824,15 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 				rec->flags &= ~flag;
 			else
 				rec->flags |= flag;
+			found = 1;
 		}
 		if (enable && (rec->flags & FTRACE_FL_FILTER))
 			ftrace_filtered = 1;
 
 	} while_for_each_ftrace_rec();
 	mutex_unlock(&ftrace_lock);
+
+	return found;
 }
 
 /*
@@ -1853,8 +1861,9 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
 	if (!strlen(mod))
 		return -EINVAL;
 
-	ftrace_match_module_records(func, mod, enable);
-	return 0;
+	if (ftrace_match_module_records(func, mod, enable))
+		return 0;
+	return -EINVAL;
 }
 
 static struct ftrace_func_command ftrace_mod_cmd = {
@@ -2151,8 +2160,9 @@ static int ftrace_process_regex(char *buff, int len, int enable)
 	func = strsep(&next, ":");
 
 	if (!next) {
-		ftrace_match_records(func, len, enable);
-		return 0;
+		if (ftrace_match_records(func, len, enable))
+			return 0;
+		return ret;
 	}
 
 	/* command found */
@@ -2198,10 +2208,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	    !trace_parser_cont(parser)) {
 		ret = ftrace_process_regex(parser->buffer,
 					   parser->idx, enable);
+		trace_parser_clear(parser);
 		if (ret)
 			goto out_unlock;
-
-		trace_parser_clear(parser);
 	}
 
 	ret = read;
@@ -2543,10 +2552,9 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 					exists = true;
 					break;
 				}
-			if (!exists) {
+			if (!exists)
 				array[(*idx)++] = rec->ip;
 			found = 1;
-			}
 		}
 	} while_for_each_ftrace_rec();
 
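
The user-visible effect of returning the match count from
ftrace_match_records()/ftrace_match_module_records(): writing a pattern
that matches no function to the filter files now fails instead of silently
succeeding. A hypothetical userspace sketch (path assumes debugfs mounted
at /sys/kernel/debug):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *pat = "no_such_function_xyz";	/* made-up name */
		int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, pat, strlen(pat)) < 0)
			perror("write");	/* expected: EINVAL with this patch */
		close(fd);
		return 0;
	}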
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index e06c6e3d56a3..9f4f565b01e6 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -14,7 +14,5 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/power.h>
 
-EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
-EXPORT_TRACEPOINT_SYMBOL_GPL(power_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f58c9ad15830..2326b04c95c4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1211,12 +1208,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;
 
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
-
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1242,11 +1233,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bb6b5e7fa2a2..06ba26747d7e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -313,7 +313,6 @@ static const char *trace_options[] = {
313 "bin", 313 "bin",
314 "block", 314 "block",
315 "stacktrace", 315 "stacktrace",
316 "sched-tree",
317 "trace_printk", 316 "trace_printk",
318 "ftrace_preempt", 317 "ftrace_preempt",
319 "branch", 318 "branch",
@@ -1151,6 +1150,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
+/**
+ * trace_dump_stack - record a stack back trace in the trace buffer
+ */
+void trace_dump_stack(void)
+{
+	unsigned long flags;
+
+	if (tracing_disabled || tracing_selftest_running)
+		return;
+
+	local_save_flags(flags);
+
+	/* skipping 3 traces, seems to get us at the caller of this function */
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+}
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -2316,67 +2331,49 @@ static const struct file_operations tracing_cpumask_fops = {
 	.write		= tracing_cpumask_write,
 };
 
-static ssize_t
-tracing_trace_options_read(struct file *filp, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
+static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
 	struct tracer_opt *trace_opts;
 	u32 tracer_flags;
-	int len = 0;
-	char *buf;
-	int r = 0;
 	int i;
 
-
-	/* calculate max size */
-	for (i = 0; trace_options[i]; i++) {
-		len += strlen(trace_options[i]);
-		len += 3; /* "no" and newline */
-	}
-
 	mutex_lock(&trace_types_lock);
 	tracer_flags = current_trace->flags->val;
 	trace_opts = current_trace->flags->opts;
 
-	/*
-	 * Increase the size with names of options specific
-	 * of the current tracer.
-	 */
-	for (i = 0; trace_opts[i].name; i++) {
-		len += strlen(trace_opts[i].name);
-		len += 3; /* "no" and newline */
-	}
-
-	/* +1 for \0 */
-	buf = kmalloc(len + 1, GFP_KERNEL);
-	if (!buf) {
-		mutex_unlock(&trace_types_lock);
-		return -ENOMEM;
-	}
-
 	for (i = 0; trace_options[i]; i++) {
 		if (trace_flags & (1 << i))
-			r += sprintf(buf + r, "%s\n", trace_options[i]);
+			seq_printf(m, "%s\n", trace_options[i]);
 		else
-			r += sprintf(buf + r, "no%s\n", trace_options[i]);
+			seq_printf(m, "no%s\n", trace_options[i]);
 	}
 
 	for (i = 0; trace_opts[i].name; i++) {
 		if (tracer_flags & trace_opts[i].bit)
-			r += sprintf(buf + r, "%s\n",
-			       trace_opts[i].name);
+			seq_printf(m, "%s\n", trace_opts[i].name);
 		else
-			r += sprintf(buf + r, "no%s\n",
-			       trace_opts[i].name);
+			seq_printf(m, "no%s\n", trace_opts[i].name);
 	}
 	mutex_unlock(&trace_types_lock);
 
-	WARN_ON(r >= len + 1);
+	return 0;
+}
 
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int __set_tracer_option(struct tracer *trace,
+			       struct tracer_flags *tracer_flags,
+			       struct tracer_opt *opts, int neg)
+{
+	int ret;
 
-	kfree(buf);
-	return r;
+	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	if (ret)
+		return ret;
+
+	if (neg)
+		tracer_flags->val &= ~opts->bit;
+	else
+		tracer_flags->val |= opts->bit;
+	return 0;
 }
 
 /* Try to assign a tracer specific option */
@@ -2384,33 +2381,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 {
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
-	int ret = 0, i = 0;
-	int len;
+	int i;
 
 	for (i = 0; tracer_flags->opts[i].name; i++) {
 		opts = &tracer_flags->opts[i];
-		len = strlen(opts->name);
 
-		if (strncmp(cmp, opts->name, len) == 0) {
-			ret = trace->set_flag(tracer_flags->val,
-				opts->bit, !neg);
-			break;
-		}
+		if (strcmp(cmp, opts->name) == 0)
+			return __set_tracer_option(trace, trace->flags,
+						   opts, neg);
 	}
-	/* Not found */
-	if (!tracer_flags->opts[i].name)
-		return -EINVAL;
-
-	/* Refused to handle */
-	if (ret)
-		return ret;
-
-	if (neg)
-		tracer_flags->val &= ~opts->bit;
-	else
-		tracer_flags->val |= opts->bit;
 
-	return 0;
+	return -EINVAL;
 }
 
 static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2430,7 +2411,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
 	char buf[64];
-	char *cmp = buf;
+	char *cmp;
 	int neg = 0;
 	int ret;
 	int i;
@@ -2442,16 +2423,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		return -EFAULT;
 
 	buf[cnt] = 0;
+	cmp = strstrip(buf);
 
-	if (strncmp(buf, "no", 2) == 0) {
+	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
 		cmp += 2;
 	}
 
 	for (i = 0; trace_options[i]; i++) {
-		int len = strlen(trace_options[i]);
-
-		if (strncmp(cmp, trace_options[i], len) == 0) {
+		if (strcmp(cmp, trace_options[i]) == 0) {
 			set_tracer_flags(1 << i, !neg);
 			break;
 		}
@@ -2471,9 +2451,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_trace_options_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_trace_options_show, NULL);
+}
+
 static const struct file_operations tracing_iter_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_trace_options_read,
+	.open		= tracing_trace_options_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
 	.write		= tracing_trace_options_write,
 };
 
@@ -3133,7 +3122,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 	__free_page(spd->pages[idx]);
 }
 
-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -3392,21 +3381,18 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
-static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
-				  size_t cnt, loff_t *ppos)
+static int tracing_clock_show(struct seq_file *m, void *v)
 {
-	char buf[64];
-	int bufiter = 0;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
-		bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+		seq_printf(m,
 			"%s%s%s%s", i ? " " : "",
 			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
 			i == trace_clock_id ? "]" : "");
-	bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+	seq_putc(m, '\n');
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+	return 0;
 }
 
 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3448,6 +3434,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_clock_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_clock_show, NULL);
+}
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -3486,8 +3479,10 @@ static const struct file_operations tracing_mark_fops = {
 };
 
 static const struct file_operations trace_clock_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_clock_read,
+	.open		= tracing_clock_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
 	.write		= tracing_clock_write,
 };
 
@@ -3617,7 +3612,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 }
 
 /* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -3948,39 +3943,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	ret = 0;
-	switch (val) {
-	case 0:
-		/* do nothing if already cleared */
-		if (!(topt->flags->val & topt->opt->bit))
-			break;
-
-		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 0);
-		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
-		topt->flags->val &= ~topt->opt->bit;
-		break;
-	case 1:
-		/* do nothing if already set */
-		if (topt->flags->val & topt->opt->bit)
-			break;
+	if (val != 0 && val != 1)
+		return -EINVAL;
 
+	if (!!(topt->flags->val & topt->opt->bit) != val) {
 		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 1);
+		ret = __set_tracer_option(current_trace, topt->flags,
+					  topt->opt, val);
 		mutex_unlock(&trace_types_lock);
 		if (ret)
 			return ret;
-		topt->flags->val |= topt->opt->bit;
-		break;
-
-	default:
-		return -EINVAL;
 	}
 
 	*ppos += cnt;
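
The new trace_dump_stack() above gives kernel code a way to drop a
backtrace into the ring buffer at an arbitrary point. A hypothetical call
site (assuming the declaration lands in a commonly included header):

	static void my_suspect_path(void)	/* made-up function */
	{
		/*
		 * Records the current backtrace as a stacktrace entry,
		 * visible in the trace output next to other events; a
		 * no-op while tracing is disabled.
		 */
		trace_dump_stack();
	}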
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a52bed2eedd8..4df6a77eb196 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -597,18 +597,17 @@ enum trace_iterator_flags {
 	TRACE_ITER_BIN			= 0x40,
 	TRACE_ITER_BLOCK		= 0x80,
 	TRACE_ITER_STACKTRACE		= 0x100,
-	TRACE_ITER_SCHED_TREE		= 0x200,
-	TRACE_ITER_PRINTK		= 0x400,
-	TRACE_ITER_PREEMPTONLY		= 0x800,
-	TRACE_ITER_BRANCH		= 0x1000,
-	TRACE_ITER_ANNOTATE		= 0x2000,
-	TRACE_ITER_USERSTACKTRACE	= 0x4000,
-	TRACE_ITER_SYM_USEROBJ		= 0x8000,
-	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
-	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
-	TRACE_ITER_LATENCY_FMT		= 0x40000,
-	TRACE_ITER_SLEEP_TIME		= 0x80000,
-	TRACE_ITER_GRAPH_TIME		= 0x100000,
+	TRACE_ITER_PRINTK		= 0x200,
+	TRACE_ITER_PREEMPTONLY		= 0x400,
+	TRACE_ITER_BRANCH		= 0x800,
+	TRACE_ITER_ANNOTATE		= 0x1000,
+	TRACE_ITER_USERSTACKTRACE	= 0x2000,
+	TRACE_ITER_SYM_USEROBJ		= 0x4000,
+	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
+	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
+	TRACE_ITER_LATENCY_FMT		= 0x20000,
+	TRACE_ITER_SLEEP_TIME		= 0x40000,
+	TRACE_ITER_GRAPH_TIME		= 0x80000,
 };
 
 /*
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index d9c60f80aa0d..9e25573242cf 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -25,7 +25,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
25 char *buf; 25 char *buf;
26 int ret = -ENOMEM; 26 int ret = -ENOMEM;
27 27
28 if (atomic_inc_return(&event->profile_count)) 28 if (event->profile_count++ > 0)
29 return 0; 29 return 0;
30 30
31 if (!total_profile_count) { 31 if (!total_profile_count) {
@@ -56,7 +56,7 @@ fail_buf_nmi:
 		perf_trace_buf = NULL;
 	}
 fail_buf:
-	atomic_dec(&event->profile_count);
+	event->profile_count--;
 
 	return ret;
 }
@@ -83,7 +83,7 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 {
 	char *buf, *nmi_buf;
 
-	if (!atomic_add_negative(-1, &event->profile_count))
+	if (--event->profile_count > 0)
 		return;
 
 	event->profile_disable(event);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 1d18315dc836..189b09baf4fb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -78,7 +78,7 @@ EXPORT_SYMBOL_GPL(trace_define_field);
78 if (ret) \ 78 if (ret) \
79 return ret; 79 return ret;
80 80
81int trace_define_common_fields(struct ftrace_event_call *call) 81static int trace_define_common_fields(struct ftrace_event_call *call)
82{ 82{
83 int ret; 83 int ret;
84 struct trace_entry ent; 84 struct trace_entry ent;
@@ -91,7 +91,6 @@ int trace_define_common_fields(struct ftrace_event_call *call)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(trace_define_common_fields);
 
 void trace_destroy_fields(struct ftrace_event_call *call)
 {
@@ -105,9 +104,25 @@ void trace_destroy_fields(struct ftrace_event_call *call)
 	}
 }
 
-static void ftrace_event_enable_disable(struct ftrace_event_call *call,
+int trace_event_raw_init(struct ftrace_event_call *call)
+{
+	int id;
+
+	id = register_ftrace_event(call->event);
+	if (!id)
+		return -ENODEV;
+	call->id = id;
+	INIT_LIST_HEAD(&call->fields);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(trace_event_raw_init);
+
+static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 					int enable)
 {
+	int ret = 0;
+
 	switch (enable) {
 	case 0:
 		if (call->enabled) {
@@ -118,12 +133,20 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call,
 		break;
 	case 1:
 		if (!call->enabled) {
-			call->enabled = 1;
 			tracing_start_cmdline_record();
-			call->regfunc(call);
+			ret = call->regfunc(call);
+			if (ret) {
+				tracing_stop_cmdline_record();
+				pr_info("event trace: Could not enable event "
+					"%s\n", call->name);
+				break;
+			}
+			call->enabled = 1;
 		}
 		break;
 	}
+
+	return ret;
 }
 
 static void ftrace_clear_events(void)
@@ -402,7 +425,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	case 0:
 	case 1:
 		mutex_lock(&event_mutex);
-		ftrace_event_enable_disable(call, val);
+		ret = ftrace_event_enable_disable(call, val);
 		mutex_unlock(&event_mutex);
 		break;
 
@@ -412,7 +435,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	*ppos += cnt;
 
-	return cnt;
+	return ret ? ret : cnt;
 }
 
 static ssize_t
@@ -913,7 +936,9 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 			   id);
 
 	if (call->define_fields) {
-		ret = call->define_fields(call);
+		ret = trace_define_common_fields(call);
+		if (!ret)
+			ret = call->define_fields(call);
 		if (ret < 0) {
 			pr_warning("Could not initialize trace point"
 				   " events/%s\n", call->name);
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index dff8c84ddf17..458e5bfe26d0 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -184,10 +184,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
 	struct struct_name field;				\
 	int ret;						\
 								\
-	ret = trace_define_common_fields(event_call);		\
-	if (ret)						\
-		return ret;					\
-								\
 	tstruct;						\
 								\
 	return ret;						\
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 3aa7eaa2114c..2974bc7538c7 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -151,6 +151,8 @@ check_critical_timing(struct trace_array *tr,
151 goto out_unlock; 151 goto out_unlock;
152 152
153 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); 153 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
154 /* Skip 5 functions to get to the irq/preempt enable function */
155 __trace_stack(tr, flags, 5, pc);
154 156
155 if (data->critical_sequence != max_sequence) 157 if (data->critical_sequence != max_sequence)
156 goto out_unlock; 158 goto out_unlock;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b52d397e57eb..7ecab06547a5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1132,10 +1132,6 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1132 struct kprobe_trace_entry field; 1132 struct kprobe_trace_entry field;
1133 struct trace_probe *tp = (struct trace_probe *)event_call->data; 1133 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1134 1134
1135 ret = trace_define_common_fields(event_call);
1136 if (ret)
1137 return ret;
1138
1139 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); 1135 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1140 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); 1136 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1141 /* Set argument names as fields */ 1137 /* Set argument names as fields */
@@ -1150,10 +1146,6 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
 	struct kretprobe_trace_entry field;
 	struct trace_probe *tp = (struct trace_probe *)event_call->data;
 
-	ret = trace_define_common_fields(event_call);
-	if (ret)
-		return ret;
-
 	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
 	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
 	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
@@ -1453,7 +1445,6 @@ static int register_probe_event(struct trace_probe *tp)
 	call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_EVENT_PROFILE
-	atomic_set(&call->profile_count, -1);
 	call->profile_enable = probe_profile_enable;
 	call->profile_disable = probe_profile_disable;
 #endif
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index acb87d4a4ac1..faf37fa4408c 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -236,7 +236,8 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf,
236 mutex_lock(&ksym_tracer_mutex); 236 mutex_lock(&ksym_tracer_mutex);
237 237
238 hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { 238 hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
239 ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr); 239 ret = trace_seq_printf(s, "%pS:",
240 (void *)(unsigned long)entry->attr.bp_addr);
240 if (entry->attr.bp_type == HW_BREAKPOINT_R) 241 if (entry->attr.bp_type == HW_BREAKPOINT_R)
241 ret = trace_seq_puts(s, "r--\n"); 242 ret = trace_seq_puts(s, "r--\n");
242 else if (entry->attr.bp_type == HW_BREAKPOINT_W) 243 else if (entry->attr.bp_type == HW_BREAKPOINT_W)
@@ -278,21 +279,20 @@ static ssize_t ksym_trace_filter_write(struct file *file,
 {
 	struct trace_ksym *entry;
 	struct hlist_node *node;
-	char *input_string, *ksymname = NULL;
+	char *buf, *input_string, *ksymname = NULL;
 	unsigned long ksym_addr = 0;
 	int ret, op, changed = 0;
 
-	input_string = kzalloc(count + 1, GFP_KERNEL);
-	if (!input_string)
+	buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!buf)
 		return -ENOMEM;
 
-	if (copy_from_user(input_string, buffer, count)) {
-		kfree(input_string);
-		return -EFAULT;
-	}
-	input_string[count] = '\0';
+	ret = -EFAULT;
+	if (copy_from_user(buf, buffer, count))
+		goto out;
 
-	strstrip(input_string);
+	buf[count] = '\0';
+	input_string = strstrip(buf);
 
 	/*
 	 * Clear all breakpoints if:
@@ -300,18 +300,16 @@ static ssize_t ksym_trace_filter_write(struct file *file,
 	 * 2: echo 0 > ksym_trace_filter
 	 * 3: echo "*:---" > ksym_trace_filter
 	 */
-	if (!input_string[0] || !strcmp(input_string, "0") ||
-	    !strcmp(input_string, "*:---")) {
+	if (!buf[0] || !strcmp(buf, "0") ||
+	    !strcmp(buf, "*:---")) {
 		__ksym_trace_reset();
-		kfree(input_string);
-		return count;
+		ret = 0;
+		goto out;
 	}
 
 	ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr);
-	if (ret < 0) {
-		kfree(input_string);
-		return ret;
-	}
+	if (ret < 0)
+		goto out;
 
 	mutex_lock(&ksym_tracer_mutex);
 
@@ -322,7 +320,7 @@ static ssize_t ksym_trace_filter_write(struct file *file,
 		if (entry->attr.bp_type != op)
 			changed = 1;
 		else
-			goto out;
+			goto out_unlock;
 		break;
 		}
 	}
@@ -337,28 +335,24 @@ static ssize_t ksym_trace_filter_write(struct file *file,
 		if (IS_ERR(entry->ksym_hbp))
 			ret = PTR_ERR(entry->ksym_hbp);
 		else
-			goto out;
+			goto out_unlock;
 		}
 		/* Error or "symbol:---" case: drop it */
 		ksym_filter_entry_count--;
 		hlist_del_rcu(&(entry->ksym_hlist));
 		synchronize_rcu();
 		kfree(entry);
-		goto out;
+		goto out_unlock;
 	} else {
 		/* Check for malformed request: (4) */
-		if (op == 0)
-			goto out;
-		ret = process_new_ksym_entry(ksymname, op, ksym_addr);
+		if (op)
+			ret = process_new_ksym_entry(ksymname, op, ksym_addr);
 	}
-out:
+out_unlock:
 	mutex_unlock(&ksym_tracer_mutex);
-
-	kfree(input_string);
-
-	if (!ret)
-		ret = count;
-	return ret;
+out:
+	kfree(buf);
+	return !ret ? count : ret;
 }
 
 static const struct file_operations ksym_tracing_fops = {
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 57501d90096a..75289f372dd2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -217,10 +217,6 @@ int syscall_enter_define_fields(struct ftrace_event_call *call)
217 int i; 217 int i;
218 int offset = offsetof(typeof(trace), args); 218 int offset = offsetof(typeof(trace), args);
219 219
220 ret = trace_define_common_fields(call);
221 if (ret)
222 return ret;
223
224 ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); 220 ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
225 if (ret) 221 if (ret)
226 return ret; 222 return ret;
@@ -241,10 +237,6 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
 	struct syscall_trace_exit trace;
 	int ret;
 
-	ret = trace_define_common_fields(call);
-	if (ret)
-		return ret;
-
 	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
 	if (ret)
 		return ret;
@@ -333,10 +325,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
 	mutex_lock(&syscall_trace_lock);
 	if (!sys_refcount_enter)
 		ret = register_trace_sys_enter(ftrace_syscall_enter);
-	if (ret) {
-		pr_info("event trace: Could not activate"
-				"syscall entry trace point");
-	} else {
+	if (!ret) {
 		set_bit(num, enabled_enter_syscalls);
 		sys_refcount_enter++;
 	}
@@ -370,10 +359,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
 	mutex_lock(&syscall_trace_lock);
 	if (!sys_refcount_exit)
 		ret = register_trace_sys_exit(ftrace_syscall_exit);
-	if (ret) {
-		pr_info("event trace: Could not activate"
-				"syscall exit trace point");
-	} else {
+	if (!ret) {
 		set_bit(num, enabled_exit_syscalls);
 		sys_refcount_exit++;
 	}