Diffstat (limited to 'kernel')

 kernel/bounds.c                    |   2
 kernel/exit.c                      |  36
 kernel/fork.c                      |  11
 kernel/kexec.c                     |  61
 kernel/ksysfs.c                    |  21
 kernel/module.c                    |  41
 kernel/panic.c                     |   3
 kernel/perf_event.c                |  15
 kernel/pid.c                       |  12
 kernel/printk.c                    | 119
 kernel/relay.c                     |   2
 kernel/signal.c                    |  38
 kernel/sysctl.c                    |   2
 kernel/sysctl_binary.c             |   7
 kernel/time/timecompare.c          |   2
 kernel/trace/ftrace.c              |  30
 kernel/trace/power-traces.c        |   2
 kernel/trace/ring_buffer.c         |  29
 kernel/trace/trace.c               | 188
 kernel/trace/trace.h               |  23
 kernel/trace/trace_event_profile.c |   6
 kernel/trace/trace_events.c        |  41
 kernel/trace/trace_export.c        |   4
 kernel/trace/trace_irqsoff.c       |   2
 kernel/trace/trace_kprobe.c        |   9
 kernel/trace/trace_ksym.c          |  56
 kernel/trace/trace_syscalls.c      |  18
 27 files changed, 489 insertions(+), 291 deletions(-)
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 3c5301381837..98a51f26c136 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -12,7 +12,7 @@
 
 void foo(void)
 {
-	/* The enum constants to put into include/linux/bounds.h */
+	/* The enum constants to put into include/generated/bounds.h */
 	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
 	DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
 	/* End of constants */
diff --git a/kernel/exit.c b/kernel/exit.c
index 5962d7ccf243..546774a31a66 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -68,10 +68,10 @@ static void __unhash_process(struct task_struct *p)
 		detach_pid(p, PIDTYPE_SID);
 
 		list_del_rcu(&p->tasks);
+		list_del_init(&p->sibling);
 		__get_cpu_var(process_counts)--;
 	}
 	list_del_rcu(&p->thread_group);
-	list_del_init(&p->sibling);
 }
 
 /*
@@ -736,12 +736,9 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 /*
  * Any that need to be release_task'd are put on the @dead list.
  */
-static void reparent_thread(struct task_struct *father, struct task_struct *p,
+static void reparent_leader(struct task_struct *father, struct task_struct *p,
 				struct list_head *dead)
 {
-	if (p->pdeath_signal)
-		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
 	list_move_tail(&p->sibling, &p->real_parent->children);
 
 	if (task_detached(p))
@@ -780,12 +777,18 @@ static void forget_original_parent(struct task_struct *father)
 	reaper = find_new_reaper(father);
 
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
-		p->real_parent = reaper;
-		if (p->parent == father) {
-			BUG_ON(task_ptrace(p));
-			p->parent = p->real_parent;
-		}
-		reparent_thread(father, p, &dead_children);
+		struct task_struct *t = p;
+		do {
+			t->real_parent = reaper;
+			if (t->parent == father) {
+				BUG_ON(task_ptrace(t));
+				t->parent = t->real_parent;
+			}
+			if (t->pdeath_signal)
+				group_send_sig_info(t->pdeath_signal,
+						    SEND_SIG_NOINFO, t);
+		} while_each_thread(p, t);
+		reparent_leader(father, p, &dead_children);
 	}
 	write_unlock_irq(&tasklist_lock);
 
@@ -1551,14 +1554,9 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
 	struct task_struct *p;
 
 	list_for_each_entry(p, &tsk->children, sibling) {
-		/*
-		 * Do not consider detached threads.
-		 */
-		if (!task_detached(p)) {
-			int ret = wait_consider_task(wo, 0, p);
-			if (ret)
-				return ret;
-		}
+		int ret = wait_consider_task(wo, 0, p);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
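The forget_original_parent() hunk above moves pdeath_signal delivery out of reparent_thread() (now reparent_leader(), called once per thread group) and into a walk over every thread of each child, since each thread can request its own parent-death signal. The do/while_each_thread iteration it relies on is the standard sched.h idiom of this era; for reference:

    /* include/linux/sched.h (contemporary): visit every thread in the
     * group led by g. The loop body runs for t == g first, then for
     * each other thread until next_thread() wraps back around to g. */
    #define while_each_thread(g, t) \
            while ((t = next_thread(t)) != g)

    /* Usage pattern, as in the hunk above (p is the group leader): */
    struct task_struct *t = p;
    do {
            /* ... operate on thread t ... */
    } while_each_thread(p, t);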
diff --git a/kernel/fork.c b/kernel/fork.c
index 9bd91447e052..5b2959b3ffc2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1127,6 +1127,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	p->memcg_batch.do_batch = 0;
+	p->memcg_batch.memcg = NULL;
+#endif
 
 	p->bts = NULL;
 
@@ -1206,9 +1210,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->sas_ss_sp = p->sas_ss_size = 0;
 
 	/*
-	 * Syscall tracing should be turned off in the child regardless
-	 * of CLONE_PTRACE.
+	 * Syscall tracing and stepping should be turned off in the
+	 * child regardless of CLONE_PTRACE.
 	 */
+	user_disable_single_step(p);
 	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
 #ifdef TIF_SYSCALL_EMU
 	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
@@ -1286,7 +1291,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (likely(p->pid)) {
-		list_add_tail(&p->sibling, &p->real_parent->children);
 		tracehook_finish_clone(p, clone_flags, trace);
 
 		if (thread_group_leader(p)) {
@@ -1298,6 +1302,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			p->signal->tty = tty_kref_get(current->signal->tty);
 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
 			attach_pid(p, PIDTYPE_SID, task_session(current));
+			list_add_tail(&p->sibling, &p->real_parent->children);
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
 			__get_cpu_var(process_counts)++;
 		}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index f336e2107f98..a9a93d9ee7a7 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -21,7 +21,7 @@
 #include <linux/hardirq.h>
 #include <linux/elf.h>
 #include <linux/elfcore.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/numa.h>
 #include <linux/suspend.h>
@@ -31,6 +31,7 @@
 #include <linux/cpu.h>
 #include <linux/console.h>
 #include <linux/vmalloc.h>
+#include <linux/swap.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -1082,6 +1083,64 @@ void crash_kexec(struct pt_regs *regs)
 	}
 }
 
+size_t crash_get_memory_size(void)
+{
+	size_t size;
+	mutex_lock(&kexec_mutex);
+	size = crashk_res.end - crashk_res.start + 1;
+	mutex_unlock(&kexec_mutex);
+	return size;
+}
+
+static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+		free_page((unsigned long)__va(addr));
+		totalram_pages++;
+	}
+}
+
+int crash_shrink_memory(unsigned long new_size)
+{
+	int ret = 0;
+	unsigned long start, end;
+
+	mutex_lock(&kexec_mutex);
+
+	if (kexec_crash_image) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+	start = crashk_res.start;
+	end = crashk_res.end;
+
+	if (new_size >= end - start + 1) {
+		ret = -EINVAL;
+		if (new_size == end - start + 1)
+			ret = 0;
+		goto unlock;
+	}
+
+	start = roundup(start, PAGE_SIZE);
+	end = roundup(start + new_size, PAGE_SIZE);
+
+	free_reserved_phys_range(end, crashk_res.end);
+
+	if (start == end) {
+		crashk_res.end = end;
+		release_resource(&crashk_res);
+	} else
+		crashk_res.end = end - 1;
+
+unlock:
+	mutex_unlock(&kexec_mutex);
+	return ret;
+}
+
 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
 			    size_t data_len)
 {
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 528dd78e7e7e..3feaf5a74514 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -100,6 +100,26 @@ static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
 }
 KERNEL_ATTR_RO(kexec_crash_loaded);
 
+static ssize_t kexec_crash_size_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%zu\n", crash_get_memory_size());
+}
+static ssize_t kexec_crash_size_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buf, size_t count)
+{
+	unsigned long cnt;
+	int ret;
+
+	if (strict_strtoul(buf, 0, &cnt))
+		return -EINVAL;
+
+	ret = crash_shrink_memory(cnt);
+	return ret < 0 ? ret : count;
+}
+KERNEL_ATTR_RW(kexec_crash_size);
+
 static ssize_t vmcoreinfo_show(struct kobject *kobj,
 			       struct kobj_attribute *attr, char *buf)
 {
@@ -147,6 +167,7 @@ static struct attribute * kernel_attrs[] = {
 #ifdef CONFIG_KEXEC
 	&kexec_loaded_attr.attr,
 	&kexec_crash_loaded_attr.attr,
+	&kexec_crash_size_attr.attr,
 	&vmcoreinfo_attr.attr,
 #endif
 	NULL
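Together with the kexec.c hunk above, this exposes the crash kernel reservation as /sys/kernel/kexec_crash_size: reads report the reserved size, writes shrink it (growing is rejected with EINVAL, and shrinking fails with ENOENT once a crash kernel is loaded). A userspace sketch of the intended use, assuming only the sysfs file added by this commit:

    /* Userspace sketch: inspect and shrink the crashkernel reservation. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long size;
            FILE *f = fopen("/sys/kernel/kexec_crash_size", "r");

            if (!f || fscanf(f, "%lu", &size) != 1) {
                    perror("kexec_crash_size");
                    return 1;
            }
            fclose(f);
            printf("reserved: %lu bytes\n", size);

            /* Give half of the reservation back to the page allocator. */
            f = fopen("/sys/kernel/kexec_crash_size", "w");
            if (!f || fprintf(f, "%lu", size / 2) < 0 || fclose(f) != 0) {
                    perror("shrink");
                    return 1;
            }
            return 0;
    }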
diff --git a/kernel/module.c b/kernel/module.c
index 12afc5a3ddd3..e96b8ed1cb6a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -880,11 +880,23 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
+/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
+static unsigned long maybe_relocated(unsigned long crc,
+				     const struct module *crc_owner)
+{
+#ifdef ARCH_RELOCATES_KCRCTAB
+	if (crc_owner == NULL)
+		return crc - (unsigned long)reloc_start;
+#endif
+	return crc;
+}
+
 static int check_version(Elf_Shdr *sechdrs,
 			 unsigned int versindex,
 			 const char *symname,
 			 struct module *mod,
-			 const unsigned long *crc)
+			 const unsigned long *crc,
+			 const struct module *crc_owner)
 {
 	unsigned int i, num_versions;
 	struct modversion_info *versions;
@@ -905,10 +917,10 @@ static int check_version(Elf_Shdr *sechdrs,
 		if (strcmp(versions[i].name, symname) != 0)
 			continue;
 
-		if (versions[i].crc == *crc)
+		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
 			return 1;
 		DEBUGP("Found checksum %lX vs module %lX\n",
-		       *crc, versions[i].crc);
+		       maybe_relocated(*crc, crc_owner), versions[i].crc);
 		goto bad_version;
 	}
 
@@ -931,7 +943,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 	if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
 			 &crc, true, false))
 		BUG();
-	return check_version(sechdrs, versindex, "module_layout", mod, crc);
+	return check_version(sechdrs, versindex, "module_layout", mod, crc,
+			     NULL);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -949,7 +962,8 @@ static inline int check_version(Elf_Shdr *sechdrs,
 				unsigned int versindex,
 				const char *symname,
 				struct module *mod,
-				const unsigned long *crc)
+				const unsigned long *crc,
+				const struct module *crc_owner)
 {
 	return 1;
 }
@@ -984,8 +998,8 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
 	/* use_module can fail due to OOM,
 	   or module initialization or unloading */
 	if (sym) {
-		if (!check_version(sechdrs, versindex, name, mod, crc) ||
-		    !use_module(mod, owner))
+		if (!check_version(sechdrs, versindex, name, mod, crc, owner)
+		    || !use_module(mod, owner))
 			sym = NULL;
 	}
 	return sym;
@@ -1896,9 +1910,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 	unsigned int i;
 
 	/* only scan the sections containing data */
-	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-			   (unsigned long)mod->module_core,
-			   sizeof(struct module), GFP_KERNEL);
+	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -1907,8 +1919,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
 			continue;
 
-		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-				   (unsigned long)mod->module_core,
+		kmemleak_scan_area((void *)sechdrs[i].sh_addr,
 				   sechdrs[i].sh_size, GFP_KERNEL);
 	}
 }
@@ -2236,6 +2247,12 @@ static noinline struct module *load_module(void __user *umod,
 					 "_ftrace_events",
 					 sizeof(*mod->trace_events),
 					 &mod->num_trace_events);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+			   mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
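maybe_relocated() only adjusts a CRC when crc_owner is NULL, i.e. when the exporting side is the kernel itself: on architectures that relocate the kernel at boot, entries in the kernel's kcrctab have had the relocation offset applied and must be un-relocated before comparing against the link-time values recorded in a module's __versions table. An arch opts in by defining ARCH_RELOCATES_KCRCTAB and providing reloc_start; a sketch of what that looks like, modeled on the relocatable powerpc configuration (treat the exact header contents as an assumption):

    /* Sketch of an arch opting in (modeled on powerpc's <asm/module.h>
     * under CONFIG_RELOCATABLE; illustrative, not authoritative). */
    #ifdef CONFIG_RELOCATABLE
    #define ARCH_RELOCATES_KCRCTAB
    /* offset between the running kernel and its link-time address */
    extern const unsigned long reloc_start[];
    #endif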
diff --git a/kernel/panic.c b/kernel/panic.c
index 96b45d0b4ba5..5827f7b97254 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -10,6 +10,7 @@
  */
 #include <linux/debug_locks.h>
 #include <linux/interrupt.h>
+#include <linux/kmsg_dump.h>
 #include <linux/kallsyms.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
@@ -74,6 +75,7 @@ NORET_TYPE void panic(const char * fmt, ...)
 	dump_stack();
 #endif
 
+	kmsg_dump(KMSG_DUMP_PANIC);
 	/*
 	 * If we have crashed and we have a crash kernel loaded let it handle
 	 * everything else.
@@ -339,6 +341,7 @@ void oops_exit(void)
 {
 	do_oops_enter_exit();
 	print_oops_end_marker();
+	kmsg_dump(KMSG_DUMP_OOPS);
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9052d6c8c9fd..8ab86988bd24 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,9 @@ static void __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		goto unlock;
+
 	/*
 	 * Don't put the event on if it is disabled or if
 	 * it is in a group and the group isn't on.
@@ -925,6 +928,9 @@ static void __perf_event_enable(void *info)
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		goto unlock;
+
 	/*
 	 * If the event is in a group and isn't the group leader,
 	 * then don't put it on unless the group is on.
@@ -1595,15 +1601,12 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	unsigned long flags;
 	int err;
 
-	/*
-	 * If cpu is not a wildcard then this is a percpu event:
-	 */
-	if (cpu != -1) {
+	if (pid == -1 && cpu != -1) {
 		/* Must be root to operate on a CPU event: */
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		if (cpu < 0 || cpu > num_possible_cpus())
+		if (cpu < 0 || cpu >= nr_cpumask_bits)
 			return ERR_PTR(-EINVAL);
 
 		/*
@@ -4564,7 +4567,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->type >= PERF_TYPE_MAX)
 		return -EINVAL;
 
-	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
+	if (attr->__reserved_1 || attr->__reserved_2)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
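The find_get_context() hunk does two things: the root check now applies only to pure CPU events (pid == -1), so per-task-per-CPU events no longer require CAP_SYS_ADMIN, and the CPU id validation is tightened. With N possible CPUs the valid ids are 0..N-1, so the old `cpu > num_possible_cpus()` accepted the out-of-range id N (and, on sparse possible maps, ids with no CPU behind them), while `cpu >= nr_cpumask_bits` rejects anything that cannot index a cpumask. A stand-alone illustration with assumed values:

    /* Illustration only: 4 possible CPUs, so valid ids are 0..3. */
    #include <stdio.h>

    int main(void)
    {
            int num_possible_cpus = 4; /* assumed value */
            int nr_cpumask_bits = 4;   /* assumed value */
            int cpu = 4;               /* one past the last valid id */

            /* old check: 4 > 4 is false, so cpu 4 slipped through */
            printf("old rejects cpu 4: %d\n", cpu < 0 || cpu > num_possible_cpus);
            /* new check: 4 >= 4 is true, so cpu 4 is refused */
            printf("new rejects cpu 4: %d\n", cpu < 0 || cpu >= nr_cpumask_bits);
            return 0;
    }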
diff --git a/kernel/pid.c b/kernel/pid.c
index d3f722d20f9c..2e17c9c92cbe 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -141,11 +141,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
 			 * installing it:
 			 */
 			spin_lock_irq(&pidmap_lock);
-			if (map->page)
-				kfree(page);
-			else
+			if (!map->page) {
 				map->page = page;
+				page = NULL;
+			}
 			spin_unlock_irq(&pidmap_lock);
+			kfree(page);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -268,12 +269,11 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 	for (type = 0; type < PIDTYPE_MAX; ++type)
 		INIT_HLIST_HEAD(&pid->tasks[type]);
 
+	upid = pid->numbers + ns->level;
 	spin_lock_irq(&pidmap_lock);
-	for (i = ns->level; i >= 0; i--) {
-		upid = &pid->numbers[i];
+	for ( ; upid >= pid->numbers; --upid)
 		hlist_add_head_rcu(&upid->pid_chain,
 				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
-	}
 	spin_unlock_irq(&pidmap_lock);
 
 out:
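Both pid.c hunks shrink the pidmap_lock hold time. The first is the common publish-under-lock pattern: allocate outside the lock, install the allocation only if no one raced us, and defer freeing the losing copy until after the unlock (kfree(NULL) is a no-op, so the unconditional kfree() is safe). The second hoists the upid pointer arithmetic out of the locked loop. The first pattern, distilled (illustrative, not verbatim kernel code):

    void *page = kzalloc(PAGE_SIZE, GFP_KERNEL); /* may sleep; lock not held */

    spin_lock_irq(&pidmap_lock);
    if (!map->page) {
            map->page = page;   /* we won the race; ownership transferred */
            page = NULL;
    }
    spin_unlock_irq(&pidmap_lock);
    kfree(page);                /* frees only a losing duplicate, never under the lock */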
diff --git a/kernel/printk.c b/kernel/printk.c
index b5ac4d99c667..17463ca2e229 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -34,6 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
 #include <linux/ratelimit.h>
+#include <linux/kmsg_dump.h>
 
 #include <asm/uaccess.h>
 
@@ -1405,4 +1406,122 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 	return false;
 }
 EXPORT_SYMBOL(printk_timed_ratelimit);
+
+static DEFINE_SPINLOCK(dump_list_lock);
+static LIST_HEAD(dump_list);
+
+/**
+ * kmsg_dump_register - register a kernel log dumper.
+ * @dumper: pointer to the kmsg_dumper structure
+ *
+ * Adds a kernel log dumper to the system. The dump callback in the
+ * structure will be called when the kernel oopses or panics and must be
+ * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
+ */
+int kmsg_dump_register(struct kmsg_dumper *dumper)
+{
+	unsigned long flags;
+	int err = -EBUSY;
+
+	/* The dump callback needs to be set */
+	if (!dumper->dump)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dump_list_lock, flags);
+	/* Don't allow registering multiple times */
+	if (!dumper->registered) {
+		dumper->registered = 1;
+		list_add_tail(&dumper->list, &dump_list);
+		err = 0;
+	}
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_register);
+
+/**
+ * kmsg_dump_unregister - unregister a kmsg dumper.
+ * @dumper: pointer to the kmsg_dumper structure
+ *
+ * Removes a dump device from the system. Returns zero on success and
+ * %-EINVAL otherwise.
+ */
+int kmsg_dump_unregister(struct kmsg_dumper *dumper)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+
+	spin_lock_irqsave(&dump_list_lock, flags);
+	if (dumper->registered) {
+		dumper->registered = 0;
+		list_del(&dumper->list);
+		err = 0;
+	}
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
+
+static const char const *kmsg_reasons[] = {
+	[KMSG_DUMP_OOPS]	= "oops",
+	[KMSG_DUMP_PANIC]	= "panic",
+};
+
+static const char *kmsg_to_str(enum kmsg_dump_reason reason)
+{
+	if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
+		return "unknown";
+
+	return kmsg_reasons[reason];
+}
+
+/**
+ * kmsg_dump - dump kernel log to kernel message dumpers.
+ * @reason: the reason (oops, panic etc) for dumping
+ *
+ * Iterate through each of the dump devices and call the oops/panic
+ * callbacks with the log buffer.
+ */
+void kmsg_dump(enum kmsg_dump_reason reason)
+{
+	unsigned long end;
+	unsigned chars;
+	struct kmsg_dumper *dumper;
+	const char *s1, *s2;
+	unsigned long l1, l2;
+	unsigned long flags;
+
+	/* Theoretically, the log could move on after we do this, but
+	   there's not a lot we can do about that. The new messages
+	   will overwrite the start of what we dump. */
+	spin_lock_irqsave(&logbuf_lock, flags);
+	end = log_end & LOG_BUF_MASK;
+	chars = logged_chars;
+	spin_unlock_irqrestore(&logbuf_lock, flags);
+
+	if (logged_chars > end) {
+		s1 = log_buf + log_buf_len - logged_chars + end;
+		l1 = logged_chars - end;
+
+		s2 = log_buf;
+		l2 = end;
+	} else {
+		s1 = "";
+		l1 = 0;
+
+		s2 = log_buf + end - logged_chars;
+		l2 = logged_chars;
+	}
+
+	if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
+		printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n",
+				kmsg_to_str(reason));
+		return;
+	}
+	list_for_each_entry(dumper, &dump_list, list)
+		dumper->dump(dumper, reason, s1, l1, s2, l2);
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+}
 #endif
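kmsg_dump() hands each registered dumper the log in at most two segments because the ring buffer may have wrapped: s1/l1 is the older part and s2/l2 the newer part, either of which may be empty. A minimal sketch of a client of the API added above, persisting the log from oops/panic context (the module boilerplate is the standard pattern; the callback signature is exactly the one kmsg_dump() invokes here):

    #include <linux/module.h>
    #include <linux/kmsg_dump.h>

    static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
                        const char *s1, unsigned long l1,
                        const char *s2, unsigned long l2)
    {
            /* s1/l1 precedes s2/l2 chronologically. Write both segments
             * to flash, a network card, etc. This runs in oops/panic
             * context, so it must not sleep or take sleeping locks. */
    }

    static struct kmsg_dumper my_dumper = {
            .dump = my_dump,
    };

    static int __init my_init(void)
    {
            return kmsg_dump_register(&my_dumper);
    }

    static void __exit my_exit(void)
    {
            kmsg_dump_unregister(&my_dumper);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");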
diff --git a/kernel/relay.c b/kernel/relay.c
index 760c26209a3c..c705a41b4ba3 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1198,7 +1198,7 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
 	relay_consume_bytes(rbuf, buf->private);
 }
 
-static struct pipe_buf_operations relay_pipe_buf_ops = {
+static const struct pipe_buf_operations relay_pipe_buf_ops = {
 	.can_merge = 0,
 	.map = generic_pipe_buf_map,
 	.unmap = generic_pipe_buf_unmap,
diff --git a/kernel/signal.c b/kernel/signal.c
index 6b982f2cf524..1814e68e4de3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -423,7 +423,7 @@ still_pending:
 	 */
 	info->si_signo = sig;
 	info->si_errno = 0;
-	info->si_code = 0;
+	info->si_code = SI_USER;
 	info->si_pid = 0;
 	info->si_uid = 0;
 }
@@ -607,6 +607,17 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 	return 1;
 }
 
+static inline int is_si_special(const struct siginfo *info)
+{
+	return info <= SEND_SIG_FORCED;
+}
+
+static inline bool si_fromuser(const struct siginfo *info)
+{
+	return info == SEND_SIG_NOINFO ||
+		(!is_si_special(info) && SI_FROMUSER(info));
+}
+
 /*
  * Bad permissions for sending the signal
  * - the caller must hold at least the RCU read lock
@@ -621,7 +632,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
 	if (!valid_signal(sig))
 		return -EINVAL;
 
-	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+	if (!si_fromuser(info))
 		return 0;
 
 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
@@ -949,9 +960,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	int from_ancestor_ns = 0;
 
 #ifdef CONFIG_PID_NS
-	if (!is_si_special(info) && SI_FROMUSER(info) &&
-			task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
-		from_ancestor_ns = 1;
+	from_ancestor_ns = si_fromuser(info) &&
+			   !task_pid_nr_ns(current, task_active_pid_ns(t));
 #endif
 
 	return __send_signal(sig, info, t, group, from_ancestor_ns);
@@ -1052,12 +1062,6 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 	return ret;
 }
 
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
-	force_sig_info(sig, SEND_SIG_FORCED, t);
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -1186,8 +1190,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
 		goto out_unlock;
 	}
 	pcred = __task_cred(p);
-	if ((info == SEND_SIG_NOINFO ||
-	     (!is_si_special(info) && SI_FROMUSER(info))) &&
+	if (si_fromuser(info) &&
 	    euid != pcred->suid && euid != pcred->uid &&
 	    uid != pcred->suid && uid != pcred->uid) {
 		ret = -EPERM;
@@ -1837,11 +1840,6 @@ relock:
 
 	for (;;) {
 		struct k_sigaction *ka;
-
-		if (unlikely(signal->group_stop_count > 0) &&
-		    do_signal_stop(0))
-			goto relock;
-
 		/*
 		 * Tracing can induce an artifical signal and choose sigaction.
 		 * The return value in @signr determines the default action,
@@ -1853,6 +1851,10 @@ relock:
 		if (unlikely(signr != 0))
 			ka = return_ka;
 		else {
+			if (unlikely(signal->group_stop_count > 0) &&
+			    do_signal_stop(0))
+				goto relock;
+
 			signr = dequeue_signal(current, &current->blocked,
 					       info);
 
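The new helpers rely on the special siginfo sentinels being small integer-valued pointers, so is_si_special() can recognize all of them with a single ordered comparison. For reference, the contemporary sched.h definitions:

    #define SEND_SIG_NOINFO ((struct siginfo *) 0)
    #define SEND_SIG_PRIV   ((struct siginfo *) 1)
    #define SEND_SIG_FORCED ((struct siginfo *) 2)

si_fromuser() is therefore true for SEND_SIG_NOINFO (kill() and friends pass no siginfo) and for a real siginfo whose si_code says it originated in userspace; the rewritten tests in check_kill_permission(), send_signal() and kill_pid_info_as_uid() are direct substitutions of the previously open-coded predicate.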
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 45e4bef0012a..6665761c006d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1131,7 +1131,7 @@ static struct ctl_table vm_table[] = {
 		.data		= &sysctl_max_map_count,
 		.maxlen		= sizeof(sysctl_max_map_count),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
 	},
 #else
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index b75dbf40f573..112533d5fc08 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1399,6 +1399,13 @@ static void deprecated_sysctl_warning(const int *name, int nlen)
 {
 	int i;
 
+	/*
+	 * CTL_KERN/KERN_VERSION is used by older glibc and cannot
+	 * ever go away.
+	 */
+	if (name[0] == CTL_KERN && name[1] == KERN_VERSION)
+		return;
+
 	if (printk_ratelimit()) {
 		printk(KERN_INFO
 			"warning: process `%s' used the deprecated sysctl "
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 96ff643a5a59..12f5c55090be 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -89,7 +89,7 @@ int timecompare_offset(struct timecompare *sync,
 			 * source time
			 */
			sample.offset =
-				ktime_to_ns(ktime_add(end, start)) / 2 -
+				(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
				ts;
 
			/* simple insertion sort based on duration */
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e51a1bcb7bed..7968762c8167 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1724,7 +1724,7 @@ ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
 	return ftrace_match(str, regex, len, type);
 }
 
-static void ftrace_match_records(char *buff, int len, int enable)
+static int ftrace_match_records(char *buff, int len, int enable)
 {
 	unsigned int search_len;
 	struct ftrace_page *pg;
@@ -1733,6 +1733,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
 	char *search;
 	int type;
 	int not;
+	int found = 0;
 
 	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 	type = filter_parse_regex(buff, len, &search, &not);
@@ -1750,6 +1751,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
 				rec->flags &= ~flag;
 			else
 				rec->flags |= flag;
+			found = 1;
 		}
 		/*
 		 * Only enable filtering if we have a function that
@@ -1759,6 +1761,8 @@ static void ftrace_match_records(char *buff, int len, int enable)
 			ftrace_filtered = 1;
 	} while_for_each_ftrace_rec();
 	mutex_unlock(&ftrace_lock);
+
+	return found;
 }
 
 static int
@@ -1780,7 +1784,7 @@ ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
 	return 1;
 }
 
-static void ftrace_match_module_records(char *buff, char *mod, int enable)
+static int ftrace_match_module_records(char *buff, char *mod, int enable)
 {
 	unsigned search_len = 0;
 	struct ftrace_page *pg;
@@ -1789,6 +1793,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 	char *search = buff;
 	unsigned long flag;
 	int not = 0;
+	int found = 0;
 
 	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 
@@ -1819,12 +1824,15 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 				rec->flags &= ~flag;
 			else
 				rec->flags |= flag;
+			found = 1;
 		}
 		if (enable && (rec->flags & FTRACE_FL_FILTER))
 			ftrace_filtered = 1;
 
 	} while_for_each_ftrace_rec();
 	mutex_unlock(&ftrace_lock);
+
+	return found;
 }
 
 /*
@@ -1853,8 +1861,9 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
 	if (!strlen(mod))
 		return -EINVAL;
 
-	ftrace_match_module_records(func, mod, enable);
-	return 0;
+	if (ftrace_match_module_records(func, mod, enable))
+		return 0;
+	return -EINVAL;
 }
 
 static struct ftrace_func_command ftrace_mod_cmd = {
@@ -2151,8 +2160,9 @@ static int ftrace_process_regex(char *buff, int len, int enable)
 	func = strsep(&next, ":");
 
 	if (!next) {
-		ftrace_match_records(func, len, enable);
-		return 0;
+		if (ftrace_match_records(func, len, enable))
+			return 0;
+		return ret;
 	}
 
 	/* command found */
@@ -2198,10 +2208,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	    !trace_parser_cont(parser)) {
 		ret = ftrace_process_regex(parser->buffer,
 					   parser->idx, enable);
+		trace_parser_clear(parser);
 		if (ret)
 			goto out_unlock;
-
-		trace_parser_clear(parser);
 	}
 
 	ret = read;
@@ -2543,10 +2552,9 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 				exists = true;
 				break;
 			}
-			if (!exists) {
+			if (!exists)
 				array[(*idx)++] = rec->ip;
-				found = 1;
-			}
+			found = 1;
 		}
 	} while_for_each_ftrace_rec();
 
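With ftrace_match_records() and ftrace_match_module_records() now reporting whether anything matched, writing a pattern that matches no function fails instead of being silently accepted. A userspace sketch of the newly observable behavior, assuming debugfs is mounted at /sys/kernel/debug:

    /* Writing a bogus name to set_ftrace_filter now fails with EINVAL. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "no_such_function", 16) < 0)
                    printf("rejected: %s\n", strerror(errno)); /* EINVAL */
            close(fd);
            return 0;
    }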
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index e06c6e3d56a3..9f4f565b01e6 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -14,7 +14,5 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/power.h>
 
-EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
-EXPORT_TRACEPOINT_SYMBOL_GPL(power_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f58c9ad15830..2326b04c95c4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1211,12 +1208,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;
 
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
-
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1242,11 +1233,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
 * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 /*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
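The net effect of the ring_buffer_resize() hunks is to hoist writer exclusion out of the per-cpu helpers: recording is disabled once for the whole buffer, synchronize_sched() waits for in-flight writers (which run with preemption disabled) to drain before any page list is touched, and the reader_lock is now held across the entire page-list surgery including rb_check_pages(). The quiescence idiom in isolation:

    /* Illustrative: writers bail out while record_disabled is non-zero,
     * and they write with preemption disabled, so synchronize_sched()
     * guarantees every writer that started before the increment is done. */
    atomic_inc(&buffer->record_disabled);
    synchronize_sched();        /* wait for all in-flight writers */

    /* ... safely restructure the per-cpu page lists ... */

    atomic_dec(&buffer->record_disabled);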
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index bb6b5e7fa2a2..8b9f20ab8eed 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | * Copyright (C) 2004 William Lee Irwin III | 12 | * Copyright (C) 2004 William Lee Irwin III |
| 13 | */ | 13 | */ |
| 14 | #include <linux/ring_buffer.h> | 14 | #include <linux/ring_buffer.h> |
| 15 | #include <linux/utsrelease.h> | 15 | #include <generated/utsrelease.h> |
| 16 | #include <linux/stacktrace.h> | 16 | #include <linux/stacktrace.h> |
| 17 | #include <linux/writeback.h> | 17 | #include <linux/writeback.h> |
| 18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
| @@ -313,7 +313,6 @@ static const char *trace_options[] = { | |||
| 313 | "bin", | 313 | "bin", |
| 314 | "block", | 314 | "block", |
| 315 | "stacktrace", | 315 | "stacktrace", |
| 316 | "sched-tree", | ||
| 317 | "trace_printk", | 316 | "trace_printk", |
| 318 | "ftrace_preempt", | 317 | "ftrace_preempt", |
| 319 | "branch", | 318 | "branch", |
| @@ -1151,6 +1150,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | |||
| 1151 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); | 1150 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); |
| 1152 | } | 1151 | } |
| 1153 | 1152 | ||
| 1153 | /** | ||
| 1154 | * trace_dump_stack - record a stack back trace in the trace buffer | ||
| 1155 | */ | ||
| 1156 | void trace_dump_stack(void) | ||
| 1157 | { | ||
| 1158 | unsigned long flags; | ||
| 1159 | |||
| 1160 | if (tracing_disabled || tracing_selftest_running) | ||
| 1161 | return; | ||
| 1162 | |||
| 1163 | local_save_flags(flags); | ||
| 1164 | |||
| 1165 | /* skipping 3 traces, seems to get us at the caller of this function */ | ||
| 1166 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); | ||
| 1167 | } | ||
| 1168 | |||
| 1154 | void | 1169 | void |
| 1155 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 1170 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) |
| 1156 | { | 1171 | { |
| @@ -2316,67 +2331,49 @@ static const struct file_operations tracing_cpumask_fops = { | |||
| 2316 | .write = tracing_cpumask_write, | 2331 | .write = tracing_cpumask_write, |
| 2317 | }; | 2332 | }; |
| 2318 | 2333 | ||
| 2319 | static ssize_t | 2334 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
| 2320 | tracing_trace_options_read(struct file *filp, char __user *ubuf, | ||
| 2321 | size_t cnt, loff_t *ppos) | ||
| 2322 | { | 2335 | { |
| 2323 | struct tracer_opt *trace_opts; | 2336 | struct tracer_opt *trace_opts; |
| 2324 | u32 tracer_flags; | 2337 | u32 tracer_flags; |
| 2325 | int len = 0; | ||
| 2326 | char *buf; | ||
| 2327 | int r = 0; | ||
| 2328 | int i; | 2338 | int i; |
| 2329 | 2339 | ||
| 2330 | |||
| 2331 | /* calculate max size */ | ||
| 2332 | for (i = 0; trace_options[i]; i++) { | ||
| 2333 | len += strlen(trace_options[i]); | ||
| 2334 | len += 3; /* "no" and newline */ | ||
| 2335 | } | ||
| 2336 | |||
| 2337 | mutex_lock(&trace_types_lock); | 2340 | mutex_lock(&trace_types_lock); |
| 2338 | tracer_flags = current_trace->flags->val; | 2341 | tracer_flags = current_trace->flags->val; |
| 2339 | trace_opts = current_trace->flags->opts; | 2342 | trace_opts = current_trace->flags->opts; |
| 2340 | 2343 | ||
| 2341 | /* | ||
| 2342 | * Increase the size with names of options specific | ||
| 2343 | * of the current tracer. | ||
| 2344 | */ | ||
| 2345 | for (i = 0; trace_opts[i].name; i++) { | ||
| 2346 | len += strlen(trace_opts[i].name); | ||
| 2347 | len += 3; /* "no" and newline */ | ||
| 2348 | } | ||
| 2349 | |||
| 2350 | /* +1 for \0 */ | ||
| 2351 | buf = kmalloc(len + 1, GFP_KERNEL); | ||
| 2352 | if (!buf) { | ||
| 2353 | mutex_unlock(&trace_types_lock); | ||
| 2354 | return -ENOMEM; | ||
| 2355 | } | ||
| 2356 | |||
| 2357 | for (i = 0; trace_options[i]; i++) { | 2344 | for (i = 0; trace_options[i]; i++) { |
| 2358 | if (trace_flags & (1 << i)) | 2345 | if (trace_flags & (1 << i)) |
| 2359 | r += sprintf(buf + r, "%s\n", trace_options[i]); | 2346 | seq_printf(m, "%s\n", trace_options[i]); |
| 2360 | else | 2347 | else |
| 2361 | r += sprintf(buf + r, "no%s\n", trace_options[i]); | 2348 | seq_printf(m, "no%s\n", trace_options[i]); |
| 2362 | } | 2349 | } |
| 2363 | 2350 | ||
| 2364 | for (i = 0; trace_opts[i].name; i++) { | 2351 | for (i = 0; trace_opts[i].name; i++) { |
| 2365 | if (tracer_flags & trace_opts[i].bit) | 2352 | if (tracer_flags & trace_opts[i].bit) |
| 2366 | r += sprintf(buf + r, "%s\n", | 2353 | seq_printf(m, "%s\n", trace_opts[i].name); |
| 2367 | trace_opts[i].name); | ||
| 2368 | else | 2354 | else |
| 2369 | r += sprintf(buf + r, "no%s\n", | 2355 | seq_printf(m, "no%s\n", trace_opts[i].name); |
| 2370 | trace_opts[i].name); | ||
| 2371 | } | 2356 | } |
| 2372 | mutex_unlock(&trace_types_lock); | 2357 | mutex_unlock(&trace_types_lock); |
| 2373 | 2358 | ||
| 2374 | WARN_ON(r >= len + 1); | 2359 | return 0; |
| 2360 | } | ||
| 2375 | 2361 | ||
| 2376 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2362 | static int __set_tracer_option(struct tracer *trace, |
| 2363 | struct tracer_flags *tracer_flags, | ||
| 2364 | struct tracer_opt *opts, int neg) | ||
| 2365 | { | ||
| 2366 | int ret; | ||
| 2377 | 2367 | ||
| 2378 | kfree(buf); | 2368 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); |
| 2379 | return r; | 2369 | if (ret) |
| 2370 | return ret; | ||
| 2371 | |||
| 2372 | if (neg) | ||
| 2373 | tracer_flags->val &= ~opts->bit; | ||
| 2374 | else | ||
| 2375 | tracer_flags->val |= opts->bit; | ||
| 2376 | return 0; | ||
| 2380 | } | 2377 | } |
| 2381 | 2378 | ||
| 2382 | /* Try to assign a tracer specific option */ | 2379 | /* Try to assign a tracer specific option */ |
| @@ -2384,33 +2381,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
| 2384 | { | 2381 | { |
| 2385 | struct tracer_flags *tracer_flags = trace->flags; | 2382 | struct tracer_flags *tracer_flags = trace->flags; |
| 2386 | struct tracer_opt *opts = NULL; | 2383 | struct tracer_opt *opts = NULL; |
| 2387 | int ret = 0, i = 0; | 2384 | int i; |
| 2388 | int len; | ||
| 2389 | 2385 | ||
| 2390 | for (i = 0; tracer_flags->opts[i].name; i++) { | 2386 | for (i = 0; tracer_flags->opts[i].name; i++) { |
| 2391 | opts = &tracer_flags->opts[i]; | 2387 | opts = &tracer_flags->opts[i]; |
| 2392 | len = strlen(opts->name); | ||
| 2393 | 2388 | ||
| 2394 | if (strncmp(cmp, opts->name, len) == 0) { | 2389 | if (strcmp(cmp, opts->name) == 0) |
| 2395 | ret = trace->set_flag(tracer_flags->val, | 2390 | return __set_tracer_option(trace, trace->flags, |
| 2396 | opts->bit, !neg); | 2391 | opts, neg); |
| 2397 | break; | ||
| 2398 | } | ||
| 2399 | } | 2392 | } |
| 2400 | /* Not found */ | ||
| 2401 | if (!tracer_flags->opts[i].name) | ||
| 2402 | return -EINVAL; | ||
| 2403 | |||
| 2404 | /* Refused to handle */ | ||
| 2405 | if (ret) | ||
| 2406 | return ret; | ||
| 2407 | |||
| 2408 | if (neg) | ||
| 2409 | tracer_flags->val &= ~opts->bit; | ||
| 2410 | else | ||
| 2411 | tracer_flags->val |= opts->bit; | ||
| 2412 | 2393 | ||
| 2413 | return 0; | 2394 | return -EINVAL; |
| 2414 | } | 2395 | } |
| 2415 | 2396 | ||
| 2416 | static void set_tracer_flags(unsigned int mask, int enabled) | 2397 | static void set_tracer_flags(unsigned int mask, int enabled) |
| @@ -2430,7 +2411,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2430 | size_t cnt, loff_t *ppos) | 2411 | size_t cnt, loff_t *ppos) |
| 2431 | { | 2412 | { |
| 2432 | char buf[64]; | 2413 | char buf[64]; |
| 2433 | char *cmp = buf; | 2414 | char *cmp; |
| 2434 | int neg = 0; | 2415 | int neg = 0; |
| 2435 | int ret; | 2416 | int ret; |
| 2436 | int i; | 2417 | int i; |
| @@ -2442,16 +2423,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2442 | return -EFAULT; | 2423 | return -EFAULT; |
| 2443 | 2424 | ||
| 2444 | buf[cnt] = 0; | 2425 | buf[cnt] = 0; |
| 2426 | cmp = strstrip(buf); | ||
| 2445 | 2427 | ||
| 2446 | if (strncmp(buf, "no", 2) == 0) { | 2428 | if (strncmp(cmp, "no", 2) == 0) { |
| 2447 | neg = 1; | 2429 | neg = 1; |
| 2448 | cmp += 2; | 2430 | cmp += 2; |
| 2449 | } | 2431 | } |
| 2450 | 2432 | ||
| 2451 | for (i = 0; trace_options[i]; i++) { | 2433 | for (i = 0; trace_options[i]; i++) { |
| 2452 | int len = strlen(trace_options[i]); | 2434 | if (strcmp(cmp, trace_options[i]) == 0) { |
| 2453 | |||
| 2454 | if (strncmp(cmp, trace_options[i], len) == 0) { | ||
| 2455 | set_tracer_flags(1 << i, !neg); | 2435 | set_tracer_flags(1 << i, !neg); |
| 2456 | break; | 2436 | break; |
| 2457 | } | 2437 | } |
| @@ -2471,9 +2451,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2471 | return cnt; | 2451 | return cnt; |
| 2472 | } | 2452 | } |
| 2473 | 2453 | ||
| 2454 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | ||
| 2455 | { | ||
| 2456 | if (tracing_disabled) | ||
| 2457 | return -ENODEV; | ||
| 2458 | return single_open(file, tracing_trace_options_show, NULL); | ||
| 2459 | } | ||
| 2460 | |||
| 2474 | static const struct file_operations tracing_iter_fops = { | 2461 | static const struct file_operations tracing_iter_fops = { |
| 2475 | .open = tracing_open_generic, | 2462 | .open = tracing_trace_options_open, |
| 2476 | .read = tracing_trace_options_read, | 2463 | .read = seq_read, |
| 2464 | .llseek = seq_lseek, | ||
| 2465 | .release = single_release, | ||
| 2477 | .write = tracing_trace_options_write, | 2466 | .write = tracing_trace_options_write, |
| 2478 | }; | 2467 | }; |
| 2479 | 2468 | ||
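The hunks above follow the standard conversion from a hand-rolled read callback (size the output, kmalloc, sprintf into it, simple_read_from_buffer) to a seq_file show routine behind single_open(): seq_file grows its buffer as needed, so the sizing pass and the WARN_ON(r >= len + 1) overflow check fall away. The same pattern is applied to trace_clock_fops further down. The general shape, as a self-contained sketch with hypothetical example_* names:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            /* Emit directly; seq_file handles allocation and partial reads. */
            seq_printf(m, "value: %d\n", 42);
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_fops = {
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };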
| @@ -3133,7 +3122,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | |||
| 3133 | __free_page(spd->pages[idx]); | 3122 | __free_page(spd->pages[idx]); |
| 3134 | } | 3123 | } |
| 3135 | 3124 | ||
| 3136 | static struct pipe_buf_operations tracing_pipe_buf_ops = { | 3125 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
| 3137 | .can_merge = 0, | 3126 | .can_merge = 0, |
| 3138 | .map = generic_pipe_buf_map, | 3127 | .map = generic_pipe_buf_map, |
| 3139 | .unmap = generic_pipe_buf_unmap, | 3128 | .unmap = generic_pipe_buf_unmap, |
| @@ -3392,21 +3381,18 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
| 3392 | return cnt; | 3381 | return cnt; |
| 3393 | } | 3382 | } |
| 3394 | 3383 | ||
| 3395 | static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf, | 3384 | static int tracing_clock_show(struct seq_file *m, void *v) |
| 3396 | size_t cnt, loff_t *ppos) | ||
| 3397 | { | 3385 | { |
| 3398 | char buf[64]; | ||
| 3399 | int bufiter = 0; | ||
| 3400 | int i; | 3386 | int i; |
| 3401 | 3387 | ||
| 3402 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | 3388 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) |
| 3403 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, | 3389 | seq_printf(m, |
| 3404 | "%s%s%s%s", i ? " " : "", | 3390 | "%s%s%s%s", i ? " " : "", |
| 3405 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, | 3391 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, |
| 3406 | i == trace_clock_id ? "]" : ""); | 3392 | i == trace_clock_id ? "]" : ""); |
| 3407 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n"); | 3393 | seq_putc(m, '\n'); |
| 3408 | 3394 | ||
| 3409 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter); | 3395 | return 0; |
| 3410 | } | 3396 | } |
| 3411 | 3397 | ||
| 3412 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 3398 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, |
| @@ -3448,6 +3434,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
| 3448 | return cnt; | 3434 | return cnt; |
| 3449 | } | 3435 | } |
| 3450 | 3436 | ||
| 3437 | static int tracing_clock_open(struct inode *inode, struct file *file) | ||
| 3438 | { | ||
| 3439 | if (tracing_disabled) | ||
| 3440 | return -ENODEV; | ||
| 3441 | return single_open(file, tracing_clock_show, NULL); | ||
| 3442 | } | ||
| 3443 | |||
| 3451 | static const struct file_operations tracing_max_lat_fops = { | 3444 | static const struct file_operations tracing_max_lat_fops = { |
| 3452 | .open = tracing_open_generic, | 3445 | .open = tracing_open_generic, |
| 3453 | .read = tracing_max_lat_read, | 3446 | .read = tracing_max_lat_read, |
| @@ -3486,8 +3479,10 @@ static const struct file_operations tracing_mark_fops = { | |||
| 3486 | }; | 3479 | }; |
| 3487 | 3480 | ||
| 3488 | static const struct file_operations trace_clock_fops = { | 3481 | static const struct file_operations trace_clock_fops = { |
| 3489 | .open = tracing_open_generic, | 3482 | .open = tracing_clock_open, |
| 3490 | .read = tracing_clock_read, | 3483 | .read = seq_read, |
| 3484 | .llseek = seq_lseek, | ||
| 3485 | .release = single_release, | ||
| 3491 | .write = tracing_clock_write, | 3486 | .write = tracing_clock_write, |
| 3492 | }; | 3487 | }; |
| 3493 | 3488 | ||
| @@ -3617,7 +3612,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | |||
| 3617 | } | 3612 | } |
| 3618 | 3613 | ||
| 3619 | /* Pipe buffer operations for a buffer. */ | 3614 | /* Pipe buffer operations for a buffer. */ |
| 3620 | static struct pipe_buf_operations buffer_pipe_buf_ops = { | 3615 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
| 3621 | .can_merge = 0, | 3616 | .can_merge = 0, |
| 3622 | .map = generic_pipe_buf_map, | 3617 | .map = generic_pipe_buf_map, |
| 3623 | .unmap = generic_pipe_buf_unmap, | 3618 | .unmap = generic_pipe_buf_unmap, |
| @@ -3948,39 +3943,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 3948 | if (ret < 0) | 3943 | if (ret < 0) |
| 3949 | return ret; | 3944 | return ret; |
| 3950 | 3945 | ||
| 3951 | ret = 0; | 3946 | if (val != 0 && val != 1) |
| 3952 | switch (val) { | 3947 | return -EINVAL; |
| 3953 | case 0: | ||
| 3954 | /* do nothing if already cleared */ | ||
| 3955 | if (!(topt->flags->val & topt->opt->bit)) | ||
| 3956 | break; | ||
| 3957 | |||
| 3958 | mutex_lock(&trace_types_lock); | ||
| 3959 | if (current_trace->set_flag) | ||
| 3960 | ret = current_trace->set_flag(topt->flags->val, | ||
| 3961 | topt->opt->bit, 0); | ||
| 3962 | mutex_unlock(&trace_types_lock); | ||
| 3963 | if (ret) | ||
| 3964 | return ret; | ||
| 3965 | topt->flags->val &= ~topt->opt->bit; | ||
| 3966 | break; | ||
| 3967 | case 1: | ||
| 3968 | /* do nothing if already set */ | ||
| 3969 | if (topt->flags->val & topt->opt->bit) | ||
| 3970 | break; | ||
| 3971 | 3948 | ||
| 3949 | if (!!(topt->flags->val & topt->opt->bit) != val) { | ||
| 3972 | mutex_lock(&trace_types_lock); | 3950 | mutex_lock(&trace_types_lock); |
| 3973 | if (current_trace->set_flag) | 3951 | ret = __set_tracer_option(current_trace, topt->flags, |
| 3974 | ret = current_trace->set_flag(topt->flags->val, | 3952 | topt->opt, val); |
| 3975 | topt->opt->bit, 1); | ||
| 3976 | mutex_unlock(&trace_types_lock); | 3953 | mutex_unlock(&trace_types_lock); |
| 3977 | if (ret) | 3954 | if (ret) |
| 3978 | return ret; | 3955 | return ret; |
| 3979 | topt->flags->val |= topt->opt->bit; | ||
| 3980 | break; | ||
| 3981 | |||
| 3982 | default: | ||
| 3983 | return -EINVAL; | ||
| 3984 | } | 3956 | } |
| 3985 | 3957 | ||
| 3986 | *ppos += cnt; | 3958 | *ppos += cnt; |
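The trace_options_write() rewrite above collapses two near-identical switch arms into one guarded call: "!!" normalizes the masked bit to exactly 0 or 1 so it can be compared against the already-validated val, and __set_tracer_option() runs only when the state would actually change. A runnable user-space illustration of the idiom:

    #include <stdio.h>

    int main(void)
    {
            unsigned int flags = 0x4;   /* option bit currently set */
            unsigned int bit   = 0x4;
            int val = 1;                /* user wrote "1" */

            /* "!!" collapses any non-zero mask result to exactly 1. */
            if (!!(flags & bit) != val)
                    printf("state differs: toggle via __set_tracer_option()\n");
            else
                    printf("already in requested state: skip the callback\n");
            return 0;
    }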
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index a52bed2eedd8..4df6a77eb196 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -597,18 +597,17 @@ enum trace_iterator_flags { | |||
| 597 | TRACE_ITER_BIN = 0x40, | 597 | TRACE_ITER_BIN = 0x40, |
| 598 | TRACE_ITER_BLOCK = 0x80, | 598 | TRACE_ITER_BLOCK = 0x80, |
| 599 | TRACE_ITER_STACKTRACE = 0x100, | 599 | TRACE_ITER_STACKTRACE = 0x100, |
| 600 | TRACE_ITER_SCHED_TREE = 0x200, | 600 | TRACE_ITER_PRINTK = 0x200, |
| 601 | TRACE_ITER_PRINTK = 0x400, | 601 | TRACE_ITER_PREEMPTONLY = 0x400, |
| 602 | TRACE_ITER_PREEMPTONLY = 0x800, | 602 | TRACE_ITER_BRANCH = 0x800, |
| 603 | TRACE_ITER_BRANCH = 0x1000, | 603 | TRACE_ITER_ANNOTATE = 0x1000, |
| 604 | TRACE_ITER_ANNOTATE = 0x2000, | 604 | TRACE_ITER_USERSTACKTRACE = 0x2000, |
| 605 | TRACE_ITER_USERSTACKTRACE = 0x4000, | 605 | TRACE_ITER_SYM_USEROBJ = 0x4000, |
| 606 | TRACE_ITER_SYM_USEROBJ = 0x8000, | 606 | TRACE_ITER_PRINTK_MSGONLY = 0x8000, |
| 607 | TRACE_ITER_PRINTK_MSGONLY = 0x10000, | 607 | TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */ |
| 608 | TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */ | 608 | TRACE_ITER_LATENCY_FMT = 0x20000, |
| 609 | TRACE_ITER_LATENCY_FMT = 0x40000, | 609 | TRACE_ITER_SLEEP_TIME = 0x40000, |
| 610 | TRACE_ITER_SLEEP_TIME = 0x80000, | 610 | TRACE_ITER_GRAPH_TIME = 0x80000, |
| 611 | TRACE_ITER_GRAPH_TIME = 0x100000, | ||
| 612 | }; | 611 | }; |
| 613 | 612 | ||
| 614 | /* | 613 | /* |
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index d9c60f80aa0d..9e25573242cf 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
| @@ -25,7 +25,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) | |||
| 25 | char *buf; | 25 | char *buf; |
| 26 | int ret = -ENOMEM; | 26 | int ret = -ENOMEM; |
| 27 | 27 | ||
| 28 | if (atomic_inc_return(&event->profile_count)) | 28 | if (event->profile_count++ > 0) |
| 29 | return 0; | 29 | return 0; |
| 30 | 30 | ||
| 31 | if (!total_profile_count) { | 31 | if (!total_profile_count) { |
| @@ -56,7 +56,7 @@ fail_buf_nmi: | |||
| 56 | perf_trace_buf = NULL; | 56 | perf_trace_buf = NULL; |
| 57 | } | 57 | } |
| 58 | fail_buf: | 58 | fail_buf: |
| 59 | atomic_dec(&event->profile_count); | 59 | event->profile_count--; |
| 60 | 60 | ||
| 61 | return ret; | 61 | return ret; |
| 62 | } | 62 | } |
| @@ -83,7 +83,7 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event) | |||
| 83 | { | 83 | { |
| 84 | char *buf, *nmi_buf; | 84 | char *buf, *nmi_buf; |
| 85 | 85 | ||
| 86 | if (!atomic_add_negative(-1, &event->profile_count)) | 86 | if (--event->profile_count > 0) |
| 87 | return; | 87 | return; |
| 88 | 88 | ||
| 89 | event->profile_disable(event); | 89 | event->profile_disable(event); |
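profile_count drops from an atomic_t to a plain int here; the enable/disable paths are serialized by their callers (my reading of the patch, the locking is not visible in these hunks), so atomic ops buy nothing. The initial value also changes from -1, set by the atomic_set() removed from trace_kprobe.c below, to the default 0, which is why the tests flip from atomic_inc_return()/atomic_add_negative() to post-increment and pre-decrement comparisons. A runnable demonstration that the two first-enable tests agree:

    #include <stdio.h>

    int main(void)
    {
            int old_count = -1;  /* old scheme: atomic_t initialized to -1 */
            int new_count = 0;   /* new scheme: plain int, default 0 */

            /* Old test: atomic_inc_return() != 0 means "already enabled". */
            if (++old_count)
                    printf("old: already enabled\n");
            else
                    printf("old: first enable\n");

            /* New test: post-increment, compare the prior value against 0. */
            if (new_count++ > 0)
                    printf("new: already enabled\n");
            else
                    printf("new: first enable\n");
            return 0;
    }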
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 1d18315dc836..189b09baf4fb 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -78,7 +78,7 @@ EXPORT_SYMBOL_GPL(trace_define_field); | |||
| 78 | if (ret) \ | 78 | if (ret) \ |
| 79 | return ret; | 79 | return ret; |
| 80 | 80 | ||
| 81 | int trace_define_common_fields(struct ftrace_event_call *call) | 81 | static int trace_define_common_fields(struct ftrace_event_call *call) |
| 82 | { | 82 | { |
| 83 | int ret; | 83 | int ret; |
| 84 | struct trace_entry ent; | 84 | struct trace_entry ent; |
| @@ -91,7 +91,6 @@ int trace_define_common_fields(struct ftrace_event_call *call) | |||
| 91 | 91 | ||
| 92 | return ret; | 92 | return ret; |
| 93 | } | 93 | } |
| 94 | EXPORT_SYMBOL_GPL(trace_define_common_fields); | ||
| 95 | 94 | ||
| 96 | void trace_destroy_fields(struct ftrace_event_call *call) | 95 | void trace_destroy_fields(struct ftrace_event_call *call) |
| 97 | { | 96 | { |
| @@ -105,9 +104,25 @@ void trace_destroy_fields(struct ftrace_event_call *call) | |||
| 105 | } | 104 | } |
| 106 | } | 105 | } |
| 107 | 106 | ||
| 108 | static void ftrace_event_enable_disable(struct ftrace_event_call *call, | 107 | int trace_event_raw_init(struct ftrace_event_call *call) |
| 108 | { | ||
| 109 | int id; | ||
| 110 | |||
| 111 | id = register_ftrace_event(call->event); | ||
| 112 | if (!id) | ||
| 113 | return -ENODEV; | ||
| 114 | call->id = id; | ||
| 115 | INIT_LIST_HEAD(&call->fields); | ||
| 116 | |||
| 117 | return 0; | ||
| 118 | } | ||
| 119 | EXPORT_SYMBOL_GPL(trace_event_raw_init); | ||
| 120 | |||
| 121 | static int ftrace_event_enable_disable(struct ftrace_event_call *call, | ||
| 109 | int enable) | 122 | int enable) |
| 110 | { | 123 | { |
| 124 | int ret = 0; | ||
| 125 | |||
| 111 | switch (enable) { | 126 | switch (enable) { |
| 112 | case 0: | 127 | case 0: |
| 113 | if (call->enabled) { | 128 | if (call->enabled) { |
| @@ -118,12 +133,20 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call, | |||
| 118 | break; | 133 | break; |
| 119 | case 1: | 134 | case 1: |
| 120 | if (!call->enabled) { | 135 | if (!call->enabled) { |
| 121 | call->enabled = 1; | ||
| 122 | tracing_start_cmdline_record(); | 136 | tracing_start_cmdline_record(); |
| 123 | call->regfunc(call); | 137 | ret = call->regfunc(call); |
| 138 | if (ret) { | ||
| 139 | tracing_stop_cmdline_record(); | ||
| 140 | pr_info("event trace: Could not enable event " | ||
| 141 | "%s\n", call->name); | ||
| 142 | break; | ||
| 143 | } | ||
| 144 | call->enabled = 1; | ||
| 124 | } | 145 | } |
| 125 | break; | 146 | break; |
| 126 | } | 147 | } |
| 148 | |||
| 149 | return ret; | ||
| 127 | } | 150 | } |
| 128 | 151 | ||
| 129 | static void ftrace_clear_events(void) | 152 | static void ftrace_clear_events(void) |
| @@ -402,7 +425,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 402 | case 0: | 425 | case 0: |
| 403 | case 1: | 426 | case 1: |
| 404 | mutex_lock(&event_mutex); | 427 | mutex_lock(&event_mutex); |
| 405 | ftrace_event_enable_disable(call, val); | 428 | ret = ftrace_event_enable_disable(call, val); |
| 406 | mutex_unlock(&event_mutex); | 429 | mutex_unlock(&event_mutex); |
| 407 | break; | 430 | break; |
| 408 | 431 | ||
| @@ -412,7 +435,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 412 | 435 | ||
| 413 | *ppos += cnt; | 436 | *ppos += cnt; |
| 414 | 437 | ||
| 415 | return cnt; | 438 | return ret ? ret : cnt; |
| 416 | } | 439 | } |
| 417 | 440 | ||
| 418 | static ssize_t | 441 | static ssize_t |
| @@ -913,7 +936,9 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
| 913 | id); | 936 | id); |
| 914 | 937 | ||
| 915 | if (call->define_fields) { | 938 | if (call->define_fields) { |
| 916 | ret = call->define_fields(call); | 939 | ret = trace_define_common_fields(call); |
| 940 | if (!ret) | ||
| 941 | ret = call->define_fields(call); | ||
| 917 | if (ret < 0) { | 942 | if (ret < 0) { |
| 918 | pr_warning("Could not initialize trace point" | 943 | pr_warning("Could not initialize trace point" |
| 919 | " events/%s\n", call->name); | 944 | " events/%s\n", call->name); |
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index dff8c84ddf17..458e5bfe26d0 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
| @@ -184,10 +184,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ | |||
| 184 | struct struct_name field; \ | 184 | struct struct_name field; \ |
| 185 | int ret; \ | 185 | int ret; \ |
| 186 | \ | 186 | \ |
| 187 | ret = trace_define_common_fields(event_call); \ | ||
| 188 | if (ret) \ | ||
| 189 | return ret; \ | ||
| 190 | \ | ||
| 191 | tstruct; \ | 187 | tstruct; \ |
| 192 | \ | 188 | \ |
| 193 | return ret; \ | 189 | return ret; \ |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 3aa7eaa2114c..2974bc7538c7 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -151,6 +151,8 @@ check_critical_timing(struct trace_array *tr, | |||
| 151 | goto out_unlock; | 151 | goto out_unlock; |
| 152 | 152 | ||
| 153 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); | 153 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
| 154 | /* Skip 5 functions to get to the irq/preempt enable function */ | ||
| 155 | __trace_stack(tr, flags, 5, pc); | ||
| 154 | 156 | ||
| 155 | if (data->critical_sequence != max_sequence) | 157 | if (data->critical_sequence != max_sequence) |
| 156 | goto out_unlock; | 158 | goto out_unlock; |
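The added __trace_stack() records the stack at the point interrupts or preemption were re-enabled; per the new comment, the skip count of 5 drops the tracing-internal call frames so the trace begins at the enabling function itself. For intuition only, a rough user-space analogue of frame skipping using glibc's backtrace() (the kernel mechanism is unrelated code):

    #include <execinfo.h>

    /*
     * Print the current stack to stdout, dropping the innermost "skip"
     * frames, loosely mirroring __trace_stack()'s skip argument.
     */
    static void dump_stack_skip(int skip)
    {
            void *frames[32];
            int n = backtrace(frames, 32);

            if (n > skip)
                    backtrace_symbols_fd(frames + skip, n - skip, 1);
    }

    int main(void)
    {
            dump_stack_skip(1);    /* hide dump_stack_skip() itself */
            return 0;
    }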
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b52d397e57eb..7ecab06547a5 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -1132,10 +1132,6 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 1132 | struct kprobe_trace_entry field; | 1132 | struct kprobe_trace_entry field; |
| 1133 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1133 | struct trace_probe *tp = (struct trace_probe *)event_call->data; |
| 1134 | 1134 | ||
| 1135 | ret = trace_define_common_fields(event_call); | ||
| 1136 | if (ret) | ||
| 1137 | return ret; | ||
| 1138 | |||
| 1139 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 1135 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
| 1140 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); | 1136 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); |
| 1141 | /* Set argument names as fields */ | 1137 | /* Set argument names as fields */ |
| @@ -1150,10 +1146,6 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 1150 | struct kretprobe_trace_entry field; | 1146 | struct kretprobe_trace_entry field; |
| 1151 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1147 | struct trace_probe *tp = (struct trace_probe *)event_call->data; |
| 1152 | 1148 | ||
| 1153 | ret = trace_define_common_fields(event_call); | ||
| 1154 | if (ret) | ||
| 1155 | return ret; | ||
| 1156 | |||
| 1157 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); | 1149 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); |
| 1158 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); | 1150 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); |
| 1159 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); | 1151 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); |
| @@ -1453,7 +1445,6 @@ static int register_probe_event(struct trace_probe *tp) | |||
| 1453 | call->unregfunc = probe_event_disable; | 1445 | call->unregfunc = probe_event_disable; |
| 1454 | 1446 | ||
| 1455 | #ifdef CONFIG_EVENT_PROFILE | 1447 | #ifdef CONFIG_EVENT_PROFILE |
| 1456 | atomic_set(&call->profile_count, -1); | ||
| 1457 | call->profile_enable = probe_profile_enable; | 1448 | call->profile_enable = probe_profile_enable; |
| 1458 | call->profile_disable = probe_profile_disable; | 1449 | call->profile_disable = probe_profile_disable; |
| 1459 | #endif | 1450 | #endif |
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index acb87d4a4ac1..faf37fa4408c 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c | |||
| @@ -236,7 +236,8 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, | |||
| 236 | mutex_lock(&ksym_tracer_mutex); | 236 | mutex_lock(&ksym_tracer_mutex); |
| 237 | 237 | ||
| 238 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | 238 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { |
| 239 | ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr); | 239 | ret = trace_seq_printf(s, "%pS:", |
| 240 | (void *)(unsigned long)entry->attr.bp_addr); | ||
| 240 | if (entry->attr.bp_type == HW_BREAKPOINT_R) | 241 | if (entry->attr.bp_type == HW_BREAKPOINT_R) |
| 241 | ret = trace_seq_puts(s, "r--\n"); | 242 | ret = trace_seq_puts(s, "r--\n"); |
| 242 | else if (entry->attr.bp_type == HW_BREAKPOINT_W) | 243 | else if (entry->attr.bp_type == HW_BREAKPOINT_W) |
| @@ -278,21 +279,20 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 278 | { | 279 | { |
| 279 | struct trace_ksym *entry; | 280 | struct trace_ksym *entry; |
| 280 | struct hlist_node *node; | 281 | struct hlist_node *node; |
| 281 | char *input_string, *ksymname = NULL; | 282 | char *buf, *input_string, *ksymname = NULL; |
| 282 | unsigned long ksym_addr = 0; | 283 | unsigned long ksym_addr = 0; |
| 283 | int ret, op, changed = 0; | 284 | int ret, op, changed = 0; |
| 284 | 285 | ||
| 285 | input_string = kzalloc(count + 1, GFP_KERNEL); | 286 | buf = kzalloc(count + 1, GFP_KERNEL); |
| 286 | if (!input_string) | 287 | if (!buf) |
| 287 | return -ENOMEM; | 288 | return -ENOMEM; |
| 288 | 289 | ||
| 289 | if (copy_from_user(input_string, buffer, count)) { | 290 | ret = -EFAULT; |
| 290 | kfree(input_string); | 291 | if (copy_from_user(buf, buffer, count)) |
| 291 | return -EFAULT; | 292 | goto out; |
| 292 | } | ||
| 293 | input_string[count] = '\0'; | ||
| 294 | 293 | ||
| 295 | strstrip(input_string); | 294 | buf[count] = '\0'; |
| 295 | input_string = strstrip(buf); | ||
| 296 | 296 | ||
| 297 | /* | 297 | /* |
| 298 | * Clear all breakpoints if: | 298 | * Clear all breakpoints if: |
| @@ -300,18 +300,16 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 300 | * 2: echo 0 > ksym_trace_filter | 300 | * 2: echo 0 > ksym_trace_filter |
| 301 | * 3: echo "*:---" > ksym_trace_filter | 301 | * 3: echo "*:---" > ksym_trace_filter |
| 302 | */ | 302 | */ |
| 303 | if (!input_string[0] || !strcmp(input_string, "0") || | 303 | if (!buf[0] || !strcmp(buf, "0") || |
| 304 | !strcmp(input_string, "*:---")) { | 304 | !strcmp(buf, "*:---")) { |
| 305 | __ksym_trace_reset(); | 305 | __ksym_trace_reset(); |
| 306 | kfree(input_string); | 306 | ret = 0; |
| 307 | return count; | 307 | goto out; |
| 308 | } | 308 | } |
| 309 | 309 | ||
| 310 | ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); | 310 | ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); |
| 311 | if (ret < 0) { | 311 | if (ret < 0) |
| 312 | kfree(input_string); | 312 | goto out; |
| 313 | return ret; | ||
| 314 | } | ||
| 315 | 313 | ||
| 316 | mutex_lock(&ksym_tracer_mutex); | 314 | mutex_lock(&ksym_tracer_mutex); |
| 317 | 315 | ||
| @@ -322,7 +320,7 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 322 | if (entry->attr.bp_type != op) | 320 | if (entry->attr.bp_type != op) |
| 323 | changed = 1; | 321 | changed = 1; |
| 324 | else | 322 | else |
| 325 | goto out; | 323 | goto out_unlock; |
| 326 | break; | 324 | break; |
| 327 | } | 325 | } |
| 328 | } | 326 | } |
| @@ -337,28 +335,24 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 337 | if (IS_ERR(entry->ksym_hbp)) | 335 | if (IS_ERR(entry->ksym_hbp)) |
| 338 | ret = PTR_ERR(entry->ksym_hbp); | 336 | ret = PTR_ERR(entry->ksym_hbp); |
| 339 | else | 337 | else |
| 340 | goto out; | 338 | goto out_unlock; |
| 341 | } | 339 | } |
| 342 | /* Error or "symbol:---" case: drop it */ | 340 | /* Error or "symbol:---" case: drop it */ |
| 343 | ksym_filter_entry_count--; | 341 | ksym_filter_entry_count--; |
| 344 | hlist_del_rcu(&(entry->ksym_hlist)); | 342 | hlist_del_rcu(&(entry->ksym_hlist)); |
| 345 | synchronize_rcu(); | 343 | synchronize_rcu(); |
| 346 | kfree(entry); | 344 | kfree(entry); |
| 347 | goto out; | 345 | goto out_unlock; |
| 348 | } else { | 346 | } else { |
| 349 | /* Check for malformed request: (4) */ | 347 | /* Check for malformed request: (4) */ |
| 350 | if (op == 0) | 348 | if (op) |
| 351 | goto out; | 349 | ret = process_new_ksym_entry(ksymname, op, ksym_addr); |
| 352 | ret = process_new_ksym_entry(ksymname, op, ksym_addr); | ||
| 353 | } | 350 | } |
| 354 | out: | 351 | out_unlock: |
| 355 | mutex_unlock(&ksym_tracer_mutex); | 352 | mutex_unlock(&ksym_tracer_mutex); |
| 356 | 353 | out: | |
| 357 | kfree(input_string); | 354 | kfree(buf); |
| 358 | 355 | return !ret ? count : ret; | |
| 359 | if (!ret) | ||
| 360 | ret = count; | ||
| 361 | return ret; | ||
| 362 | } | 356 | } |
| 363 | 357 | ||
| 364 | static const struct file_operations ksym_tracing_fops = { | 358 | static const struct file_operations ksym_tracing_fops = { |
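Besides consolidating the error paths onto the out/out_unlock labels, the ksym filter rewrite above fixes a real bug: strstrip() trims trailing whitespace in place but skips leading whitespace only by advancing the returned pointer, so the old code, which discarded the return value, kept parsing from the untrimmed start of the buffer. A runnable user-space reimplementation showing why the return value matters:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* User-space reimplementation of the kernel strstrip() semantics. */
    static char *strstrip_demo(char *s)
    {
            size_t len = strlen(s);

            /* Trailing whitespace is cut in place... */
            while (len && isspace((unsigned char)s[len - 1]))
                    s[--len] = '\0';

            /* ...but leading whitespace is skipped only in the return value. */
            while (isspace((unsigned char)*s))
                    s++;
            return s;
    }

    int main(void)
    {
            char buf[] = "  symbol:rw-  \n";
            char *input_string = strstrip_demo(buf);

            printf("[%s]\n", input_string);  /* [symbol:rw-]            */
            printf("[%s]\n", buf);           /* [  symbol:rw-], stale   */
            return 0;
    }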
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 57501d90096a..75289f372dd2 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -217,10 +217,6 @@ int syscall_enter_define_fields(struct ftrace_event_call *call) | |||
| 217 | int i; | 217 | int i; |
| 218 | int offset = offsetof(typeof(trace), args); | 218 | int offset = offsetof(typeof(trace), args); |
| 219 | 219 | ||
| 220 | ret = trace_define_common_fields(call); | ||
| 221 | if (ret) | ||
| 222 | return ret; | ||
| 223 | |||
| 224 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); | 220 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); |
| 225 | if (ret) | 221 | if (ret) |
| 226 | return ret; | 222 | return ret; |
| @@ -241,10 +237,6 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) | |||
| 241 | struct syscall_trace_exit trace; | 237 | struct syscall_trace_exit trace; |
| 242 | int ret; | 238 | int ret; |
| 243 | 239 | ||
| 244 | ret = trace_define_common_fields(call); | ||
| 245 | if (ret) | ||
| 246 | return ret; | ||
| 247 | |||
| 248 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); | 240 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); |
| 249 | if (ret) | 241 | if (ret) |
| 250 | return ret; | 242 | return ret; |
| @@ -333,10 +325,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) | |||
| 333 | mutex_lock(&syscall_trace_lock); | 325 | mutex_lock(&syscall_trace_lock); |
| 334 | if (!sys_refcount_enter) | 326 | if (!sys_refcount_enter) |
| 335 | ret = register_trace_sys_enter(ftrace_syscall_enter); | 327 | ret = register_trace_sys_enter(ftrace_syscall_enter); |
| 336 | if (ret) { | 328 | if (!ret) { |
| 337 | pr_info("event trace: Could not activate" | ||
| 338 | "syscall entry trace point"); | ||
| 339 | } else { | ||
| 340 | set_bit(num, enabled_enter_syscalls); | 329 | set_bit(num, enabled_enter_syscalls); |
| 341 | sys_refcount_enter++; | 330 | sys_refcount_enter++; |
| 342 | } | 331 | } |
| @@ -370,10 +359,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) | |||
| 370 | mutex_lock(&syscall_trace_lock); | 359 | mutex_lock(&syscall_trace_lock); |
| 371 | if (!sys_refcount_exit) | 360 | if (!sys_refcount_exit) |
| 372 | ret = register_trace_sys_exit(ftrace_syscall_exit); | 361 | ret = register_trace_sys_exit(ftrace_syscall_exit); |
| 373 | if (ret) { | 362 | if (!ret) { |
| 374 | pr_info("event trace: Could not activate" | ||
| 375 | "syscall exit trace point"); | ||
| 376 | } else { | ||
| 377 | set_bit(num, enabled_exit_syscalls); | 363 | set_bit(num, enabled_exit_syscalls); |
| 378 | sys_refcount_exit++; | 364 | sys_refcount_exit++; |
| 379 | } | 365 | } |
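In both reg_event_syscall_enter() and reg_event_syscall_exit() the sense of the test is inverted: the failure message (which was itself malformed, missing a space between its two string literals) is dropped, and the refcount/bitmap bookkeeping moves into the success branch, so a shared tracepoint probe is registered by the first enabled event and merely counted by the rest. Condensed from the patched enter path, the pattern is:

    mutex_lock(&syscall_trace_lock);
    if (!sys_refcount_enter)    /* first enabled event registers the probe */
            ret = register_trace_sys_enter(ftrace_syscall_enter);
    if (!ret) {
            /* Count the event only once registration is known good. */
            set_bit(num, enabled_enter_syscalls);
            sys_refcount_enter++;
    }
    mutex_unlock(&syscall_trace_lock);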