Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/kprobes.c            |  4
 -rw-r--r--  kernel/module.c             | 26
 -rw-r--r--  kernel/trace/ring_buffer.c  |  9
 -rw-r--r--  kernel/trace/trace.c        | 35
 -rw-r--r--  kernel/tracepoint.c         |  7
 5 files changed, 50 insertions(+), 31 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 479d4d5672f9..5016bfb682b9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -919,10 +919,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 	ri->rp = rp;
 	ri->task = current;
 
-	if (rp->entry_handler && rp->entry_handler(ri, regs)) {
-		spin_unlock_irqrestore(&rp->lock, flags);
+	if (rp->entry_handler && rp->entry_handler(ri, regs))
 		return 0;
-	}
 
 	arch_prepare_kretprobe(ri, regs);
 
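The kprobes hunk fixes a locking imbalance on the kretprobe entry path: rp->lock is already dropped once a free instance has been unhooked, before the entry handler runs, so the extra spin_unlock_irqrestore() on the early-return path released a lock that was no longer held. A minimal sketch of the corrected flow (reconstructed from kernels of this era, not quoted verbatim):

	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags); /* lock dropped here */

		ri->rp = rp;
		ri->task = current;

		/* Unlocking again on this path would be unbalanced. */
		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);
	}
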
diff --git a/kernel/module.c b/kernel/module.c
index 8b742f2b3845..7fa134e0cc24 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2049,14 +2049,6 @@ static noinline struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto free_mod;
 
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
-				      mod->name);
-	if (!mod->refptr) {
-		err = -ENOMEM;
-		goto free_mod;
-	}
-#endif
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@@ -2064,7 +2056,7 @@ static noinline struct module *load_module(void __user *umod,
 					 mod->name);
 		if (!percpu) {
 			err = -ENOMEM;
-			goto free_percpu;
+			goto free_mod;
 		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 		mod->percpu = percpu;
@@ -2116,6 +2108,14 @@ static noinline struct module *load_module(void __user *umod,
 	/* Module has been moved. */
 	mod = (void *)sechdrs[modindex].sh_addr;
 
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
+				      mod->name);
+	if (!mod->refptr) {
+		err = -ENOMEM;
+		goto free_init;
+	}
+#endif
 	/* Now we've moved module, initialize linked lists, etc. */
 	module_unload_init(mod);
 
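Taken together, the first three module.c hunks move the mod->refptr allocation from before to after the point where the module image is copied to its final location. Until mod = (void *)sechdrs[modindex].sh_addr runs, mod still points into the temporary copy of the ELF image, so a pointer stored through it would not survive the move. Two knock-on fixes follow: the percpu failure branch now jumps straight to free_mod, since nothing needing percpu_modfree() has been allocated at that point, and the new allocation site unwinds through a new free_init label. In outline (a sketch of the ordering, not the full function):

	/* before this line, mod points into the temporary ELF image */
	mod = (void *)sechdrs[modindex].sh_addr; /* module has been moved */

	/* only now is it safe to store long-lived pointers through mod */
	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
				      mod->name);
	if (!mod->refptr) {
		err = -ENOMEM;
		goto free_init;	/* init and core areas already exist */
	}
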
@@ -2322,15 +2322,17 @@ static noinline struct module *load_module(void __user *umod,
 	ftrace_release(mod->module_core, mod->core_size);
 free_unload:
 	module_unload_free(mod);
+free_init:
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod->refptr);
+#endif
 	module_free(mod, mod->module_init);
 free_core:
 	module_free(mod, mod->module_core);
+	/* mod will be freed with core. Don't access it beyond this line! */
 free_percpu:
 	if (percpu)
 		percpu_modfree(percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	percpu_modfree(mod->refptr);
-#endif
 free_mod:
 	kfree(args);
 free_hdr:
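The reworked error path keeps the unwind labels in strict reverse order of allocation: free_init releases mod->refptr before the init and core images are freed, and the added comment flags that mod itself lives inside the core image, so the old code freeing mod->refptr under free_percpu (after free_core) dereferenced already-freed memory. The general kernel idiom, as a compilable user-space sketch (struct pair and its fields are invented for illustration):

	#include <stdlib.h>

	struct pair { int *a; int *b; };

	static int init_pair(struct pair *p)
	{
		p->a = malloc(sizeof(*p->a));
		if (!p->a)
			goto fail;	/* nothing allocated yet */
		p->b = malloc(sizeof(*p->b));
		if (!p->b)
			goto free_a;	/* undo only what exists */
		return 0;
	free_a:
		free(p->a);
	fail:
		return -1;
	}
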
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bbf51922a8ca..384ca5d9d729 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -577,8 +577,17 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
+	/*
+	 * In case of non-hotplug cpu, if the ring-buffer is allocated
+	 * in early initcall, it will not be notified of secondary cpus.
+	 * In that off case, we need to allocate for all possible cpus.
+	 */
+#ifdef CONFIG_HOTPLUG_CPU
 	get_online_cpus();
 	cpumask_copy(buffer->cpumask, cpu_online_mask);
+#else
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
+#endif
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
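The ring-buffer hunk covers the no-hotplug case: with CONFIG_HOTPLUG_CPU the buffer starts out with the CPUs online at allocation time and per-cpu buffers for later CPUs arrive via hotplug notifications, but without hotplug support no such notification ever comes, so a buffer allocated from an early initcall (before secondary CPUs are brought up) would permanently miss them. Copying cpu_possible_mask allocates for every CPU up front. The same pattern applies to any early-allocated per-cpu resource (res and its cpumask field are hypothetical):

#ifdef CONFIG_HOTPLUG_CPU
	/* CPUs online now; a hotplug notifier handles later arrivals. */
	cpumask_copy(res->cpumask, cpu_online_mask);
#else
	/* No notification will ever come: cover every possible CPU now. */
	cpumask_copy(res->cpumask, cpu_possible_mask);
#endif
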
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f7f359d45823..e3dfefe69348 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -641,6 +641,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 }
 
 #define SAVED_CMDLINES 128
+#define NO_CMDLINE_MAP UINT_MAX
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
@@ -652,8 +653,8 @@ static atomic_t trace_record_cmdline_disabled __read_mostly;
 
 static void trace_init_cmdlines(void)
 {
-	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
-	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
+	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
+	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
 	cmdline_idx = 0;
 }
 
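Passing NO_CMDLINE_MAP (UINT_MAX) to memset() works because memset() only uses the low byte of its int argument: UINT_MAX truncates to 0xff, and filling every byte of an unsigned array with 0xff yields UINT_MAX in each element, exactly what the old memset(..., -1, ...) produced. The define just names the sentinel. A self-contained user-space check of that byte-fill behaviour:

	#include <assert.h>
	#include <limits.h>
	#include <string.h>

	int main(void)
	{
		unsigned map[4];

		/* Only the low byte (0xff) of UINT_MAX is used by memset. */
		memset(map, UINT_MAX, sizeof(map));
		assert(map[0] == UINT_MAX && map[3] == UINT_MAX);
		return 0;
	}
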
@@ -745,8 +746,7 @@ void trace_stop_cmdline_recording(void);
 
 static void trace_save_cmdline(struct task_struct *tsk)
 {
-	unsigned map;
-	unsigned idx;
+	unsigned pid, idx;
 
 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
 		return;
@@ -761,13 +761,20 @@ static void trace_save_cmdline(struct task_struct *tsk)
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
-	if (idx >= SAVED_CMDLINES) {
+	if (idx == NO_CMDLINE_MAP) {
 		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
 
-		map = map_cmdline_to_pid[idx];
-		if (map <= PID_MAX_DEFAULT)
-			map_pid_to_cmdline[map] = (unsigned)-1;
+		/*
+		 * Check whether the cmdline buffer at idx has a pid
+		 * mapped. We are going to overwrite that entry so we
+		 * need to clear the map_pid_to_cmdline. Otherwise we
+		 * would read the new comm for the old pid.
+		 */
+		pid = map_cmdline_to_pid[idx];
+		if (pid != NO_CMDLINE_MAP)
+			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
 
+		map_cmdline_to_pid[idx] = tsk->pid;
 		map_pid_to_cmdline[tsk->pid] = idx;
 
 		cmdline_idx = idx;
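trace_save_cmdline() maintains a pair of maps: map_pid_to_cmdline indexes by pid into the small saved_cmdlines ring, and map_cmdline_to_pid records which pid currently owns each slot. The hunk fixes two problems: the slot's previous owner is now cleared from the forward map (otherwise a stale pid would resolve to the new owner's comm), and the reverse map is actually written, which the old code never did. A user-space model of the bookkeeping (names and sizes simplified; both arrays start filled with NO_MAP, as trace_init_cmdlines() does):

	#include <limits.h>

	#define SLOTS	128
	#define MAX_PID	32768
	#define NO_MAP	UINT_MAX

	static unsigned pid_to_slot[MAX_PID + 1];
	static unsigned slot_to_pid[SLOTS];
	static unsigned next_idx;

	static void remember(unsigned pid)
	{
		unsigned idx = pid_to_slot[pid];

		if (idx == NO_MAP) {
			unsigned old;

			idx = (next_idx + 1) % SLOTS;

			/* Evict the slot's old owner from the forward map. */
			old = slot_to_pid[idx];
			if (old != NO_MAP)
				pid_to_slot[old] = NO_MAP;

			slot_to_pid[idx] = pid;	/* the update the old code missed */
			pid_to_slot[pid] = idx;
			next_idx = idx;
		}
		/* the comm string would be copied into slot idx here */
	}
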
@@ -794,18 +801,18 @@ void trace_find_cmdline(int pid, char comm[])
 
 	__raw_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
-	if (map >= SAVED_CMDLINES)
-		goto out;
-
-	strcpy(comm, saved_cmdlines[map]);
+	if (map != NO_CMDLINE_MAP)
+		strcpy(comm, saved_cmdlines[map]);
+	else
+		strcpy(comm, "<...>");
 
- out:
 	__raw_spin_unlock(&trace_cmdline_lock);
 }
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
+	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
+	    !tracing_is_on())
 		return;
 
 	trace_save_cmdline(tsk);
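trace_find_cmdline() previously left the caller's comm buffer untouched when a pid had no mapping (the goto out path), handing back whatever happened to be in it; it now always writes a string, falling back to the "<...>" placeholder. The extra !tracer_enabled test in tracing_record_cmdline() stops cmdline recording as soon as the tracer is disabled rather than only when the ring buffer is off. A caller-side illustration of the strengthened contract (hypothetical usage):

	char comm[TASK_COMM_LEN];	/* may contain stack garbage */

	trace_find_cmdline(pid, comm);
	/* comm is now always a valid string: a saved name or "<...>" */
	pr_debug("pid %d -> %s\n", pid, comm);
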
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 79602740bbb5..1ef5d3a601c7 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -272,12 +272,15 @@ static void disable_tracepoint(struct tracepoint *elem)
  *
  * Updates the probe callback corresponding to a range of tracepoints.
  */
-void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end)
+void
+tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
 {
 	struct tracepoint *iter;
 	struct tracepoint_entry *mark_entry;
 
+	if (!begin)
+		return;
+
 	mutex_lock(&tracepoints_mutex);
 	for (iter = begin; iter < end; iter++) {
 		mark_entry = get_tracepoint(iter->name);
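The guard makes tracepoint_update_probe_range() safe for code that contains no tracepoints: when the tracepoint section is absent, the begin pointer passed in is NULL, so the function now bails out before taking tracepoints_mutex instead of relying on the begin < end loop bound alone. A sketch of the calling side (the field names follow the usual struct module layout of this era, but treat them as illustrative):

	/* module with no tracepoints: the section pointer may be NULL */
	tracepoint_update_probe_range(mod->tracepoints,
				      mod->tracepoints + mod->num_tracepoints);
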