author		David S. Miller <davem@davemloft.net>	2013-03-27 13:52:49 -0400
committer	David S. Miller <davem@davemloft.net>	2013-03-27 13:52:49 -0400
commit		e2a553dbf18a5177fdebe29495c32a8e7fd3a4db (patch)
tree		5ccb3d498325a7aaf93f49549eca03cb7861ca1c /kernel
parent		7559d97993ae7d552c96313155286f372cf4cf7c (diff)
parent		a8c45289f215e137825bf9630d0abb41c1dc41ff (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
include/net/ipip.h
The changes made to ipip.h in 'net' were already included
in 'net-next' before that header was moved to another location.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
 kernel/events/core.c              |  8
 kernel/printk.c                   | 80
 kernel/sys.c                      | 57
 kernel/time/tick-broadcast.c      |  3
 kernel/trace/ftrace.c             |  4
 kernel/trace/trace.c              | 59
 kernel/trace/trace.h              |  6
 kernel/trace/trace_irqsoff.c      | 19
 kernel/trace/trace_sched_wakeup.c | 18
 9 files changed, 161 insertions(+), 93 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b0cd86501c30..59412d037eed 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 			if (ctxn < 0)
 				goto next;
 			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+			if (ctx)
+				perf_event_task_ctx(ctx, task_event);
 		}
-		if (ctx)
-			perf_event_task_ctx(ctx, task_event);
 next:
 		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
+	if (task_event->task_ctx)
+		perf_event_task_ctx(task_event->task_ctx, task_event);
+
 	rcu_read_unlock();
 }
 
@@ -5647,6 +5650,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
 		event->attr.sample_period = NSEC_PER_SEC / freq;
 		hwc->sample_period = event->attr.sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
+		hwc->last_period = hwc->sample_period;
 		event->attr.freq = 0;
 	}
 }
diff --git a/kernel/printk.c b/kernel/printk.c
index 0b31715f335a..abbdd9e2ac82 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
 #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
 #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
 
-DECLARE_WAIT_QUEUE_HEAD(log_wait);
-
 int console_printk[4] = {
 	DEFAULT_CONSOLE_LOGLEVEL,	/* console_loglevel */
 	DEFAULT_MESSAGE_LOGLEVEL,	/* default_message_loglevel */
@@ -224,6 +222,7 @@ struct log {
 static DEFINE_RAW_SPINLOCK(logbuf_lock);
 
 #ifdef CONFIG_PRINTK
+DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static u32 syslog_idx;
@@ -1957,45 +1956,6 @@ int is_console_locked(void)
 	return console_locked;
 }
 
-/*
- * Delayed printk version, for scheduler-internal messages:
- */
-#define PRINTK_BUF_SIZE		512
-
-#define PRINTK_PENDING_WAKEUP	0x01
-#define PRINTK_PENDING_SCHED	0x02
-
-static DEFINE_PER_CPU(int, printk_pending);
-static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
-
-static void wake_up_klogd_work_func(struct irq_work *irq_work)
-{
-	int pending = __this_cpu_xchg(printk_pending, 0);
-
-	if (pending & PRINTK_PENDING_SCHED) {
-		char *buf = __get_cpu_var(printk_sched_buf);
-		printk(KERN_WARNING "[sched_delayed] %s", buf);
-	}
-
-	if (pending & PRINTK_PENDING_WAKEUP)
-		wake_up_interruptible(&log_wait);
-}
-
-static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-	.func = wake_up_klogd_work_func,
-	.flags = IRQ_WORK_LAZY,
-};
-
-void wake_up_klogd(void)
-{
-	preempt_disable();
-	if (waitqueue_active(&log_wait)) {
-		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
-		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
-	}
-	preempt_enable();
-}
-
 static void console_cont_flush(char *text, size_t size)
 {
 	unsigned long flags;
@@ -2458,6 +2418,44 @@ static int __init printk_late_init(void)
 late_initcall(printk_late_init);
 
 #if defined CONFIG_PRINTK
+/*
+ * Delayed printk version, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE		512
+
+#define PRINTK_PENDING_WAKEUP	0x01
+#define PRINTK_PENDING_SCHED	0x02
+
+static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+static void wake_up_klogd_work_func(struct irq_work *irq_work)
+{
+	int pending = __this_cpu_xchg(printk_pending, 0);
+
+	if (pending & PRINTK_PENDING_SCHED) {
+		char *buf = __get_cpu_var(printk_sched_buf);
+		printk(KERN_WARNING "[sched_delayed] %s", buf);
+	}
+
+	if (pending & PRINTK_PENDING_WAKEUP)
+		wake_up_interruptible(&log_wait);
+}
+
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
+	.func = wake_up_klogd_work_func,
+	.flags = IRQ_WORK_LAZY,
+};
+
+void wake_up_klogd(void)
+{
+	preempt_disable();
+	if (waitqueue_active(&log_wait)) {
+		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+	}
+	preempt_enable();
+}
 
 int printk_sched(const char *fmt, ...)
 {
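Note on the printk.c hunks: the deferred-wakeup machinery (the per-CPU irq_work and wake_up_klogd()) moves, together with log_wait, inside #ifdef CONFIG_PRINTK, so printk-less builds no longer carry the wake-up path. A sketch of the config-stub idiom a move like this pairs with on the header side; this is illustrative, assuming the usual include/linux/printk.h layout:

#ifdef CONFIG_PRINTK
/* real implementation, compiled only when printk exists */
extern void wake_up_klogd(void);
#else
/* off-case stub: callers compile unchanged, no irq_work gets pulled in */
static inline void wake_up_klogd(void)
{
}
#endif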
diff --git a/kernel/sys.c b/kernel/sys.c
index 81f56445fba9..39c9c4a2949f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2185,9 +2185,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
 
 char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
 
-static int __orderly_poweroff(void)
+static int __orderly_poweroff(bool force)
 {
-	int argc;
 	char **argv;
 	static char *envp[] = {
 		"HOME=/",
@@ -2196,20 +2195,40 @@ static int __orderly_poweroff(void)
 	};
 	int ret;
 
-	argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
-	if (argv == NULL) {
+	argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
+	if (argv) {
+		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+		argv_free(argv);
+	} else {
 		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
 		       __func__, poweroff_cmd);
-		return -ENOMEM;
+		ret = -ENOMEM;
 	}
 
-	ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
-				      NULL, NULL, NULL);
-	argv_free(argv);
+	if (ret && force) {
+		printk(KERN_WARNING "Failed to start orderly shutdown: "
+		       "forcing the issue\n");
+		/*
+		 * I guess this should try to kick off some daemon to sync and
+		 * poweroff asap. Or not even bother syncing if we're doing an
+		 * emergency shutdown?
+		 */
+		emergency_sync();
+		kernel_power_off();
+	}
 
 	return ret;
 }
 
+static bool poweroff_force;
+
+static void poweroff_work_func(struct work_struct *work)
+{
+	__orderly_poweroff(poweroff_force);
+}
+
+static DECLARE_WORK(poweroff_work, poweroff_work_func);
+
 /**
  * orderly_poweroff - Trigger an orderly system poweroff
  * @force: force poweroff if command execution fails
@@ -2219,21 +2238,9 @@ static int __orderly_poweroff(void)
  */
 int orderly_poweroff(bool force)
 {
-	int ret = __orderly_poweroff();
-
-	if (ret && force) {
-		printk(KERN_WARNING "Failed to start orderly shutdown: "
-		       "forcing the issue\n");
-
-		/*
-		 * I guess this should try to kick off some daemon to sync and
-		 * poweroff asap. Or not even bother syncing if we're doing an
-		 * emergency shutdown?
-		 */
-		emergency_sync();
-		kernel_power_off();
-	}
-
-	return ret;
+	if (force) /* do not override the pending "true" */
+		poweroff_force = true;
+	schedule_work(&poweroff_work);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(orderly_poweroff);
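Note on the kernel/sys.c hunks: orderly_poweroff() becomes fire-and-forget — the caller only latches the force flag and schedules work, while the usermode helper (and any forced fallback) runs later in process context, making the function safe to call from atomic or interrupt context. A minimal sketch of that defer-to-workqueue idiom; the names my_flag, do_heavy_lifting, and request_action are hypothetical:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static bool my_flag;	/* latched request state; never cleared once set */

static void do_heavy_lifting(bool force)	/* hypothetical payload */
{
	pr_info("acting on request, force=%d\n", force);
}

static void my_work_func(struct work_struct *work)
{
	/* runs later in process context: may sleep, exec helpers, etc. */
	do_heavy_lifting(my_flag);
}

static DECLARE_WORK(my_work, my_work_func);

void request_action(bool force)		/* safe to call from atomic context */
{
	if (force)	/* do not override a pending "true" */
		my_flag = true;
	schedule_work(&my_work);
}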
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2fb8cb88df8d..7f32fe0e52cd 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -67,7 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
  */
 int tick_check_broadcast_device(struct clock_event_device *dev)
 {
-	if ((tick_broadcast_device.evtdev &&
+	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
+	    (tick_broadcast_device.evtdev &&
 	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
 	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
 		return 0;
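Note on the tick-broadcast hunk: the single added condition rejects a clockevent device flagged CLOCK_EVT_FEAT_DUMMY up front, since a dummy device cannot deliver real interrupts and is useless as the broadcast source. The accept/reject logic, restated as a predicate for readability (illustrative only, not part of the patch):

static bool broadcast_device_unusable(struct clock_event_device *dev)
{
	/* new check: a dummy device can never drive the broadcast tick */
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return true;
	/* keep an existing device with an equal or better rating */
	if (tick_broadcast_device.evtdev &&
	    tick_broadcast_device.evtdev->rating >= dev->rating)
		return true;
	/* C3STOP devices stop in deep idle: what broadcast works around */
	if (dev->features & CLOCK_EVT_FEAT_C3STOP)
		return true;
	return false;
}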
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ab25b88aae56..6893d5a2bf08 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				continue;
 			}
 
-			hlist_del(&entry->node);
-			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+			hlist_del_rcu(&entry->node);
+			call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
 		}
 	}
 	__disable_ftrace_function_probe();
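Note on the ftrace hunk: plain list removal is swapped for the RCU-aware pair. hlist_del_rcu() leaves the removed node's forward pointer intact so concurrent lockless walkers can still traverse past it, and call_rcu_sched() defers the free until readers that run with preemption disabled (as the function-probe callbacks do) have finished. A minimal self-contained sketch of that removal pattern, with hypothetical entry/entry_remove names:

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct entry {
	struct hlist_node node;
	struct rcu_head rcu;
};

static void entry_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct entry, rcu));
}

/* caller holds the write-side lock protecting the hash list */
static void entry_remove(struct entry *e)
{
	hlist_del_rcu(&e->node);
	/* sched flavor: matches readers running with preemption disabled */
	call_rcu_sched(&e->rcu, entry_free_rcu);
}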
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1f835a83cb2c..4f1dade56981 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct ring_buffer *buf = tr->buffer;
+	struct ring_buffer *buf;
 
 	if (trace_stop_count)
 		return;
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
+	buf = tr->buffer;
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
@@ -2880,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 	return -EINVAL;
 }
 
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+		return -1;
+
+	return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
 {
 	/* do nothing if flag is already set */
 	if (!!(trace_flags & mask) == !!enabled)
-		return;
+		return 0;
+
+	/* Give the tracer a chance to approve the change */
+	if (current_trace->flag_changed)
+		if (current_trace->flag_changed(current_trace, mask, !!enabled))
+			return -EINVAL;
 
 	if (enabled)
 		trace_flags |= mask;
@@ -2894,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
 
-	if (mask == TRACE_ITER_OVERWRITE)
+	if (mask == TRACE_ITER_OVERWRITE) {
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+		ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+	}
 
 	if (mask == TRACE_ITER_PRINTK)
 		trace_printk_start_stop_comm(enabled);
+
+	return 0;
 }
 
 static int trace_set_options(char *option)
 {
 	char *cmp;
 	int neg = 0;
-	int ret = 0;
+	int ret = -ENODEV;
 	int i;
 
 	cmp = strstrip(option);
@@ -2915,19 +2936,20 @@ static int trace_set_options(char *option)
 		cmp += 2;
 	}
 
+	mutex_lock(&trace_types_lock);
+
 	for (i = 0; trace_options[i]; i++) {
 		if (strcmp(cmp, trace_options[i]) == 0) {
-			set_tracer_flags(1 << i, !neg);
+			ret = set_tracer_flag(1 << i, !neg);
 			break;
 		}
 	}
 
 	/* If no option could be set, test the specific tracer options */
-	if (!trace_options[i]) {
-		mutex_lock(&trace_types_lock);
+	if (!trace_options[i])
 		ret = set_tracer_option(current_trace, cmp, neg);
-		mutex_unlock(&trace_types_lock);
-	}
+
+	mutex_unlock(&trace_types_lock);
 
 	return ret;
 }
@@ -2937,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			    size_t cnt, loff_t *ppos)
 {
 	char buf[64];
+	int ret;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2946,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 	buf[cnt] = 0;
 
-	trace_set_options(buf);
+	ret = trace_set_options(buf);
+	if (ret < 0)
+		return ret;
 
 	*ppos += cnt;
 
@@ -3250,6 +3275,9 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 
 	trace_branch_disable();
+
+	current_trace->enabled = false;
+
 	if (current_trace->reset)
 		current_trace->reset(tr);
 
@@ -3294,6 +3322,7 @@ static int tracing_set_tracer(const char *buf)
 	}
 
 	current_trace = t;
+	current_trace->enabled = true;
 	trace_branch_enable(tr);
 out:
 	mutex_unlock(&trace_types_lock);
@@ -4780,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	if (val != 0 && val != 1)
 		return -EINVAL;
-	set_tracer_flags(1 << index, val);
+
+	mutex_lock(&trace_types_lock);
+	ret = set_tracer_flag(1 << index, val);
+	mutex_unlock(&trace_types_lock);
+
+	if (ret < 0)
+		return ret;
 
 	*ppos += cnt;
 
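Note on the trace.c rework: set_tracer_flags() becomes set_tracer_flag(), which returns an error and consults an optional per-tracer flag_changed() hook before mutating trace_flags; write paths now take trace_types_lock around the call and propagate the error to userspace. The shape of that veto-callback pattern in miniature (illustrative, not the kernel's actual types):

#include <linux/errno.h>
#include <linux/types.h>

struct client {
	/* return non-zero to reject the proposed flag transition */
	int (*flag_changed)(struct client *c, u32 mask, int set);
};

static u32 flags;
static struct client *current_client;

int set_flag(u32 mask, int enabled)
{
	if (!!(flags & mask) == !!enabled)
		return 0;			/* no change requested */

	/* give the active client a chance to veto, e.g. a latency
	 * tracer refusing to lose ring-buffer overwrite mode */
	if (current_client && current_client->flag_changed &&
	    current_client->flag_changed(current_client, mask, !!enabled))
		return -EINVAL;

	if (enabled)
		flags |= mask;
	else
		flags &= ~mask;
	return 0;
}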
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 57d7e5397d56..2081971367ea 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -283,11 +283,15 @@ struct tracer {
 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
 	/* If you handled the flag setting, return 0 */
 	int			(*set_flag)(u32 old_flags, u32 bit, int set);
+	/* Return 0 if OK with change, else return non-zero */
+	int			(*flag_changed)(struct tracer *tracer,
+						u32 mask, int set);
 	struct tracer		*next;
 	struct tracer_flags	*flags;
 	bool			print_max;
 	bool			use_max_tr;
 	bool			allocated_snapshot;
+	bool			enabled;
 };
 
 
@@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+int set_tracer_flag(unsigned int mask, int enabled);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 713a2cac4881..443b25b43b4f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -32,7 +32,7 @@ enum {
 
 static int trace_type __read_mostly;
 
-static int save_lat_flag;
+static int save_flags;
 
 static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-	trace_flags |= TRACE_ITER_LATENCY_FMT;
+	save_flags = trace_flags;
+
+	/* non overwrite screws up the latency tracers */
+	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
 	tracing_max_latency = 0;
 	irqsoff_trace = tr;
@@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
+	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
 	stop_irqsoff_tracer(tr, is_graph());
 
-	if (!save_lat_flag)
-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
@@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= irqsoff_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_irqsoff,
 #endif
@@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= irqsoff_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_preemptoff,
 #endif
@@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.print_line	= irqsoff_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= irqsoff_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_preemptirqsoff,
 #endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 75aa97fbe1a1..fde652c9a511 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
 static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
-static int save_lat_flag;
+static int save_flags;
 
 #define TRACE_DISPLAY_GRAPH	1
 
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-	trace_flags |= TRACE_ITER_LATENCY_FMT;
+	save_flags = trace_flags;
+
+	/* non overwrite screws up the latency tracers */
+	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
 	tracing_max_latency = 0;
 	wakeup_trace = tr;
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
+	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
 	stop_wakeup_tracer(tr);
 	/* make sure we put back any tasks we are tracing */
 	wakeup_reset(tr);
 
-	if (!save_lat_flag)
-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly =
 	.print_line	= wakeup_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= wakeup_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_wakeup,
 #endif
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.print_line	= wakeup_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= wakeup_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_wakeup,
 #endif