Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c                    | 32
-rw-r--r--  kernel/auditsc.c                  | 24
-rw-r--r--  kernel/cgroup.c                   |  2
-rw-r--r--  kernel/fork.c                     | 16
-rw-r--r--  kernel/latencytop.c               |  2
-rw-r--r--  kernel/posix-cpu-timers.c         |  2
-rw-r--r--  kernel/power/swap.c               |  2
-rw-r--r--  kernel/relay.c                    |  7
-rw-r--r--  kernel/sched.c                    |  4
-rw-r--r--  kernel/sched_clock.c              |  6
-rw-r--r--  kernel/softlockup.c               |  2
-rw-r--r--  kernel/sysctl.c                   | 10
-rw-r--r--  kernel/time/timekeeping.c         | 22
-rw-r--r--  kernel/trace/Kconfig              | 13
-rw-r--r--  kernel/trace/ftrace.c             | 15
-rw-r--r--  kernel/trace/trace.c              | 40
-rw-r--r--  kernel/trace/trace_sched_switch.c |  1
-rw-r--r--  kernel/trace/trace_stack.c        | 49
18 files changed, 183 insertions, 66 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 4414e93d8750..ce6d8ea3131e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -61,8 +61,11 @@
 
 #include "audit.h"
 
-/* No auditing will take place until audit_initialized != 0.
+/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
  * (Initialization happens after skb_init is called.) */
+#define AUDIT_DISABLED		-1
+#define AUDIT_UNINITIALIZED	0
+#define AUDIT_INITIALIZED	1
 static int audit_initialized;
 
 #define AUDIT_OFF	0
@@ -965,6 +968,9 @@ static int __init audit_init(void)
 {
 	int i;
 
+	if (audit_initialized == AUDIT_DISABLED)
+		return 0;
+
 	printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
 	       audit_default ? "enabled" : "disabled");
 	audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,
@@ -976,7 +982,7 @@ static int __init audit_init(void)
 
 	skb_queue_head_init(&audit_skb_queue);
 	skb_queue_head_init(&audit_skb_hold_queue);
-	audit_initialized = 1;
+	audit_initialized = AUDIT_INITIALIZED;
 	audit_enabled = audit_default;
 	audit_ever_enabled |= !!audit_default;
 
@@ -999,13 +1005,21 @@ __initcall(audit_init);
 static int __init audit_enable(char *str)
 {
 	audit_default = !!simple_strtol(str, NULL, 0);
-	printk(KERN_INFO "audit: %s%s\n",
-	       audit_default ? "enabled" : "disabled",
-	       audit_initialized ? "" : " (after initialization)");
-	if (audit_initialized) {
+	if (!audit_default)
+		audit_initialized = AUDIT_DISABLED;
+
+	printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled");
+
+	if (audit_initialized == AUDIT_INITIALIZED) {
 		audit_enabled = audit_default;
 		audit_ever_enabled |= !!audit_default;
+	} else if (audit_initialized == AUDIT_UNINITIALIZED) {
+		printk(" (after initialization)");
+	} else {
+		printk(" (until reboot)");
 	}
+	printk("\n");
+
 	return 1;
 }
 
@@ -1107,9 +1121,7 @@ unsigned int audit_serial(void)
 static inline void audit_get_stamp(struct audit_context *ctx,
 				   struct timespec *t, unsigned int *serial)
 {
-	if (ctx)
-		auditsc_get_stamp(ctx, t, serial);
-	else {
+	if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
 		*t = CURRENT_TIME;
 		*serial = audit_serial();
 	}
@@ -1146,7 +1158,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	int reserve;
 	unsigned long timeout_start = jiffies;
 
-	if (!audit_initialized)
+	if (audit_initialized != AUDIT_INITIALIZED)
 		return NULL;
 
 	if (unlikely(audit_filter_type(type)))
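
The audit.c changes above turn audit_initialized into a tri-state: booting with audit=0 now pins it to AUDIT_DISABLED before audit_init() runs, so the netlink socket is never created and audit_log_start() refuses to emit records until reboot. A minimal user-space sketch of that state machine, with the netlink setup stubbed out (the stub bodies and the main() driver are illustrative, not kernel code):

#include <stdio.h>

#define AUDIT_DISABLED      -1
#define AUDIT_UNINITIALIZED  0
#define AUDIT_INITIALIZED    1

static int audit_initialized = AUDIT_UNINITIALIZED;

/* models audit_enable(): "audit=0" flips the state to DISABLED for good */
static void audit_enable(int audit_default)
{
	if (!audit_default)
		audit_initialized = AUDIT_DISABLED;
}

/* models audit_init(): a disabled state short-circuits initialization */
static int audit_init(void)
{
	if (audit_initialized == AUDIT_DISABLED)
		return 0;
	/* ... netlink socket setup would happen here ... */
	audit_initialized = AUDIT_INITIALIZED;
	return 0;
}

/* models audit_log_start(): only a fully initialized subsystem logs */
static const char *audit_log_start(void)
{
	if (audit_initialized != AUDIT_INITIALIZED)
		return NULL;
	return "audit record";
}

int main(void)
{
	audit_enable(0);	/* boot with audit=0 */
	audit_init();
	printf("%s\n", audit_log_start() ? "logging"
					 : "audit is off until reboot");
	return 0;
}
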
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index cf5bc2f5f9c3..2a3f0afc4d2a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1459,7 +1459,6 @@ void audit_free(struct task_struct *tsk)
 
 /**
  * audit_syscall_entry - fill in an audit record at syscall entry
- * @tsk: task being audited
  * @arch: architecture type
  * @major: major syscall type (function)
  * @a1: additional syscall register 1
@@ -1548,9 +1547,25 @@ void audit_syscall_entry(int arch, int major,
 	context->ppid = 0;
 }
 
+void audit_finish_fork(struct task_struct *child)
+{
+	struct audit_context *ctx = current->audit_context;
+	struct audit_context *p = child->audit_context;
+	if (!p || !ctx || !ctx->auditable)
+		return;
+	p->arch = ctx->arch;
+	p->major = ctx->major;
+	memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
+	p->ctime = ctx->ctime;
+	p->dummy = ctx->dummy;
+	p->auditable = ctx->auditable;
+	p->in_syscall = ctx->in_syscall;
+	p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
+	p->ppid = current->pid;
+}
+
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @tsk: task being audited
  * @valid: success/failure flag
  * @return_code: syscall return value
  *
@@ -1942,15 +1957,18 @@ EXPORT_SYMBOL_GPL(__audit_inode_child);
  *
  * Also sets the context as auditable.
  */
-void auditsc_get_stamp(struct audit_context *ctx,
+int auditsc_get_stamp(struct audit_context *ctx,
 		       struct timespec *t, unsigned int *serial)
 {
+	if (!ctx->in_syscall)
+		return 0;
 	if (!ctx->serial)
 		ctx->serial = audit_serial();
 	t->tv_sec  = ctx->ctime.tv_sec;
 	t->tv_nsec = ctx->ctime.tv_nsec;
 	*serial    = ctx->serial;
 	ctx->auditable = 1;
+	return 1;
 }
 
 /* global counter which is incremented every time something logs in */
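
auditsc_get_stamp() now returns whether the context really carried a syscall timestamp, which lets audit_get_stamp() in audit.c collapse its two fallback cases (no context, or a context outside a syscall) into a single condition. A hedged user-space model of that caller/callee contract, with time(NULL) standing in for CURRENT_TIME and a trivial serial counter:

#include <stdio.h>
#include <time.h>

struct audit_context {
	int in_syscall;
	time_t ctime;
	unsigned int serial;
};

static unsigned int audit_serial(void)
{
	static unsigned int serial;
	return ++serial;
}

/* models auditsc_get_stamp(): fails unless we are inside a syscall */
static int auditsc_get_stamp(struct audit_context *ctx,
			     time_t *t, unsigned int *serial)
{
	if (!ctx->in_syscall)
		return 0;
	if (!ctx->serial)
		ctx->serial = audit_serial();
	*t = ctx->ctime;
	*serial = ctx->serial;
	return 1;
}

/* models audit_get_stamp(): one condition covers both fallback cases */
static void audit_get_stamp(struct audit_context *ctx,
			    time_t *t, unsigned int *serial)
{
	if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
		*t = time(NULL);	/* stand-in for CURRENT_TIME */
		*serial = audit_serial();
	}
}

int main(void)
{
	struct audit_context ctx = { .in_syscall = 0 };
	time_t t;
	unsigned int serial;

	audit_get_stamp(&ctx, &t, &serial);	/* takes the fallback path */
	printf("serial %u\n", serial);
	return 0;
}
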
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index fe00b3b983a8..8185a0f09594 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -702,7 +702,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 	 * any child cgroups exist. This is theoretically supportable
 	 * but involves complex error handling, so it's being left until
 	 * later */
-	if (!list_empty(&cgrp->children))
+	if (root->number_of_cgroups > 1)
 		return -EBUSY;
 
 	/* Process each subsystem */
diff --git a/kernel/fork.c b/kernel/fork.c
index 7407ab319875..7b93da72d4a2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -319,17 +319,20 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
+			struct address_space *mapping = file->f_mapping;
+
 			get_file(file);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
-
-			/* insert tmp into the share list, just after mpnt */
-			spin_lock(&file->f_mapping->i_mmap_lock);
+			spin_lock(&mapping->i_mmap_lock);
+			if (tmp->vm_flags & VM_SHARED)
+				mapping->i_mmap_writable++;
 			tmp->vm_truncate_count = mpnt->vm_truncate_count;
-			flush_dcache_mmap_lock(file->f_mapping);
+			flush_dcache_mmap_lock(mapping);
+			/* insert tmp into the share list, just after mpnt */
 			vma_prio_tree_add(tmp, mpnt);
-			flush_dcache_mmap_unlock(file->f_mapping);
-			spin_unlock(&file->f_mapping->i_mmap_lock);
+			flush_dcache_mmap_unlock(mapping);
+			spin_unlock(&mapping->i_mmap_lock);
 		}
 
 		/*
@@ -1406,6 +1409,7 @@ long do_fork(unsigned long clone_flags,
 		init_completion(&vfork);
 	}
 
+	audit_finish_fork(p);
 	tracehook_report_clone(trace, regs, clone_flags, nr, p);
 
 	/*
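
The dup_mmap() hunk makes fork account the child's shared VMAs in mapping->i_mmap_writable, matching what mmap() recorded for the parent. A stripped-down model of just that bookkeeping (the prio tree and locking are omitted, and the VM_SHARED flag value is illustrative):

#include <stdio.h>

#define VM_SHARED 0x08	/* illustrative flag bit */

struct address_space {
	int i_mmap_writable;	/* count of shared mappings on the file */
};

struct vm_area_struct {
	unsigned long vm_flags;
	struct address_space *mapping;
};

/* models the dup_mmap() hunk: a duplicated shared VMA counts again */
static void dup_vma(const struct vm_area_struct *mpnt,
		    struct vm_area_struct *tmp)
{
	*tmp = *mpnt;
	if (tmp->vm_flags & VM_SHARED)
		tmp->mapping->i_mmap_writable++;
}

int main(void)
{
	struct address_space mapping = { .i_mmap_writable = 1 };  /* parent */
	struct vm_area_struct parent = { VM_SHARED, &mapping }, child;

	dup_vma(&parent, &child);	/* what fork() does per VMA */
	printf("shared mappings counted: %d\n", mapping.i_mmap_writable);
	return 0;
}
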
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 5e7b45c56923..449db466bdbc 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -191,7 +191,7 @@ static int lstats_show(struct seq_file *m, void *v)
 				   latency_record[i].time,
 				   latency_record[i].max);
 		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-			char sym[KSYM_NAME_LEN];
+			char sym[KSYM_SYMBOL_LEN];
 			char *c;
 			if (!latency_record[i].backtrace[q])
 				break;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 895337b16a24..4e5288a831de 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -311,7 +311,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 	struct task_cputime cputime;
 
 	thread_group_cputime(p, &cputime);
-	switch (which_clock) {
+	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
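
cpu_clock_sample_group() was switching on the raw clockid, but CPU-clock ids encode a PID in the upper bits with the clock type in the low bits, so only the pid-0 encodings could ever match a case label; CPUCLOCK_WHICH() masks the type back out. A sketch of the decode; the make_process_clock() helper is a simplified stand-in for the kernel's encoding, and the mask of 3 is assumed to match the era's posix-cpu-timers headers:

#include <stdio.h>

typedef int clockid_t;

#define CPUCLOCK_CLOCK_MASK	3
#define CPUCLOCK_WHICH(clock)	((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
#define CPUCLOCK_PROF		0
#define CPUCLOCK_VIRT		1
#define CPUCLOCK_SCHED		2

/* simplified stand-in for the kernel's encoding: pid above bit 3,
 * clock type in the low bits (the real macro complements the pid) */
static clockid_t make_process_clock(int pid, int which)
{
	return ((clockid_t) pid << 3) | (clockid_t) which;
}

int main(void)
{
	clockid_t clock = make_process_clock(1234, CPUCLOCK_PROF);

	/* switch (clock) would never hit CPUCLOCK_PROF for pid 1234 ... */
	printf("raw value: %d\n", clock);
	/* ... but the masked type still matches */
	printf("CPUCLOCK_WHICH: %d (CPUCLOCK_PROF is %d)\n",
	       CPUCLOCK_WHICH(clock), CPUCLOCK_PROF);
	return 0;
}
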
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b7713b53d07a..6da14358537c 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -633,7 +633,7 @@ void swsusp_close(fmode_t mode)
 		return;
 	}
 
-	blkdev_put(resume_bdev, mode); /* move up */
+	blkdev_put(resume_bdev, mode);
 }
 
 static int swsusp_header_init(void)
diff --git a/kernel/relay.c b/kernel/relay.c
index 32b0befdcb6a..09ac2008f77b 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1317,12 +1317,9 @@ static ssize_t relay_file_splice_read(struct file *in,
 		if (ret < 0)
 			break;
 		else if (!ret) {
-			if (spliced)
-				break;
-			if (flags & SPLICE_F_NONBLOCK) {
+			if (flags & SPLICE_F_NONBLOCK)
 				ret = -EAGAIN;
 				break;
-			}
 		}
 
 		*ppos += ret;
diff --git a/kernel/sched.c b/kernel/sched.c
index d377097572f9..ceda5799466e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2457,7 +2457,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_new(rq, p);
 		inc_nr_running(rq);
 	}
-	trace_sched_wakeup_new(rq, p);
+	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, 0);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -6595,7 +6595,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		req = list_entry(rq->migration_queue.next,
 				 struct migration_req, list);
 		list_del_init(&req->list);
+		spin_unlock_irq(&rq->lock);
 		complete(&req->done);
+		spin_lock_irq(&rq->lock);
 	}
 	spin_unlock_irq(&rq->lock);
 	break;
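
In the migration_call() hunk, complete() is now called with rq->lock dropped: the woken waiter can run immediately and may itself need the runqueue lock, so signalling while holding it risks a deadlock. The same unlock, signal, relock discipline in a pthreads sketch; the names mirror the kernel code but the program is only a model (build with gcc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done    = PTHREAD_COND_INITIALIZER;
static int completed;

static void *waiter(void *arg)
{
	pthread_mutex_lock(&rq_lock);
	while (!completed)
		pthread_cond_wait(&done, &rq_lock); /* retakes rq_lock on wakeup */
	pthread_mutex_unlock(&rq_lock);
	puts("request completed");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);

	pthread_mutex_lock(&rq_lock);
	/* ... dequeue the migration request under the lock ... */
	completed = 1;
	pthread_mutex_unlock(&rq_lock);	/* matches spin_unlock_irq() */
	pthread_cond_signal(&done);	/* matches complete() */
	pthread_mutex_lock(&rq_lock);	/* matches spin_lock_irq() */
	pthread_mutex_unlock(&rq_lock);

	pthread_join(t, NULL);
	return 0;
}
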
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 81787248b60f..e8ab096ddfe3 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
-	 *		      max(scd->clock, scd->tick_gtod + TICK_NSEC));
+	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
 	clock = scd->tick_gtod + delta;
 	min_clock = wrap_max(scd->tick_gtod, scd->clock);
-	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
+	max_clock = scd->tick_gtod + TICK_NSEC;
 
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
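
The old clamp let a clock that had already run ahead drag max_clock forward with it; now each update is forced into [max(tick_gtod, clock), tick_gtod + TICK_NSEC], so per-CPU drift is bounded by one tick past the last GTOD tick. A user-space sketch using the same wraparound-safe helpers as kernel/sched_clock.c (the TICK_NSEC value and sample numbers are illustrative):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t  s64;

#define TICK_NSEC 1000000ULL	/* 1 ms tick, for illustration */

/* wraparound-safe comparisons, as in kernel/sched_clock.c */
static u64 wrap_max(u64 x, u64 y) { return (s64)(x - y) > 0 ? x : y; }
static u64 wrap_min(u64 x, u64 y) { return (s64)(x - y) < 0 ? x : y; }

/* scd->clock = clamp(scd->tick_gtod + delta,
 *                    max(scd->tick_gtod, scd->clock),
 *                    scd->tick_gtod + TICK_NSEC); */
static u64 update_clock(u64 tick_gtod, u64 prev_clock, u64 delta)
{
	u64 clock = tick_gtod + delta;
	u64 min_clock = wrap_max(tick_gtod, prev_clock);
	u64 max_clock = tick_gtod + TICK_NSEC;	/* was wrap_max(scd->clock, ...) */

	clock = wrap_max(clock, min_clock);
	return wrap_min(clock, max_clock);
}

int main(void)
{
	/* a TSC delta far larger than a tick is capped at one tick ahead */
	printf("%llu\n", (unsigned long long)
	       update_clock(5000000, 5000100, 90000000));
	return 0;
}
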
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 3953e4aed733..dc0b3be6b7d5 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -188,7 +188,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now)
 	if ((long)(now - t->last_switch_timestamp) <
 					sysctl_hung_task_timeout_secs)
 		return;
-	if (sysctl_hung_task_warnings < 0)
+	if (!sysctl_hung_task_warnings)
 		return;
 	sysctl_hung_task_warnings--;
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c83f566e940a..6ac501a2dcc6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -487,6 +487,16 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &ftrace_enable_sysctl,
 	},
 #endif
+#ifdef CONFIG_STACK_TRACER
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "stack_tracer_enabled",
+		.data		= &stack_tracer_enabled,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &stack_trace_sysctl,
+	},
+#endif
 #ifdef CONFIG_TRACING
 	{
 		.ctl_name	= CTL_UNNUMBERED,
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e7acfb482a68..fa05e88aa76f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -518,6 +518,28 @@ void update_wall_time(void)
 	/* correct the clock when NTP error is too big */
 	clocksource_adjust(offset);
 
+	/*
+	 * Since in the loop above, we accumulate any amount of time
+	 * in xtime_nsec over a second into xtime.tv_sec, its possible for
+	 * xtime_nsec to be fairly small after the loop. Further, if we're
+	 * slightly speeding the clocksource up in clocksource_adjust(),
+	 * its possible the required corrective factor to xtime_nsec could
+	 * cause it to underflow.
+	 *
+	 * Now, we cannot simply roll the accumulated second back, since
+	 * the NTP subsystem has been notified via second_overflow. So
+	 * instead we push xtime_nsec forward by the amount we underflowed,
+	 * and add that amount into the error.
+	 *
+	 * We'll correct this error next time through this function, when
+	 * xtime_nsec is not as small.
+	 */
+	if (unlikely((s64)clock->xtime_nsec < 0)) {
+		s64 neg = -(s64)clock->xtime_nsec;
+		clock->xtime_nsec = 0;
+		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
+	}
+
 	/* store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
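
The comment in the hunk explains the fix; the subtlety is the fixed-point bookkeeping: xtime_nsec is kept shifted left by clock->shift while the NTP error accumulates in NTP_SCALE_SHIFT units, hence the << (NTP_SCALE_SHIFT - clock->shift) conversion. A small model of the underflow path (the struct and sample values are illustrative; NTP_SCALE_SHIFT of 32 matches the era's timex.h):

#include <stdio.h>
#include <stdint.h>

typedef int64_t s64;
typedef uint64_t u64;

#define NTP_SCALE_SHIFT 32	/* assumed value from linux/timex.h of the era */

struct clocksource_state {
	u64 xtime_nsec;	/* nanoseconds, shifted left by 'shift' */
	s64 error;	/* in NTP_SCALE_SHIFT fixed-point */
	int shift;
};

/* models the new hunk: push an underflowed xtime_nsec back to zero and
 * account the shortfall in the error term, in matching fixed-point */
static void fixup_xtime_underflow(struct clocksource_state *clock)
{
	if ((s64)clock->xtime_nsec < 0) {
		s64 neg = -(s64)clock->xtime_nsec;
		clock->xtime_nsec = 0;
		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
	}
}

int main(void)
{
	/* clocksource_adjust() nudged xtime_nsec below zero */
	struct clocksource_state clock = {
		.xtime_nsec = (u64)-16, .error = 0, .shift = 10,
	};

	fixup_xtime_underflow(&clock);
	printf("xtime_nsec=%llu error=%lld\n",
	       (unsigned long long)clock.xtime_nsec, (long long)clock.error);
	return 0;
}
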
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d8bae6f4219e..e2a4ff6fc3a6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -244,10 +244,15 @@ config STACK_TRACER
 
 	  This tracer works by hooking into every function call that the
 	  kernel executes, and keeping a maximum stack depth value and
-	  stack-trace saved. Because this logic has to execute in every
-	  kernel function, all the time, this option can slow down the
-	  kernel measurably and is generally intended for kernel
-	  developers only.
+	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
+	  then it will not have any overhead while the stack tracer
+	  is disabled.
+
+	  To enable the stack tracer on bootup, pass in 'stacktrace'
+	  on the kernel command line.
+
+	  The stack tracer can also be enabled or disabled via the
+	  sysctl kernel.stack_tracer_enabled
 
 	  Say N if unsure.
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a12f80efceaa..2f32969c09df 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1047,6 +1047,13 @@ ftrace_match(unsigned char *buff, int len, int enable)
 	int type = MATCH_FULL;
 	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 	unsigned i, match = 0, search_len = 0;
+	int not = 0;
+
+	if (buff[0] == '!') {
+		not = 1;
+		buff++;
+		len--;
+	}
 
 	for (i = 0; i < len; i++) {
 		if (buff[i] == '*') {
@@ -1100,8 +1107,12 @@ ftrace_match(unsigned char *buff, int len, int enable)
 				matched = 1;
 				break;
 			}
-			if (matched)
-				rec->flags |= flag;
+			if (matched) {
+				if (not)
+					rec->flags &= ~flag;
+				else
+					rec->flags |= flag;
+			}
 		}
 		pg = pg->next;
 	}
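
The two ftrace.c hunks teach set_ftrace_filter/set_ftrace_notrace a '!' prefix: the pattern is matched exactly as before, but matching records get the flag cleared rather than set. A condensed user-space model of that flow, handling only full-name matches (the flag values and record list are illustrative):

#include <stdio.h>
#include <string.h>

#define FTRACE_FL_FILTER	(1 << 0)	/* flag values are illustrative */
#define FTRACE_FL_NOTRACE	(1 << 1)

struct dyn_ftrace {
	const char *name;
	unsigned long flags;
};

/* models the two hunks: a leading '!' flips matching records from
 * setting the flag to clearing it */
static void ftrace_match(const char *buff, struct dyn_ftrace *recs,
			 int nrecs, unsigned long flag)
{
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
	}
	for (int i = 0; i < nrecs; i++) {
		if (strcmp(recs[i].name, buff))	/* MATCH_FULL only */
			continue;
		if (not)
			recs[i].flags &= ~flag;
		else
			recs[i].flags |= flag;
	}
}

int main(void)
{
	struct dyn_ftrace recs[] = { { "schedule", 0 }, { "vmalloc", 0 } };

	ftrace_match("schedule", recs, 2, FTRACE_FL_FILTER);	/* set */
	ftrace_match("!schedule", recs, 2, FTRACE_FL_FILTER);	/* clear */
	printf("schedule flags: %lu\n", recs[0].flags);
	return 0;
}
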
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1a3d6b329782..0eb6d48347f7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1748,6 +1748,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
 
 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
 
+static int task_state_char(unsigned long state)
+{
+	int bit = state ? __ffs(state) + 1 : 0;
+
+	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
+}
+
 /*
  * The message is supposed to contain an ending newline.
  * If the printing stops prematurely, try to add a newline of our own.
@@ -1816,7 +1823,6 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 	char *comm;
 	int S, T;
 	int i;
-	unsigned state;
 
 	if (entry->type == TRACE_CONT)
 		return TRACE_TYPE_HANDLED;
@@ -1862,12 +1868,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 
 		trace_assign_type(field, entry);
 
-		T = field->next_state < sizeof(state_to_char) ?
-			state_to_char[field->next_state] : 'X';
-
-		state = field->prev_state ?
-			__ffs(field->prev_state) + 1 : 0;
-		S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
+		T = task_state_char(field->next_state);
+		S = task_state_char(field->prev_state);
 		comm = trace_find_cmdline(field->next_pid);
 		trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
 			field->prev_pid,
@@ -2008,10 +2010,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 
 		trace_assign_type(field, entry);
 
-		S = field->prev_state < sizeof(state_to_char) ?
-			state_to_char[field->prev_state] : 'X';
-		T = field->next_state < sizeof(state_to_char) ?
-			state_to_char[field->next_state] : 'X';
+		T = task_state_char(field->next_state);
+		S = task_state_char(field->prev_state);
 		ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
 			field->prev_pid,
 			field->prev_prio,
@@ -2141,12 +2141,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 
 		trace_assign_type(field, entry);
 
-		S = field->prev_state < sizeof(state_to_char) ?
-			state_to_char[field->prev_state] : 'X';
-		T = field->next_state < sizeof(state_to_char) ?
-			state_to_char[field->next_state] : 'X';
-		if (entry->type == TRACE_WAKE)
-			S = '+';
+		T = task_state_char(field->next_state);
+		S = entry->type == TRACE_WAKE ? '+' :
+			task_state_char(field->prev_state);
 		ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
 			field->prev_pid,
 			field->prev_prio,
@@ -2233,12 +2230,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 
 		trace_assign_type(field, entry);
 
-		S = field->prev_state < sizeof(state_to_char) ?
-			state_to_char[field->prev_state] : 'X';
-		T = field->next_state < sizeof(state_to_char) ?
-			state_to_char[field->next_state] : 'X';
-		if (entry->type == TRACE_WAKE)
-			S = '+';
+		T = task_state_char(field->next_state);
+		S = entry->type == TRACE_WAKE ? '+' :
+			task_state_char(field->prev_state);
 		SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
 		SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
 		SEQ_PUT_HEX_FIELD_RET(s, S);
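
task_state_char() centralizes the bitmask-to-letter mapping that the four output paths duplicated, and it returns '?' instead of indexing past the end of state_to_char for unknown state bits. A user-space check of the helper, assuming the era's TASK_STATE_TO_CHAR_STR of "RSDTtZX" and using libc ffs() in place of the kernel's __ffs():

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* assumed TASK_STATE_TO_CHAR_STR of the era */
static const char state_to_char[] = "RSDTtZX";

/* models the new helper: map a state bitmask to one letter, with '?'
 * for anything past the known states instead of reading off the end */
static int task_state_char(unsigned long state)
{
	int bit = state ? ffs(state) : 0;	/* ffs() is __ffs() + 1 */

	return bit < (int)sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

int main(void)
{
	printf("%c %c %c %c\n",
	       task_state_char(0),		/* running: R */
	       task_state_char(1),		/* interruptible sleep: S */
	       task_state_char(2),		/* uninterruptible: D */
	       task_state_char(1UL << 10));	/* unknown bit: ? */
	return 0;
}
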
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 863390557b44..781d72ef873c 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -247,3 +247,4 @@ __init static int init_sched_switch_trace(void)
 	return register_tracer(&sched_switch_trace);
 }
 device_initcall(init_sched_switch_trace);
+
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0b863f2cbc8e..d0871bc0aca5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
+static DEFINE_MUTEX(stack_sysctl_mutex);
+
+int stack_tracer_enabled;
+static int last_stack_tracer_enabled;
 
 static inline void check_stack(void)
 {
@@ -174,7 +179,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	return count;
 }
 
-static struct file_operations stack_max_size_fops = {
+static const struct file_operations stack_max_size_fops = {
 	.open = tracing_open_generic,
 	.read = stack_max_size_read,
 	.write = stack_max_size_write,
@@ -272,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations stack_trace_seq_ops = {
+static const struct seq_operations stack_trace_seq_ops = {
 	.start = t_start,
 	.next = t_next,
 	.stop = t_stop,
@@ -288,12 +293,47 @@ static int stack_trace_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static struct file_operations stack_trace_fops = {
+static const struct file_operations stack_trace_fops = {
 	.open = stack_trace_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 };
 
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+		   struct file *file, void __user *buffer, size_t *lenp,
+		   loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&stack_sysctl_mutex);
+
+	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+
+	if (ret || !write ||
+	    (last_stack_tracer_enabled == stack_tracer_enabled))
+		goto out;
+
+	last_stack_tracer_enabled = stack_tracer_enabled;
+
+	if (stack_tracer_enabled)
+		register_ftrace_function(&trace_ops);
+	else
+		unregister_ftrace_function(&trace_ops);
+
+ out:
+	mutex_unlock(&stack_sysctl_mutex);
+	return ret;
+}
+
+static __init int enable_stacktrace(char *str)
+{
+	stack_tracer_enabled = 1;
+	last_stack_tracer_enabled = 1;
+	return 1;
+}
+__setup("stacktrace", enable_stacktrace);
+
 static __init int stack_trace_init(void)
 {
 	struct dentry *d_tracer;
@@ -311,7 +351,8 @@ static __init int stack_trace_init(void)
 	if (!entry)
 		pr_warning("Could not create debugfs 'stack_trace' entry\n");
 
-	register_ftrace_function(&trace_ops);
+	if (stack_tracer_enabled)
+		register_ftrace_function(&trace_ops);
 
 	return 0;
 }
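
stack_trace_sysctl() serializes writers with stack_sysctl_mutex and compares against last_stack_tracer_enabled, so the ftrace hook is registered or unregistered only on a real 0 to 1 (or 1 to 0) transition; writing the same value twice is a no-op. The same pattern in a pthreads sketch, with the proc_dointvec() plumbing reduced to an integer argument (build with gcc -pthread; the tracer stubs are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stack_sysctl_mutex = PTHREAD_MUTEX_INITIALIZER;
static int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static void register_tracer(void)   { puts("tracer registered"); }
static void unregister_tracer(void) { puts("tracer unregistered"); }

/* models the handler body; 'val' stands in for the value that
 * proc_dointvec() parsed out of the user buffer */
static void stack_trace_sysctl_write(int val)
{
	pthread_mutex_lock(&stack_sysctl_mutex);
	stack_tracer_enabled = val;
	if (last_stack_tracer_enabled != stack_tracer_enabled) {
		last_stack_tracer_enabled = stack_tracer_enabled;
		if (stack_tracer_enabled)
			register_tracer();
		else
			unregister_tracer();
	}
	pthread_mutex_unlock(&stack_sysctl_mutex);
}

int main(void)
{
	stack_trace_sysctl_write(1);	/* echo 1 > .../stack_tracer_enabled */
	stack_trace_sysctl_write(1);	/* no change: nothing re-registered */
	stack_trace_sysctl_write(0);	/* echo 0: tracer goes away */
	return 0;
}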