Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	3
-rw-r--r--	kernel/exit.c	3
-rw-r--r--	kernel/fork.c	5
-rw-r--r--	kernel/itimer.c	4
-rw-r--r--	kernel/posix-cpu-timers.c	117
-rw-r--r--	kernel/sched.c	27
-rw-r--r--	kernel/sched_fair.c	11
-rw-r--r--	kernel/sched_stats.h	45
-rw-r--r--	kernel/signal.c	8
-rw-r--r--	kernel/sysctl.c	5
-rw-r--r--	kernel/trace/ring_buffer.c	56
-rw-r--r--	kernel/trace/trace.c	20
-rw-r--r--	kernel/trace/trace_branch.c	4
-rw-r--r--	kernel/trace/trace_functions_graph.c	14
-rw-r--r--	kernel/trace/trace_hw_branches.c	2
-rw-r--r--	kernel/trace/trace_sysprof.c	2
16 files changed, 228 insertions(+), 98 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5a54ff42874e..e14db9c089b9 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2351,7 +2351,7 @@ static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
 		struct cgroup_subsys *ss = subsys[i];
 		if (ss->root == root)
-			mutex_lock_nested(&ss->hierarchy_mutex, i);
+			mutex_lock(&ss->hierarchy_mutex);
 	}
 }
 
@@ -2637,6 +2637,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
 	BUG_ON(!list_empty(&init_task.tasks));
 
 	mutex_init(&ss->hierarchy_mutex);
+	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
 	ss->active = 1;
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index f80dec3f1875..efd30ccf3858 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -118,6 +118,8 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
+		sig->utime = cputime_add(sig->utime, task_utime(tsk));
+		sig->stime = cputime_add(sig->stime, task_stime(tsk));
 		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
@@ -126,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->inblock += task_io_get_inblock(tsk);
 		sig->oublock += task_io_get_oublock(tsk);
 		task_io_accounting_add(&sig->ioac, &tsk->ioac);
+		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
 		sig = NULL; /* Marker for below. */
 	}
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d5dbb7a13e2..a66fbde20715 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -851,13 +851,14 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sig->tty_old_pgrp = NULL;
 	sig->tty = NULL;
 
-	sig->cutime = sig->cstime = cputime_zero;
+	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
 	sig->gtime = cputime_zero;
 	sig->cgtime = cputime_zero;
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
 	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
 	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
 	task_io_accounting_init(&sig->ioac);
+	sig->sum_sched_runtime = 0;
 	taskstats_tgid_init(sig);
 
 	task_lock(current->group_leader);
@@ -1094,7 +1095,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
-	if (unlikely(ptrace_reparented(current)))
+	if (unlikely(current->ptrace))
 		ptrace_fork(p, clone_flags);
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 6a5fe93dd8bd..58762f7077ec 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -62,7 +62,7 @@ int do_getitimer(int which, struct itimerval *value)
 		struct task_cputime cputime;
 		cputime_t utime;
 
-		thread_group_cputime(tsk, &cputime);
+		thread_group_cputimer(tsk, &cputime);
 		utime = cputime.utime;
 		if (cputime_le(cval, utime)) { /* about to fire */
 			cval = jiffies_to_cputime(1);
@@ -82,7 +82,7 @@ int do_getitimer(int which, struct itimerval *value)
 		struct task_cputime times;
 		cputime_t ptime;
 
-		thread_group_cputime(tsk, &times);
+		thread_group_cputimer(tsk, &times);
 		ptime = cputime_add(times.utime, times.stime);
 		if (cputime_le(cval, ptime)) { /* about to fire */
 			cval = jiffies_to_cputime(1);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index fa07da94d7be..2313a4cc14ea 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -230,6 +230,71 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 	return 0;
 }
 
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+	struct sighand_struct *sighand;
+	struct signal_struct *sig;
+	struct task_struct *t;
+
+	*times = INIT_CPUTIME;
+
+	rcu_read_lock();
+	sighand = rcu_dereference(tsk->sighand);
+	if (!sighand)
+		goto out;
+
+	sig = tsk->signal;
+
+	t = tsk;
+	do {
+		times->utime = cputime_add(times->utime, t->utime);
+		times->stime = cputime_add(times->stime, t->stime);
+		times->sum_exec_runtime += t->se.sum_exec_runtime;
+
+		t = next_thread(t);
+	} while (t != tsk);
+
+	times->utime = cputime_add(times->utime, sig->utime);
+	times->stime = cputime_add(times->stime, sig->stime);
+	times->sum_exec_runtime += sig->sum_sched_runtime;
+out:
+	rcu_read_unlock();
+}
+
+static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+{
+	if (cputime_gt(b->utime, a->utime))
+		a->utime = b->utime;
+
+	if (cputime_gt(b->stime, a->stime))
+		a->stime = b->stime;
+
+	if (b->sum_exec_runtime > a->sum_exec_runtime)
+		a->sum_exec_runtime = b->sum_exec_runtime;
+}
+
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+{
+	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	struct task_cputime sum;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cputimer->lock, flags);
+	if (!cputimer->running) {
+		cputimer->running = 1;
+		/*
+		 * The POSIX timer interface allows for absolute time expiry
+		 * values through the TIMER_ABSTIME flag, therefore we have
+		 * to synchronize the timer to the clock every time we start
+		 * it.
+		 */
+		thread_group_cputime(tsk, &sum);
+		update_gt_cputime(&cputimer->cputime, &sum);
+	}
+	*times = cputimer->cputime;
+	spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Sample a process (thread group) clock for the given group_leader task.
  * Must be called with tasklist_lock held for reading.
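
Note: the new thread_group_cputimer() above turns group-clock sampling into a read of a cached task_cputime under cputimer->lock; the O(nr_threads) walk in thread_group_cputime() runs only when a process-wide timer is (re)armed, and update_gt_cputime() keeps the cached clock from ever going backwards across restarts. A minimal userspace sketch of that caching pattern, with illustrative names only (group_times, slow_sum_all_threads, and friends are not kernel APIs):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	struct group_times { uint64_t utime, stime, sum_exec; };

	static struct {
		pthread_mutex_t lock;
		int running;			/* hot paths skip updates when 0 */
		struct group_times cached;
	} cputimer = { PTHREAD_MUTEX_INITIALIZER, 0, { 0, 0, 0 } };

	/* stand-in for the expensive all-threads walk (thread_group_cputime) */
	static struct group_times slow_sum_all_threads(void)
	{
		return (struct group_times){ 100, 40, 140 };
	}

	/* mirror of update_gt_cputime(): never let the cached clock go back */
	static void clamp_monotonic(struct group_times *a, const struct group_times *b)
	{
		if (b->utime > a->utime)
			a->utime = b->utime;
		if (b->stime > a->stime)
			a->stime = b->stime;
		if (b->sum_exec > a->sum_exec)
			a->sum_exec = b->sum_exec;
	}

	static struct group_times group_clock_sample(void)
	{
		struct group_times ret;

		pthread_mutex_lock(&cputimer.lock);
		if (!cputimer.running) {
			/* pay for the full walk only when the timer starts */
			struct group_times sum = slow_sum_all_threads();

			cputimer.running = 1;
			clamp_monotonic(&cputimer.cached, &sum);
		}
		ret = cputimer.cached;
		pthread_mutex_unlock(&cputimer.lock);
		return ret;
	}

	int main(void)
	{
		struct group_times t = group_clock_sample();

		printf("utime=%llu stime=%llu sum=%llu\n",
		       (unsigned long long)t.utime,
		       (unsigned long long)t.stime,
		       (unsigned long long)t.sum_exec);
		return 0;
	}
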
@@ -457,7 +522,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
 	struct task_cputime cputime;
 
-	thread_group_cputime(tsk, &cputime);
+	thread_group_cputimer(tsk, &cputime);
 	cleanup_timers(tsk->signal->cpu_timers,
 		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
@@ -964,6 +1029,19 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }
 
+static void stop_process_timers(struct task_struct *tsk)
+{
+	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	unsigned long flags;
+
+	if (!cputimer->running)
+		return;
+
+	spin_lock_irqsave(&cputimer->lock, flags);
+	cputimer->running = 0;
+	spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -987,13 +1065,15 @@ static void check_process_timers(struct task_struct *tsk,
 	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
 	    list_empty(&timers[CPUCLOCK_VIRT]) &&
 	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
-	    list_empty(&timers[CPUCLOCK_SCHED]))
+	    list_empty(&timers[CPUCLOCK_SCHED])) {
+		stop_process_timers(tsk);
 		return;
+	}
 
 	/*
 	 * Collect the current process totals.
 	 */
-	thread_group_cputime(tsk, &cputime);
+	thread_group_cputimer(tsk, &cputime);
 	utime = cputime.utime;
 	ptime = cputime_add(utime, cputime.stime);
 	sum_sched_runtime = cputime.sum_exec_runtime;
@@ -1259,7 +1339,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	if (!task_cputime_zero(&sig->cputime_expires)) {
 		struct task_cputime group_sample;
 
-		thread_group_cputime(tsk, &group_sample);
+		thread_group_cputimer(tsk, &group_sample);
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
 	}
@@ -1329,6 +1409,33 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+				  struct task_struct *p,
+				  union cpu_time_count *cpu)
+{
+	struct task_cputime cputime;
+
+	thread_group_cputimer(p, &cputime);
+	switch (CPUCLOCK_WHICH(which_clock)) {
+	default:
+		return -EINVAL;
+	case CPUCLOCK_PROF:
+		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		break;
+	case CPUCLOCK_VIRT:
+		cpu->cpu = cputime.utime;
+		break;
+	case CPUCLOCK_SCHED:
+		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		break;
+	}
+	return 0;
+}
+
+/*
  * Set one of the process-wide special case CPU timers.
  * The tsk->sighand->siglock must be held by the caller.
  * The *newval argument is relative and we update it to be absolute, *oldval
@@ -1341,7 +1448,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	struct list_head *head;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
-	cpu_clock_sample_group(clock_idx, tsk, &now);
+	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {
 		if (!cputime_eq(*oldval, cputime_zero)) {
diff --git a/kernel/sched.c b/kernel/sched.c
index 1ffb89514871..5dc3b0a5d35a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
-	if (!sync) {
-		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
-			  p->se.avg_overlap < sysctl_sched_migration_cost)
-			sync = 1;
-	} else {
-		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
-			  p->se.avg_overlap >= sysctl_sched_migration_cost)
-			sync = 0;
-	}
-
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
@@ -3890,19 +3880,24 @@ int select_nohz_load_balancer(int stop_tick)
 	int cpu = smp_processor_id();
 
 	if (stop_tick) {
-		cpumask_set_cpu(cpu, nohz.cpu_mask);
 		cpu_rq(cpu)->in_nohz_recently = 1;
 
-		/*
-		 * If we are going offline and still the leader, give up!
-		 */
-		if (!cpu_active(cpu) &&
-		    atomic_read(&nohz.load_balancer) == cpu) {
+		if (!cpu_active(cpu)) {
+			if (atomic_read(&nohz.load_balancer) != cpu)
+				return 0;
+
+			/*
+			 * If we are going offline and still the leader,
+			 * give up!
+			 */
 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
 				BUG();
+
 			return 0;
 		}
 
+		cpumask_set_cpu(cpu, nohz.cpu_mask);
+
 		/* time for ilb owner also to sleep */
 		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
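
Note: the reordered hunk above makes an offlining CPU bail out early unless it is still the idle-load-balancer owner, and only then resigns via cmpxchg, so a freshly elected owner cannot be clobbered by a plain store. A hedged C11 sketch of that hand-off (names are illustrative, not kernel APIs):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int load_balancer = -1;	/* CPU id of the owner, -1 = none */

	/* Called by a CPU going offline: give up duty only if we hold it. */
	static int resign_if_leader(int cpu)
	{
		int expected = cpu;

		if (atomic_load(&load_balancer) != cpu)
			return 0;	/* not the leader, nothing to hand off */
		/*
		 * Only the leader itself resigns, so the compare-and-swap
		 * must see our own id; failure here would be a BUG() in
		 * the kernel version.
		 */
		if (!atomic_compare_exchange_strong(&load_balancer, &expected, -1))
			return 1;
		return 0;
	}

	int main(void)
	{
		atomic_store(&load_balancer, 3);
		resign_if_leader(3);
		printf("leader after resign: %d\n", atomic_load(&load_balancer));
		return 0;
	}
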
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a7e50ba185ac..0566f2a03c42 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1191,15 +1191,20 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 		  int idx, unsigned long load, unsigned long this_load,
 		  unsigned int imbalance)
 {
+	struct task_struct *curr = this_rq->curr;
+	struct task_group *tg;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
-	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
+	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
+			p->se.avg_overlap > sysctl_sched_migration_cost))
+		sync = 0;
+
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1426,7 +1431,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && sync) {
+	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+			(se->avg_overlap < sysctl_sched_migration_cost &&
+			 pse->avg_overlap < sysctl_sched_migration_cost))) {
 		resched_task(curr);
 		return;
 	}
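
Note: the avg_overlap heuristic removed from try_to_wake_up() in the sched.c hunk now degrades the sync hint inside wake_affine(): a wakeup is treated as synchronous only while both tasks' average run overlap stays below the migration-cost threshold. A toy restatement of just that predicate (the threshold value is illustrative):

	#include <stdio.h>

	#define MIGRATION_COST_NS 500000ULL	/* illustrative threshold */

	static int effective_sync(int sync, unsigned long long curr_overlap,
				  unsigned long long wakee_overlap)
	{
		/* a "sync" hint is honoured only while both overlaps stay small */
		if (sync && (curr_overlap > MIGRATION_COST_NS ||
			     wakee_overlap > MIGRATION_COST_NS))
			sync = 0;
		return sync;
	}

	int main(void)
	{
		printf("%d\n", effective_sync(1, 100000ULL, 900000ULL));	/* 0 */
		printf("%d\n", effective_sync(1, 100000ULL, 200000ULL));	/* 1 */
		return 0;
	}
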
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8ab0cef8ecab..a8f93dd374e1 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,19 +296,21 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 static inline void account_group_user_time(struct task_struct *tsk,
 					   cputime_t cputime)
 {
-	struct task_cputime *times;
-	struct signal_struct *sig;
+	struct thread_group_cputimer *cputimer;
 
 	/* tsk == current, ensure it is safe to use ->signal */
 	if (unlikely(tsk->exit_state))
 		return;
 
-	sig = tsk->signal;
-	times = &sig->cputime.totals;
+	cputimer = &tsk->signal->cputimer;
 
-	spin_lock(&times->lock);
-	times->utime = cputime_add(times->utime, cputime);
-	spin_unlock(&times->lock);
+	if (!cputimer->running)
+		return;
+
+	spin_lock(&cputimer->lock);
+	cputimer->cputime.utime =
+		cputime_add(cputimer->cputime.utime, cputime);
+	spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -324,19 +326,21 @@ static inline void account_group_user_time(struct task_struct *tsk,
 static inline void account_group_system_time(struct task_struct *tsk,
 					     cputime_t cputime)
 {
-	struct task_cputime *times;
-	struct signal_struct *sig;
+	struct thread_group_cputimer *cputimer;
 
 	/* tsk == current, ensure it is safe to use ->signal */
 	if (unlikely(tsk->exit_state))
 		return;
 
-	sig = tsk->signal;
-	times = &sig->cputime.totals;
+	cputimer = &tsk->signal->cputimer;
+
+	if (!cputimer->running)
+		return;
 
-	spin_lock(&times->lock);
-	times->stime = cputime_add(times->stime, cputime);
-	spin_unlock(&times->lock);
+	spin_lock(&cputimer->lock);
+	cputimer->cputime.stime =
+		cputime_add(cputimer->cputime.stime, cputime);
+	spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -352,7 +356,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
 					      unsigned long long ns)
 {
-	struct task_cputime *times;
+	struct thread_group_cputimer *cputimer;
 	struct signal_struct *sig;
 
 	sig = tsk->signal;
@@ -361,9 +365,12 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (unlikely(!sig))
 		return;
 
-	times = &sig->cputime.totals;
+	cputimer = &sig->cputimer;
+
+	if (!cputimer->running)
+		return;
 
-	spin_lock(&times->lock);
-	times->sum_exec_runtime += ns;
-	spin_unlock(&times->lock);
+	spin_lock(&cputimer->lock);
+	cputimer->cputime.sum_exec_runtime += ns;
+	spin_unlock(&cputimer->lock);
 }
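
Note: all three accounting hot paths now test cputimer->running before touching the lock, so the per-tick cost collapses to a flag check while no process-wide timer is armed; a stale read is harmless because thread_group_cputimer() resynchronizes the cached totals when the timer is next started. A userspace sketch of the pattern (illustrative names, a pthread mutex standing in for the kernel spinlock):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	struct group_accounting {
		pthread_mutex_t lock;
		int running;	/* nonzero while a group-wide timer is armed */
		uint64_t utime;
	};

	static struct group_accounting acct = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	/* Per-tick hot path: pay for the lock only when someone is listening. */
	static void account_user_time(uint64_t delta)
	{
		if (!acct.running)	/* cheap unlocked test */
			return;
		pthread_mutex_lock(&acct.lock);
		acct.utime += delta;
		pthread_mutex_unlock(&acct.lock);
	}

	int main(void)
	{
		account_user_time(10);	/* dropped: no timer armed */
		acct.running = 1;
		account_user_time(10);	/* accounted */
		printf("utime=%llu\n", (unsigned long long)acct.utime);
		return 0;
	}
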
diff --git a/kernel/signal.c b/kernel/signal.c
index b6b36768b758..2a74fe87c0dd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1367,7 +1367,6 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 	struct siginfo info;
 	unsigned long flags;
 	struct sighand_struct *psig;
-	struct task_cputime cputime;
 	int ret = sig;
 
 	BUG_ON(sig == -1);
@@ -1397,9 +1396,10 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 	info.si_uid = __task_cred(tsk)->uid;
 	rcu_read_unlock();
 
-	thread_group_cputime(tsk, &cputime);
-	info.si_utime = cputime_to_jiffies(cputime.utime);
-	info.si_stime = cputime_to_jiffies(cputime.stime);
+	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
+						       tsk->signal->utime));
+	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
+						       tsk->signal->stime));
 
 	info.si_status = tsk->exit_code & 0x7f;
 	if (tsk->exit_code & 0x80)
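
Note: the old code filled si_utime/si_stime with jiffies, but userspace interprets those fields as clock_t ticks (USER_HZ); the fix also avoids a full thread-group walk by summing the exiting task's own times with the totals already accumulated in signal_struct. A sketch of how a consumer scales the fixed values, assuming only standard POSIX/glibc interfaces (the sample values are illustrative):

	#define _GNU_SOURCE		/* for si_utime/si_stime in siginfo_t */
	#include <stdio.h>
	#include <signal.h>
	#include <unistd.h>

	static void report_child_times(const siginfo_t *info)
	{
		long tick = sysconf(_SC_CLK_TCK);	/* clock_t ticks per second */

		printf("child: user %.2fs sys %.2fs\n",
		       (double)info->si_utime / tick,
		       (double)info->si_stime / tick);
	}

	int main(void)
	{
		siginfo_t info = { 0 };

		info.si_utime = 150;	/* illustrative: 1.5s at USER_HZ = 100 */
		info.si_stime = 50;
		report_child_times(&info);
		return 0;
	}
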
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 790f9d785663..c5ef44ff850f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -101,6 +101,7 @@ static int two = 2;
 
 static int zero;
 static int one = 1;
+static unsigned long one_ul = 1;
 static int one_hundred = 100;
 
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
@@ -974,7 +975,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_background_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one,
+		.extra1		= &one_ul,
 	},
 	{
 		.ctl_name	= VM_DIRTY_RATIO,
@@ -995,7 +996,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one,
+		.extra1		= &one_ul,
 	},
 	{
 		.procname	= "dirty_writeback_centisecs",
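
Note: dirty_background_bytes and dirty_bytes are unsigned long sysctls, so their bounds handler dereferences extra1 as an unsigned long; pointing extra1 at the int `one` made the minimum check read past the int on 64-bit. A userspace sketch of the size mismatch (handler_min is illustrative, not the sysctl code):

	#include <stdio.h>
	#include <string.h>

	static int one = 1;			/* what the old table pointed at */
	static unsigned long one_ul = 1;	/* what the fixed table points at */

	/* A min/max handler for an unsigned long value reads extra1 like this: */
	static unsigned long handler_min(const void *extra1)
	{
		unsigned long min;

		memcpy(&min, extra1, sizeof(min));	/* reads sizeof(long) bytes */
		return min;
	}

	int main(void)
	{
		printf("sizeof(int)=%zu sizeof(unsigned long)=%zu\n",
		       sizeof(one), sizeof(one_ul));
		/* &one would supply only sizeof(int) valid bytes: an
		 * out-of-bounds read and a bogus minimum on 64-bit. */
		printf("min via &one_ul: %lu\n", handler_min(&one_ul));
		return 0;
	}
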
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 53ba3a6d16d0..a3901b550c93 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -59,7 +59,7 @@ enum {
 	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
 };
 
-static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
+static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
 /**
  * tracing_on - enable all tracing buffers
@@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(tracing_off);
  * tracing_off_permanent - permanently disable ring buffers
  *
  * This function, once called, will disable all ring buffers
- * permanenty.
+ * permanently.
  */
 void tracing_off_permanent(void)
 {
@@ -210,7 +210,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 struct buffer_data_page {
 	u64		time_stamp;	/* page time stamp */
-	local_t		commit;		/* write commited index */
+	local_t		commit;		/* write committed index */
 	unsigned char	data[];		/* data of buffer page */
 };
 
@@ -260,7 +260,7 @@ struct ring_buffer_per_cpu {
 	struct list_head	pages;
 	struct buffer_page	*head_page;	/* read from head */
 	struct buffer_page	*tail_page;	/* write to tail */
-	struct buffer_page	*commit_page;	/* commited pages */
+	struct buffer_page	*commit_page;	/* committed pages */
 	struct buffer_page	*reader_page;
 	unsigned long		overrun;
 	unsigned long		entries;
@@ -273,8 +273,8 @@ struct ring_buffer {
 	unsigned		pages;
 	unsigned		flags;
 	int			cpus;
-	cpumask_var_t		cpumask;
 	atomic_t		record_disabled;
+	cpumask_var_t		cpumask;
 
 	struct mutex		mutex;
 
@@ -303,7 +303,7 @@ struct ring_buffer_iter {
  * check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
  *
- * As a safty measure we check to make sure the data pages have not
+ * As a safety measure we check to make sure the data pages have not
  * been corrupted.
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2332,13 +2332,14 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
-			      struct buffer_data_page *bpage)
+			      struct buffer_data_page *bpage,
+			      unsigned int offset)
 {
 	struct ring_buffer_event *event;
 	unsigned long head;
 
 	__raw_spin_lock(&cpu_buffer->lock);
-	for (head = 0; head < local_read(&bpage->commit);
+	for (head = offset; head < local_read(&bpage->commit);
 	     head += rb_event_length(event)) {
 
 		event = __rb_data_page_index(bpage, head);
@@ -2406,12 +2407,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * to swap with a page in the ring buffer.
  *
  * for example:
- *	rpage = ring_buffer_alloc_page(buffer);
+ *	rpage = ring_buffer_alloc_read_page(buffer);
  *	if (!rpage)
  *		return error;
  *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
- *	if (ret)
- *		process_page(rpage);
+ *	if (ret >= 0)
+ *		process_page(rpage, ret);
  *
  * When @full is set, the function will not return true unless
  * the writer is off the reader page.
@@ -2422,8 +2423,8 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * responsible for that.
  *
  * Returns:
- *  1 if data has been transferred
- *  0 if no data has been transferred.
+ *  >=0 if data has been transferred, returns the offset of consumed data.
+ *  <0 if no data has been transferred.
  */
 int ring_buffer_read_page(struct ring_buffer *buffer,
 			  void **data_page, int cpu, int full)
@@ -2432,7 +2433,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct buffer_data_page *bpage;
 	unsigned long flags;
-	int ret = 0;
+	unsigned int read;
+	int ret = -1;
 
 	if (!data_page)
 		return 0;
@@ -2454,25 +2456,29 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	/* check for data */
 	if (!local_read(&cpu_buffer->reader_page->page->commit))
 		goto out;
+
+	read = cpu_buffer->reader_page->read;
 	/*
 	 * If the writer is already off of the read page, then simply
 	 * switch the read page with the given page. Otherwise
 	 * we need to copy the data from the reader to the writer.
 	 */
 	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
-		unsigned int read = cpu_buffer->reader_page->read;
+		unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
+		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
 
 		if (full)
 			goto out;
 		/* The writer is still on the reader page, we must copy */
-		bpage = cpu_buffer->reader_page->page;
-		memcpy(bpage->data,
-		       cpu_buffer->reader_page->page->data + read,
-		       local_read(&bpage->commit) - read);
+		memcpy(bpage->data + read, rpage->data + read, commit - read);
 
 		/* consume what was read */
-		cpu_buffer->reader_page += read;
+		cpu_buffer->reader_page->read = commit;
 
+		/* update bpage */
+		local_set(&bpage->commit, commit);
+		if (!read)
+			bpage->time_stamp = rpage->time_stamp;
 	} else {
 		/* swap the pages */
 		rb_init_page(bpage);
@@ -2481,10 +2487,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		cpu_buffer->reader_page->read = 0;
 		*data_page = bpage;
 	}
-	ret = 1;
+	ret = read;
 
 	/* update the entry counter */
-	rb_remove_entries(cpu_buffer, bpage);
+	rb_remove_entries(cpu_buffer, bpage, read);
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
@@ -2495,7 +2501,7 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
-	long *p = filp->private_data;
+	unsigned long *p = filp->private_data;
 	char buf[64];
 	int r;
 
@@ -2511,9 +2517,9 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	long *p = filp->private_data;
+	unsigned long *p = filp->private_data;
 	char buf[64];
-	long val;
+	unsigned long val;
 	int ret;
 
 	if (cnt >= sizeof(buf))
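
Note: ring_buffer_read_page() now reports the offset of the first newly consumed byte (>= 0) instead of a bare 0/1, and rb_remove_entries() starts at that offset, so a page that was already partially read is not double-counted. A toy userspace analogue of the new contract (read_page and data_page here are illustrative, not the kernel API):

	#include <string.h>
	#include <stdio.h>

	struct data_page { unsigned int commit; char data[4096]; };

	/* toy stand-in for ring_buffer_read_page(): copies the unread tail */
	static int read_page(struct data_page *src, unsigned int *read_pos,
			     struct data_page *dst)
	{
		unsigned int read = *read_pos;

		if (read >= src->commit)
			return -1;			/* no new data */
		memcpy(dst->data + read, src->data + read, src->commit - read);
		dst->commit = src->commit;
		*read_pos = src->commit;		/* consume what was read */
		return (int)read;			/* offset of fresh data */
	}

	int main(void)
	{
		struct data_page src = { 11, "hello world" }, dst = { 0, "" };
		unsigned int pos = 6;			/* "hello " already seen */
		int ret = read_page(&src, &pos, &dst);

		if (ret >= 0)
			printf("new data at offset %d: %.5s\n", ret, dst.data + ret);
		return 0;
	}
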
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d89821283b47..95f99a7abf2f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -80,7 +80,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  * of the tracer is successful. But that is the only place that sets
  * this back to zero.
  */
-int tracing_disabled = 1;
+static int tracing_disabled = 1;
 
 static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
@@ -459,6 +459,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * Register a new plugin tracer.
  */
 int register_tracer(struct tracer *type)
+__releases(kernel_lock)
+__acquires(kernel_lock)
 {
 	struct tracer *t;
 	int len;
@@ -626,7 +628,7 @@ static int cmdline_idx;
 static DEFINE_SPINLOCK(trace_cmdline_lock);
 
 /* temporary disable recording */
-atomic_t trace_record_cmdline_disabled __read_mostly;
+static atomic_t trace_record_cmdline_disabled __read_mostly;
 
 static void trace_init_cmdlines(void)
 {
@@ -983,10 +985,12 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 #endif
 }
 
-void __trace_userstack(struct trace_array *tr, unsigned long flags)
+#ifdef UNUSED
+static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 {
 	ftrace_trace_userstack(tr, flags, preempt_count());
 }
+#endif /* UNUSED */
 
 static void
 ftrace_trace_special(void *__tr,
@@ -1720,7 +1724,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-int tracing_release(struct inode *inode, struct file *file)
+static int tracing_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = (struct seq_file *)file->private_data;
 	struct trace_iterator *iter = m->private;
@@ -1963,7 +1967,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
 	struct tracer_opt *trace_opts = current_trace->flags->opts;
 
 
-	/* calulate max size */
+	/* calculate max size */
 	for (i = 0; trace_options[i]; i++) {
 		len += strlen(trace_options[i]);
 		len += 3; /* "no" and space */
@@ -2145,7 +2149,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
 {
 	struct trace_array *tr = filp->private_data;
 	char buf[64];
-	long val;
+	unsigned long val;
 	int ret;
 
 	if (cnt >= sizeof(buf))
@@ -2293,9 +2297,9 @@ static ssize_t
 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
-	long *ptr = filp->private_data;
+	unsigned long *ptr = filp->private_data;
 	char buf[64];
-	long val;
+	unsigned long val;
 	int ret;
 
 	if (cnt >= sizeof(buf))
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index f8ae2c50e01d..c2e68d440c4d 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -91,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 
 int enable_branch_tracing(struct trace_array *tr)
 {
-	int ret = 0;
-
 	mutex_lock(&branch_tracing_mutex);
 	branch_tracer = tr;
 	/*
@@ -103,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr)
 	branch_tracing_enabled++;
 	mutex_unlock(&branch_tracing_mutex);
 
-	return ret;
+	return 0;
 }
 
 void disable_branch_tracing(void)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 782ec0fdf453..519a0cab1530 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -186,30 +186,30 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
 	ret = trace_seq_printf(s,
 		" ------------------------------------------\n");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_cpu(s, cpu);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_proc(s, prev_pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = trace_seq_printf(s, " => ");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = print_graph_proc(s, pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	ret = trace_seq_printf(s,
 		"\n ------------------------------------------\n\n");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;
 
-	return ret;
+	return TRACE_TYPE_HANDLED;
 }
 
 static struct ftrace_graph_ret_entry *
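
Note: every early-exit path in verif_pid() evaluated TRACE_TYPE_PARTIAL_LINE as a bare expression and fell through; the fix adds the missing returns and makes the success path return TRACE_TYPE_HANDLED explicitly. The bug class in miniature (compiling with -Wall / -Wunused-value flags the no-effect statement):

	enum print_ret { HANDLED, PARTIAL_LINE };

	static enum print_ret emit(int ok)
	{
		if (!ok)
			PARTIAL_LINE;		/* no effect: missing "return" */
		if (!ok)
			return PARTIAL_LINE;	/* the fix */
		return HANDLED;
	}

	int main(void)
	{
		return emit(1) == HANDLED ? 0 : 1;
	}
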
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index e3e7db61c067..0794dd33f27b 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -75,7 +75,7 @@ static void bts_trace_start(struct trace_array *tr)
 }
 
 /*
- * Start tracing on the current cpu.
+ * Stop tracing on the current cpu.
  * The argument is ignored.
  *
  * pre: bts_tracer_mutex must be locked.
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 9902c15997ad..7c9a2d82a7d8 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 	}
 }
 
-const static struct stacktrace_ops backtrace_ops = {
+static const struct stacktrace_ops backtrace_ops = {
 	.warning		= backtrace_warning,
 	.warning_symbol		= backtrace_warning_symbol,
 	.stack			= backtrace_stack,