path: root/kernel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup/cgroup.c            2
-rw-r--r--  kernel/events/core.c              2
-rw-r--r--  kernel/exit.c                     1
-rw-r--r--  kernel/kexec_file.c               8
-rw-r--r--  kernel/kexec_internal.h           6
-rw-r--r--  kernel/locking/lockdep.c         11
-rw-r--r--  kernel/locking/test-ww_mutex.c    6
-rw-r--r--  kernel/sched/core.c              11
-rw-r--r--  kernel/sched/cpufreq_schedutil.c 19
-rw-r--r--  kernel/sched/fair.c               2
-rw-r--r--  kernel/sched/features.h           5
-rw-r--r--  kernel/sched/wait.c              39
-rw-r--r--  kernel/time/jiffies.c             2
-rw-r--r--  kernel/trace/Kconfig              6
-rw-r--r--  kernel/trace/Makefile             4
-rw-r--r--  kernel/trace/ftrace.c            23
-rw-r--r--  kernel/trace/trace.c             10
-rw-r--r--  kernel/trace/trace_probe.h        4
-rw-r--r--  kernel/trace/trace_stack.c        2
-rw-r--r--  kernel/ucount.c                  18
20 files changed, 124 insertions(+), 57 deletions(-)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 0125589c7428..48851327a15e 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2669,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css)
  *
  * Returns 0 on success, -errno on failure. On failure, csses which have
  * been processed already aren't cleaned up. The caller is responsible for
- * cleaning up with cgroup_apply_control_disble().
+ * cleaning up with cgroup_apply_control_disable().
  */
 static int cgroup_apply_control_enable(struct cgroup *cgrp)
 {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6f41548f2e32..a17ed56c8ce1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -998,7 +998,7 @@ list_update_cgroup_event(struct perf_event *event,
  */
 #define PERF_CPU_HRTIMER (1000 / HZ)
 /*
- * function must be called with interrupts disbled
+ * function must be called with interrupts disabled
  */
 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
diff --git a/kernel/exit.c b/kernel/exit.c
index e126ebf2400c..516acdb0e0ec 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -554,7 +554,6 @@ static void exit_mm(void)
 	enter_lazy_tlb(mm, current);
 	task_unlock(current);
 	mm_update_next_owner(mm);
-	userfaultfd_exit(mm);
 	mmput(mm);
 	if (test_thread_flag(TIF_MEMDIE))
 		exit_oom_victim();
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index b56a558e406d..b118735fea9d 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -614,13 +614,13 @@ static int kexec_calculate_store_digests(struct kimage *image)
 		ret = crypto_shash_final(desc, digest);
 		if (ret)
 			goto out_free_digest;
-		ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
+		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
 						sha_regions, sha_region_sz, 0);
 		if (ret)
 			goto out_free_digest;
 
-		ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
+		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
 						digest, SHA256_DIGEST_SIZE, 0);
 		if (ret)
 			goto out_free_digest;
 	}
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 4cef7e4706b0..799a8a452187 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -15,11 +15,7 @@ int kimage_is_destination_range(struct kimage *image,
 extern struct mutex kexec_mutex;
 
 #ifdef CONFIG_KEXEC_FILE
-struct kexec_sha_region {
-	unsigned long start;
-	unsigned long len;
-};
-
+#include <linux/purgatory.h>
 void kimage_file_post_load_cleanup(struct kimage *image);
 #else /* CONFIG_KEXEC_FILE */
 static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
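
For context, a minimal sketch of the declarations a shared <linux/purgatory.h> would need so that kexec_file.c and the purgatory code agree on the renamed symbols. The struct layout is taken from the lines removed above; the extern shapes are assumptions inferred from the call sites, not copied from the tree.

/* Sketch only: array bounds and exact types are assumptions. */
#include <linux/types.h>

struct kexec_sha_region {
	unsigned long start;
	unsigned long len;
};

/* Symbols resolved inside the purgatory object by kexec_purgatory_get_set_symbol(). */
extern struct kexec_sha_region purgatory_sha_regions[];
extern u8 purgatory_sha256_digest[];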
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 12e38c213b70..a95e5d1f4a9c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3262,10 +3262,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (depth) {
 		hlock = curr->held_locks + depth - 1;
 		if (hlock->class_idx == class_idx && nest_lock) {
-			if (hlock->references)
+			if (hlock->references) {
+				/*
+				 * Check: unsigned int references:12, overflow.
+				 */
+				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+					return 0;
+
 				hlock->references++;
-			else
+			} else {
 				hlock->references = 2;
+			}
 
 			return 1;
 		}
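
The new check guards a bitfield: as the added comment notes, held_lock::references is a 12-bit unsigned bitfield, so one more increment at 4095 would silently wrap to zero and unbalance the lock accounting. A minimal illustration with a hypothetical type name:

struct held_lock_like {			/* hypothetical stand-in for struct held_lock */
	unsigned int references:12;	/* can hold 0..4095 */
};

static int grab_reference(struct held_lock_like *hlock)
{
	if (hlock->references == (1 << 12) - 1)	/* 4095: the next ++ would wrap to 0 */
		return 0;			/* refuse, as the patch above does */
	hlock->references++;
	return 1;
}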
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index da6c9a34f62f..6b7abb334ca6 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -50,7 +50,7 @@ static void test_mutex_work(struct work_struct *work)
 
 	if (mtx->flags & TEST_MTX_TRY) {
 		while (!ww_mutex_trylock(&mtx->mutex))
-			cpu_relax();
+			cond_resched();
 	} else {
 		ww_mutex_lock(&mtx->mutex, NULL);
 	}
@@ -88,7 +88,7 @@ static int __test_mutex(unsigned int flags)
 				ret = -EINVAL;
 				break;
 			}
-			cpu_relax();
+			cond_resched();
 		} while (time_before(jiffies, timeout));
 	} else {
 		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
@@ -627,7 +627,7 @@ static int __init test_ww_mutex_init(void)
 	if (ret)
 		return ret;
 
-	ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
+	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
 	if (ret)
 		return ret;
 
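
The switch from cpu_relax() to cond_resched() matters on non-preemptible kernels: a trylock loop that never sleeps can keep the current mutex owner from running on that CPU, so the test can spin forever; presumably that is the hang these hunks avoid. A minimal sketch of the polling pattern (helper name is illustrative):

/* Illustrative helper: poll a ww_mutex without starving its owner. */
static void poll_lock(struct ww_mutex *lock)
{
	while (!ww_mutex_trylock(lock))
		cond_resched();		/* let the owner run so it can unlock */
}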
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 956383844116..3b31fc05a0f1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 
 	/*
-	 * Optimization: we know that if all tasks are in
-	 * the fair class we can call that function directly:
+	 * Optimization: we know that if all tasks are in the fair class we can
+	 * call that function directly, but only if the @prev task wasn't of a
+	 * higher scheduling class, because otherwise those loose the
+	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely((prev->sched_class == &idle_sched_class ||
+		    prev->sched_class == &fair_sched_class) &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
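
A hedged note on why the shortcut now also checks @prev's class rather than only the run-queue counts; the ordering below is the usual class precedence, stated from memory rather than taken from this patch:

/*
 * Scheduling classes are consulted in strict priority order, roughly:
 *
 *	stop_sched_class > dl_sched_class > rt_sched_class
 *		> fair_sched_class > idle_sched_class
 *
 * If @prev came from one of the higher classes, jumping straight to
 * fair_sched_class.pick_next_task() would skip the balancing work those
 * classes normally do when they are about to run out of tasks, which is
 * the lost "opportunity to pull in more work" the new comment refers to.
 */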
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 8f8de3d4d6b7..cd7cd489f739 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -36,6 +36,7 @@ struct sugov_policy {
 	u64 last_freq_update_time;
 	s64 freq_update_delay_ns;
 	unsigned int next_freq;
+	unsigned int cached_raw_freq;
 
 	/* The next fields are only needed if fast switch cannot be used. */
 	struct irq_work irq_work;
@@ -52,7 +53,6 @@ struct sugov_cpu {
 	struct update_util_data update_util;
 	struct sugov_policy *sg_policy;
 
-	unsigned int cached_raw_freq;
 	unsigned long iowait_boost;
 	unsigned long iowait_boost_max;
 	u64 last_update;
@@ -116,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 /**
  * get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
  * @util: Current CPU utilization.
  * @max: CPU capacity.
  *
@@ -136,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
  * next_freq (as calculated above) is returned, subject to policy min/max and
  * cpufreq driver limitations.
  */
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
-				  unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+				  unsigned long util, unsigned long max)
 {
-	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
 	unsigned int freq = arch_scale_freq_invariant() ?
 				policy->cpuinfo.max_freq : policy->cur;
 
 	freq = (freq + (freq >> 2)) * util / max;
 
-	if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
 		return sg_policy->next_freq;
-	sg_cpu->cached_raw_freq = freq;
+	sg_policy->cached_raw_freq = freq;
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
@@ -213,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	} else {
 		sugov_get_util(&util, &max);
 		sugov_iowait_boost(sg_cpu, &util, &max);
-		next_f = get_next_freq(sg_cpu, util, max);
+		next_f = get_next_freq(sg_policy, util, max);
 	}
 	sugov_update_commit(sg_policy, time, next_f);
 }
@@ -267,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
 		sugov_iowait_boost(j_sg_cpu, &util, &max);
 	}
 
-	return get_next_freq(sg_cpu, util, max);
+	return get_next_freq(sg_policy, util, max);
 }
 
 static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -580,6 +579,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->next_freq = UINT_MAX;
 	sg_policy->work_in_progress = false;
 	sg_policy->need_freq_update = false;
+	sg_policy->cached_raw_freq = 0;
 
 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
@@ -590,7 +590,6 @@ static int sugov_start(struct cpufreq_policy *policy)
 		sg_cpu->max = 0;
 		sg_cpu->flags = SCHED_CPUFREQ_RT;
 		sg_cpu->last_update = 0;
-		sg_cpu->cached_raw_freq = 0;
 		sg_cpu->iowait_boost = 0;
 		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
 		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
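
A worked pass through the formula in get_next_freq() above, with invented numbers, for readers checking the 25% headroom factor:

/*
 * Invented example: arch_scale_freq_invariant() is true, max_freq = 2000000 kHz,
 * util = 512, max = 1024 (the CPU is about half utilized):
 *
 *	freq = (2000000 + (2000000 >> 2)) * 512 / 1024
 *	     = 2500000 * 512 / 1024
 *	     = 1250000 kHz
 *
 * The freq >> 2 term adds 25% headroom, so utilization around 80% of max is
 * already enough to request the maximum frequency.
 */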
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e88b35ac157..dea138964b91 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particularly is sensitive here.
 	 */
-	if ((avg_idle / 512) < avg_cost)
+	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
 		return -1;
 
 	time = local_clock();
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 69631fa46c2f..1b3c8189b286 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  */
 SCHED_FEAT(TTWU_QUEUE, true)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are
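
A hedged usage note for the new feature bit: on kernels built with CONFIG_SCHED_DEBUG, sched_feat() flags can be flipped at run time, which is presumably why the avg_idle cut-off in select_idle_cpu() is gated rather than removed outright:

/*
 * Runtime toggling on SCHED_DEBUG kernels (from memory of the
 * sched_features interface, not from this patch):
 *
 *	echo SIS_AVG_CPU    > /sys/kernel/debug/sched_features
 *	echo NO_SIS_AVG_CPU > /sys/kernel/debug/sched_features
 */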
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4d2ea6f25568..b8c84c6dee64 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -242,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_event);
 
+/*
+ * Note! These two wait functions are entered with the
+ * wait-queue lock held (and interrupts off in the _irq
+ * case), so there is no race with testing the wakeup
+ * condition in the caller before they add the wait
+ * entry to the wake queue.
+ */
+int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+	if (likely(list_empty(&wait->task_list)))
+		__add_wait_queue_tail(wq, wait);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+
+	spin_unlock(&wq->lock);
+	schedule();
+	spin_lock(&wq->lock);
+	return 0;
+}
+EXPORT_SYMBOL(do_wait_intr);
+
+int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+	if (likely(list_empty(&wait->task_list)))
+		__add_wait_queue_tail(wq, wait);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+
+	spin_unlock_irq(&wq->lock);
+	schedule();
+	spin_lock_irq(&wq->lock);
+	return 0;
+}
+EXPORT_SYMBOL(do_wait_intr_irq);
+
 /**
  * finish_wait - clean up after waiting in a queue
  * @q: waitqueue waited on
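
A hedged sketch of how a caller might use the new helpers. The condition, struct and function names are illustrative assumptions; the only contract taken from the code above is that do_wait_intr() is entered and left with wq->lock held and returns -ERESTARTSYS when a signal is pending:

/* Sketch only: 'struct foo' and wait_until_ready() are hypothetical. */
struct foo {
	bool ready;
};

static int wait_until_ready(wait_queue_head_t *wq, struct foo *f)
{
	DEFINE_WAIT(wait);
	int err = 0;

	spin_lock(&wq->lock);
	while (!f->ready) {
		err = do_wait_intr(wq, &wait);	/* drops and re-takes wq->lock */
		if (err)			/* -ERESTARTSYS: interrupted by a signal */
			break;
	}
	if (!list_empty(&wait.task_list))	/* still queued after a signal */
		list_del_init(&wait.task_list);
	__set_current_state(TASK_RUNNING);
	spin_unlock(&wq->lock);
	return err;
}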
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7906b3f0c41a..497719127bf9 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second)
 	shift_hz += cycles_per_tick/2;
 	do_div(shift_hz, cycles_per_tick);
 	/* Calculate nsec_per_tick using shift_hz */
-	nsec_per_tick = (u64)TICK_NSEC << 8;
+	nsec_per_tick = (u64)NSEC_PER_SEC << 8;
 	nsec_per_tick += (u32)shift_hz/2;
 	do_div(nsec_per_tick, (u32)shift_hz);
 
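
A quick arithmetic check of the fix above, using HZ = 250 purely for illustration: shift_hz is roughly HZ scaled by 2^8, so the dividend must be NSEC_PER_SEC scaled the same way, not the already-per-tick TICK_NSEC:

/*
 * Illustration only (HZ == 250 assumed):
 *
 *	shift_hz      ~= HZ << 8                            =   64000
 *	nsec_per_tick  = ((u64)NSEC_PER_SEC << 8) / shift_hz
 *	              ~= 256000000000 / 64000               = 4000000 ns (a 4 ms tick)
 *
 * With the old (u64)TICK_NSEC << 8 numerator the same division yields
 * about 16000 ns, off by a factor of HZ.
 */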
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d5038005eb5d..d4a06e714645 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -429,7 +429,7 @@ config BLK_DEV_IO_TRACE
 
 	  If unsure, say N.
 
-config KPROBE_EVENT
+config KPROBE_EVENTS
 	depends on KPROBES
 	depends on HAVE_REGS_AND_STACK_ACCESS_API
 	bool "Enable kprobes-based dynamic events"
@@ -447,7 +447,7 @@ config KPROBE_EVENT
 	  This option is also required by perf-probe subcommand of perf tools.
 	  If you want to use perf tools, this option is strongly recommended.
 
-config UPROBE_EVENT
+config UPROBE_EVENTS
 	bool "Enable uprobes-based dynamic events"
 	depends on ARCH_SUPPORTS_UPROBES
 	depends on MMU
@@ -466,7 +466,7 @@ config UPROBE_EVENT
 
 config BPF_EVENTS
 	depends on BPF_SYSCALL
-	depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
+	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
 	bool
 	default y
 	help
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index e57980845549..90f2701d92a7 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -57,7 +57,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
 obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
 obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
-obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
+obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
@@ -66,7 +66,7 @@ ifeq ($(CONFIG_TRACING),y)
 obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
-obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d1597c9ee30..b9691ee8f6c1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4416,16 +4416,24 @@ static int __init set_graph_notrace_function(char *str)
 }
 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
 
+static int __init set_graph_max_depth_function(char *str)
+{
+	if (!str)
+		return 0;
+	fgraph_max_depth = simple_strtoul(str, NULL, 0);
+	return 1;
+}
+__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
+
 static void __init set_ftrace_early_graph(char *buf, int enable)
 {
 	int ret;
 	char *func;
 	struct ftrace_hash *hash;
 
-	if (enable)
-		hash = ftrace_graph_hash;
-	else
-		hash = ftrace_graph_notrace_hash;
+	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+	if (WARN_ON(!hash))
+		return;
 
 	while (buf) {
 		func = strsep(&buf, ",");
@@ -4435,6 +4443,11 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
 			printk(KERN_DEBUG "ftrace: function %s not "
 					  "traceable\n", func);
 	}
+
+	if (enable)
+		ftrace_graph_hash = hash;
+	else
+		ftrace_graph_notrace_hash = hash;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -5488,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
  * Normally the mcount trampoline will call the ops->func, but there
  * are times that it should not. For example, if the ops does not
  * have its own recursion protection, then it should call the
- * ftrace_ops_recurs_func() instead.
+ * ftrace_ops_assist_func() instead.
  *
  * Returns the function that the trampoline should call for @ops.
  */
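
A usage note for the first ftrace hunk above: the new __setup() handler makes the graph-tracer depth limit settable from the kernel command line at boot (the value below is illustrative):

/*
 * Boot-time usage, e.g. on the kernel command line:
 *
 *	ftrace_graph_max_depth=5
 *
 * set_graph_max_depth_function("5") parses the value with simple_strtoul()
 * into fgraph_max_depth and returns 1 so the option is treated as handled.
 */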
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 707445ceb7ef..f35109514a01 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4341,22 +4341,22 @@ static const char readme_msg[] =
 	"\t\t\t  traces\n"
 #endif
 #endif /* CONFIG_STACK_TRACER */
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 	"  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 #endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
 	"  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 #endif
-#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
 	"\t  accepts: event-definitions (one definition per line)\n"
 	"\t   Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
 	"\t           -:[<group>/]<event>\n"
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
 #endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
 	"\t    place: <path>:<offset>\n"
 #endif
 	"\t     args: <name>=fetcharg[:type]\n"
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 0c0ae54d44c6..903273c93e61 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -248,7 +248,7 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \
 #define FETCH_TYPE_STRING	0
 #define FETCH_TYPE_STRSIZE	1
 
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
@@ -278,7 +278,7 @@ alloc_symbol_cache(const char *sym, long offset)
 {
 	return NULL;
 }
-#endif /* CONFIG_KPROBE_EVENT */
+#endif /* CONFIG_KPROBE_EVENTS */
 
 struct probe_arg {
 	struct fetch_param fetch;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 1d68b5b7ad41..5fb1f2c87e6b 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -65,7 +65,7 @@ void stack_trace_print(void)
 }
 
 /*
- * When arch-specific code overides this function, the following
+ * When arch-specific code overrides this function, the following
  * data should be filled up, assuming stack_trace_max_lock is held to
  * prevent concurrent updates.
  * stack_trace_index[]
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 62630a40ab3a..b4eeee03934f 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 
 	new->ns = ns;
 	new->uid = uid;
-	atomic_set(&new->count, 0);
+	new->count = 0;
 
 	spin_lock_irq(&ucounts_lock);
 	ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 			ucounts = new;
 		}
 	}
-	if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+	if (ucounts->count == INT_MAX)
 		ucounts = NULL;
+	else
+		ucounts->count += 1;
 	spin_unlock_irq(&ucounts_lock);
 	return ucounts;
 }
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
 {
 	unsigned long flags;
 
-	if (atomic_dec_and_test(&ucounts->count)) {
-		spin_lock_irqsave(&ucounts_lock, flags);
+	spin_lock_irqsave(&ucounts_lock, flags);
+	ucounts->count -= 1;
+	if (!ucounts->count)
 		hlist_del_init(&ucounts->node);
-		spin_unlock_irqrestore(&ucounts_lock, flags);
+	else
+		ucounts = NULL;
+	spin_unlock_irqrestore(&ucounts_lock, flags);
 
-		kfree(ucounts);
-	}
+	kfree(ucounts);
 }
 
 static inline bool atomic_inc_below(atomic_t *v, int u)
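
A generic sketch of the refcounting pattern the ucount hunks switch to, with hypothetical names: once every access to the count already happens under ucounts_lock, a plain int suffices, and kfree(NULL) being a no-op lets the put path clear the pointer instead of branching around the free:

/* Hypothetical example of the same pattern; not kernel API. */
struct obj {
	struct hlist_node node;
	int count;			/* protected by obj_lock */
};

static DEFINE_SPINLOCK(obj_lock);

static void put_obj(struct obj *o)
{
	unsigned long flags;

	spin_lock_irqsave(&obj_lock, flags);
	if (--o->count)
		o = NULL;		/* still referenced: free nothing */
	else
		hlist_del_init(&o->node);
	spin_unlock_irqrestore(&obj_lock, flags);

	kfree(o);			/* kfree(NULL) is a no-op */
}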