author		Dave Airlie <airlied@redhat.com>	2015-08-17 00:13:53 -0400
committer	Dave Airlie <airlied@redhat.com>	2015-08-17 00:13:53 -0400
commit		4eebf60b7452fbd551fd7dece855ba7825a49cbc (patch)
tree		490b4d194ba09c90e10201ab7fc084a0bda0ed27 /kernel
parent		8f9cb50789e76f3e224e8861adf650e55c747af4 (diff)
parent		2c6625cd545bdd66acff14f3394865d43920a5c7 (diff)
Merge tag 'v4.2-rc7' into drm-next
Linux 4.2-rc7

Backmerge master for i915 fixes
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c			91
-rw-r--r--	kernel/events/ring_buffer.c		10
-rw-r--r--	kernel/kthread.c			 4
-rw-r--r--	kernel/locking/qspinlock_paravirt.h	11
-rw-r--r--	kernel/module.c				 8
-rw-r--r--	kernel/resource.c			 6
-rw-r--r--	kernel/signal.c				13
-rw-r--r--	kernel/trace/ftrace.c			52
8 files changed, 138 insertions, 57 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae3419b99..e6feb5114134 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
 	perf_pmu_disable(event->pmu);
 
-	event->tstamp_running += tstamp - event->tstamp_stopped;
-
 	perf_set_shadow_time(event, ctx, tstamp);
 
 	perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
 		goto out;
 	}
 
+	event->tstamp_running += tstamp - event->tstamp_stopped;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
 
-unlock:
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b2be01b1aa9d..c8aa3f75bc4d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
 		rb->aux_priv = NULL;
 	}
 
-	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		rb_free_aux_page(rb, pg);
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
 
-	kfree(rb->aux_pages);
-	rb->aux_nr_pages = 0;
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
 }
 
 void rb_free_aux(struct ring_buffer *rb)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..fdea0bee7b5a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
 {
 	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
 }
+EXPORT_SYMBOL_GPL(kthread_should_park);
 
 /**
  * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
 {
 	__kthread_parkme(to_kthread(current));
 }
+EXPORT_SYMBOL_GPL(kthread_parkme);
 
 static int kthread(void *_create)
 {
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
 	if (kthread)
 		__kthread_unpark(k, kthread);
 }
+EXPORT_SYMBOL_GPL(kthread_unpark);
 
 /**
  * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(kthread_park);
 
 /**
  * kthread_stop - stop a thread created by kthread_create().
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 04ab18151cc8..df19ae4debd0 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,6 +4,7 @@
 
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <linux/debug_locks.h>
 
 /*
  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
+	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+	if (likely(lockval == _Q_LOCKED_VAL))
 		return;
 
+	if (unlikely(lockval != _Q_SLOW_VAL)) {
+		if (debug_locks_silent)
+			return;
+		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+		return;
+	}
+
 	/*
 	 * Since the above failed to release, this must be the SLOW path.
 	 * Therefore start by looking up the blocked node and unhashing it.
diff --git a/kernel/module.c b/kernel/module.c
index 4d2b82e610e2..b86b7bf1be38 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
 }
 EXPORT_SYMBOL_GPL(find_symbol);
 
-/* Search for module by name: must hold module_mutex. */
+/*
+ * Search for module by name: must hold module_mutex (or preempt disabled
+ * for read-only access).
+ */
 static struct module *find_module_all(const char *name, size_t len,
 				      bool even_unformed)
 {
 	struct module *mod;
 
-	module_assert_mutex();
+	module_assert_mutex_or_preempt();
 
 	list_for_each_entry(mod, &modules, list) {
 		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
 
 struct module *find_module(const char *name)
 {
+	module_assert_mutex();
 	return find_module_all(name, strlen(name), false);
 }
 EXPORT_SYMBOL_GPL(find_module);
diff --git a/kernel/resource.c b/kernel/resource.c
index 90552aab5f2d..fed052a1bc9f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
 {
 	struct resource *p;
 	resource_size_t end = start + size - 1;
-	int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	const char *name = "System RAM";
 	int ret = -1;
 
 	read_lock(&resource_lock);
 	for (p = iomem_resource.child; p ; p = p->sibling) {
-		if (end < p->start)
+		if (p->end < start)
 			continue;
 
 		if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
 			ret = 1;
 			break;
 		}
-		if (p->end < start)
+		if (end < p->start)
 			break;	/* not found */
 	}
 	read_unlock(&resource_lock);
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..0f6bbbe77b46 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitly for the right codes here.
 		 */
-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+		if (from->si_signo == SIGBUS &&
+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 #ifdef SEGV_BNDERR
-		err |= __put_user(from->si_lower, &to->si_lower);
-		err |= __put_user(from->si_upper, &to->si_upper);
+		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
+			err |= __put_user(from->si_lower, &to->si_lower);
+			err |= __put_user(from->si_upper, &to->si_upper);
+		}
 #endif
 		break;
 	case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
 			int, sig,
 			struct compat_siginfo __user *, uinfo)
 {
-	siginfo_t info;
+	siginfo_t info = {};
 	int ret = copy_siginfo_from_user32(&info, uinfo);
 	if (unlikely(ret))
 		return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
 			int, sig,
 			struct compat_siginfo __user *, uinfo)
 {
-	siginfo_t info;
+	siginfo_t info = {};
 
 	if (copy_siginfo_from_user32(&info, uinfo))
 		return -EFAULT;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea..eb11011b5292 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
 	struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+	return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 	if (!test_tsk_trace_trace(current))
 		return;
 
-	ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-	/* do not set ftrace_pid_function to itself! */
-	if (func != ftrace_pid_func)
-		ftrace_pid_function = func;
+	op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
-	ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
+	/* Always save the function, and reset at unregistering */
+	ops->saved_func = ops->func;
+
+	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+		ops->func = ftrace_pid_func;
+
 	ftrace_update_trampoline(ops);
 
 	if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
+	ops->func = ops->saved_func;
+
 	return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+	bool enabled = ftrace_pids_enabled();
+	struct ftrace_ops *op;
+
 	/* Only do something if we are tracing something */
 	if (ftrace_trace_function == ftrace_stub)
 		return;
 
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->flags & FTRACE_OPS_FL_PID) {
+			op->func = enabled ? ftrace_pid_func :
+				op->saved_func;
+			ftrace_update_trampoline(op);
+		}
+	} while_for_each_ftrace_op(op);
+
 	update_ftrace_function();
 }
 
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
 	.local_hash.filter_hash		= EMPTY_HASH,
 	INIT_OPS_HASH(global_ops)
 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
-					  FTRACE_OPS_FL_INITIALIZED,
+					  FTRACE_OPS_FL_INITIALIZED |
+					  FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
+				  FTRACE_OPS_FL_INITIALIZED |
+				  FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
 		if (WARN_ON(tr->ops->func != ftrace_stub))
 			printk("ftrace ops had %pS for function\n",
 			       tr->ops->func);
-		/* Only the top level instance does pid tracing */
-		if (!list_empty(&ftrace_pids)) {
-			set_ftrace_pid_function(func);
-			func = ftrace_pid_func;
-		}
 	}
 	tr->ops->func = func;
 	tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
 	mutex_lock(&ftrace_lock);
 
-	if (list_empty(&ftrace_pids) && (!*pos))
+	if (!ftrace_pids_enabled() && (!*pos))
 		return (void *) 1;
 
 	return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
 	.func			= ftrace_stub,
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
 				  FTRACE_OPS_FL_INITIALIZED |
+				  FTRACE_OPS_FL_PID |
 				  FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,