Diffstat (limited to 'kernel')
54 files changed, 4231 insertions, 1637 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 0b72d1a74be0..e2c9d52cfe9e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
| @@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ | |||
| 10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ |
| 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
| 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ | 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ |
| 13 | async.o range.o | 13 | async.o range.o jump_label.o |
| 14 | obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o | 14 | obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o |
| 15 | obj-y += groups.o | 15 | obj-y += groups.o |
| 16 | 16 | ||
| @@ -23,6 +23,7 @@ CFLAGS_REMOVE_rtmutex-debug.o = -pg | |||
| 23 | CFLAGS_REMOVE_cgroup-debug.o = -pg | 23 | CFLAGS_REMOVE_cgroup-debug.o = -pg |
| 24 | CFLAGS_REMOVE_sched_clock.o = -pg | 24 | CFLAGS_REMOVE_sched_clock.o = -pg |
| 25 | CFLAGS_REMOVE_perf_event.o = -pg | 25 | CFLAGS_REMOVE_perf_event.o = -pg |
| 26 | CFLAGS_REMOVE_irq_work.o = -pg | ||
| 26 | endif | 27 | endif |
| 27 | 28 | ||
| 28 | obj-$(CONFIG_FREEZER) += freezer.o | 29 | obj-$(CONFIG_FREEZER) += freezer.o |
| @@ -86,6 +87,7 @@ obj-$(CONFIG_TREE_RCU) += rcutree.o | |||
| 86 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o | 87 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o |
| 87 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o | 88 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o |
| 88 | obj-$(CONFIG_TINY_RCU) += rcutiny.o | 89 | obj-$(CONFIG_TINY_RCU) += rcutiny.o |
| 90 | obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o | ||
| 89 | obj-$(CONFIG_RELAY) += relay.o | 91 | obj-$(CONFIG_RELAY) += relay.o |
| 90 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o | 92 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o |
| 91 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o | 93 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o |
| @@ -100,6 +102,7 @@ obj-$(CONFIG_TRACING) += trace/ | |||
| 100 | obj-$(CONFIG_X86_DS) += trace/ | 102 | obj-$(CONFIG_X86_DS) += trace/ |
| 101 | obj-$(CONFIG_RING_BUFFER) += trace/ | 103 | obj-$(CONFIG_RING_BUFFER) += trace/ |
| 102 | obj-$(CONFIG_SMP) += sched_cpupri.o | 104 | obj-$(CONFIG_SMP) += sched_cpupri.o |
| 105 | obj-$(CONFIG_IRQ_WORK) += irq_work.o | ||
| 103 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | 106 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o |
| 104 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | 107 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o |
| 105 | obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o | 108 | obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c9483d8f6140..291ba3d04bea 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
| @@ -138,7 +138,7 @@ struct css_id { | |||
| 138 | * is called after synchronize_rcu(). But for safe use, css_is_removed() | 138 | * is called after synchronize_rcu(). But for safe use, css_is_removed() |
| 139 | * css_tryget() should be used for avoiding race. | 139 | * css_tryget() should be used for avoiding race. |
| 140 | */ | 140 | */ |
| 141 | struct cgroup_subsys_state *css; | 141 | struct cgroup_subsys_state __rcu *css; |
| 142 | /* | 142 | /* |
| 143 | * ID of this css. | 143 | * ID of this css. |
| 144 | */ | 144 | */ |
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efce8423..c9e2ec0b34a8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
| @@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info) | |||
| 1126 | 1126 | ||
| 1127 | return 0; | 1127 | return 0; |
| 1128 | } | 1128 | } |
| 1129 | |||
| 1130 | /* | ||
| 1131 | * Allocate user-space memory for the duration of a single system call, | ||
| 1132 | * in order to marshall parameters inside a compat thunk. | ||
| 1133 | */ | ||
| 1134 | void __user *compat_alloc_user_space(unsigned long len) | ||
| 1135 | { | ||
| 1136 | void __user *ptr; | ||
| 1137 | |||
| 1138 | /* If len would occupy more than half of the entire compat space... */ | ||
| 1139 | if (unlikely(len > (((compat_uptr_t)~0) >> 1))) | ||
| 1140 | return NULL; | ||
| 1141 | |||
| 1142 | ptr = arch_compat_alloc_user_space(len); | ||
| 1143 | |||
| 1144 | if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) | ||
| 1145 | return NULL; | ||
| 1146 | |||
| 1147 | return ptr; | ||
| 1148 | } | ||
| 1149 | EXPORT_SYMBOL_GPL(compat_alloc_user_space); | ||
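The helper added above is aimed at compat system-call wrappers that need a short-lived scratch area in user space to rebuild a native-sized structure before handing it to the regular syscall path. A minimal sketch of that pattern follows; the foo/foo32 structures and the sys_foo() entry point are invented for illustration and are not part of this patch.

struct foo32 { compat_ulong_t addr; compat_int_t flags; };
struct foo   { unsigned long addr; int flags; };

asmlinkage long compat_sys_foo(struct foo32 __user *ufoo32)
{
        struct foo32 f32;
        struct foo __user *ufoo;

        if (copy_from_user(&f32, ufoo32, sizeof(f32)))
                return -EFAULT;

        /* scratch space, valid only for the duration of this syscall */
        ufoo = compat_alloc_user_space(sizeof(*ufoo));
        if (!ufoo ||
            put_user((unsigned long)f32.addr, &ufoo->addr) ||
            put_user(f32.flags, &ufoo->flags))
                return -EFAULT;

        return sys_foo(ufoo);           /* hand off to the native path */
}
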
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b23c0979bbe7..51b143e2a07a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
| @@ -1397,7 +1397,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, | |||
| 1397 | if (tsk->flags & PF_THREAD_BOUND) | 1397 | if (tsk->flags & PF_THREAD_BOUND) |
| 1398 | return -EINVAL; | 1398 | return -EINVAL; |
| 1399 | 1399 | ||
| 1400 | ret = security_task_setscheduler(tsk, 0, NULL); | 1400 | ret = security_task_setscheduler(tsk); |
| 1401 | if (ret) | 1401 | if (ret) |
| 1402 | return ret; | 1402 | return ret; |
| 1403 | if (threadgroup) { | 1403 | if (threadgroup) { |
| @@ -1405,7 +1405,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, | |||
| 1405 | 1405 | ||
| 1406 | rcu_read_lock(); | 1406 | rcu_read_lock(); |
| 1407 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | 1407 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { |
| 1408 | ret = security_task_setscheduler(c, 0, NULL); | 1408 | ret = security_task_setscheduler(c); |
| 1409 | if (ret) { | 1409 | if (ret) { |
| 1410 | rcu_read_unlock(); | 1410 | rcu_read_unlock(); |
| 1411 | return ret; | 1411 | return ret; |
diff --git a/kernel/exit.c b/kernel/exit.c
index 03120229db28..e2bdf37f9fde 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
| @@ -149,9 +149,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp) | |||
| 149 | { | 149 | { |
| 150 | struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); | 150 | struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); |
| 151 | 151 | ||
| 152 | #ifdef CONFIG_PERF_EVENTS | 152 | perf_event_delayed_put(tsk); |
| 153 | WARN_ON_ONCE(tsk->perf_event_ctxp); | ||
| 154 | #endif | ||
| 155 | trace_sched_process_free(tsk); | 153 | trace_sched_process_free(tsk); |
| 156 | put_task_struct(tsk); | 154 | put_task_struct(tsk); |
| 157 | } | 155 | } |
diff --git a/kernel/fork.c b/kernel/fork.c
index b7e9d60a675d..c445f8cc408d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
| @@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
| 356 | if (IS_ERR(pol)) | 356 | if (IS_ERR(pol)) |
| 357 | goto fail_nomem_policy; | 357 | goto fail_nomem_policy; |
| 358 | vma_set_policy(tmp, pol); | 358 | vma_set_policy(tmp, pol); |
| 359 | tmp->vm_mm = mm; | ||
| 359 | if (anon_vma_fork(tmp, mpnt)) | 360 | if (anon_vma_fork(tmp, mpnt)) |
| 360 | goto fail_nomem_anon_vma_fork; | 361 | goto fail_nomem_anon_vma_fork; |
| 361 | tmp->vm_flags &= ~VM_LOCKED; | 362 | tmp->vm_flags &= ~VM_LOCKED; |
| 362 | tmp->vm_mm = mm; | ||
| 363 | tmp->vm_next = tmp->vm_prev = NULL; | 363 | tmp->vm_next = tmp->vm_prev = NULL; |
| 364 | file = tmp->vm_file; | 364 | file = tmp->vm_file; |
| 365 | if (file) { | 365 | if (file) { |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1decafbb6b1a..72206cf5c6cf 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
| @@ -931,6 +931,7 @@ static inline int | |||
| 931 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | 931 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) |
| 932 | { | 932 | { |
| 933 | if (hrtimer_is_queued(timer)) { | 933 | if (hrtimer_is_queued(timer)) { |
| 934 | unsigned long state; | ||
| 934 | int reprogram; | 935 | int reprogram; |
| 935 | 936 | ||
| 936 | /* | 937 | /* |
| @@ -944,8 +945,13 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | |||
| 944 | debug_deactivate(timer); | 945 | debug_deactivate(timer); |
| 945 | timer_stats_hrtimer_clear_start_info(timer); | 946 | timer_stats_hrtimer_clear_start_info(timer); |
| 946 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); | 947 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); |
| 947 | __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, | 948 | /* |
| 948 | reprogram); | 949 | * We must preserve the CALLBACK state flag here, |
| 950 | * otherwise we could move the timer base in | ||
| 951 | * switch_hrtimer_base. | ||
| 952 | */ | ||
| 953 | state = timer->state & HRTIMER_STATE_CALLBACK; | ||
| 954 | __remove_hrtimer(timer, base, state, reprogram); | ||
| 949 | return 1; | 955 | return 1; |
| 950 | } | 956 | } |
| 951 | return 0; | 957 | return 0; |
| @@ -1231,6 +1237,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) | |||
| 1231 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | 1237 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); |
| 1232 | enqueue_hrtimer(timer, base); | 1238 | enqueue_hrtimer(timer, base); |
| 1233 | } | 1239 | } |
| 1240 | |||
| 1241 | WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK)); | ||
| 1242 | |||
| 1234 | timer->state &= ~HRTIMER_STATE_CALLBACK; | 1243 | timer->state &= ~HRTIMER_STATE_CALLBACK; |
| 1235 | } | 1244 | } |
| 1236 | 1245 | ||
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 0c642d51aac2..53ead174da2f 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
| @@ -98,7 +98,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) | |||
| 98 | printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" | 98 | printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" |
| 99 | " disables this message.\n"); | 99 | " disables this message.\n"); |
| 100 | sched_show_task(t); | 100 | sched_show_task(t); |
| 101 | __debug_show_held_locks(t); | 101 | debug_show_held_locks(t); |
| 102 | 102 | ||
| 103 | touch_nmi_watchdog(); | 103 | touch_nmi_watchdog(); |
| 104 | 104 | ||
| @@ -111,7 +111,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) | |||
| 111 | * periodically exit the critical section and enter a new one. | 111 | * periodically exit the critical section and enter a new one. |
| 112 | * | 112 | * |
| 113 | * For preemptible RCU it is sufficient to call rcu_read_unlock in order | 113 | * For preemptible RCU it is sufficient to call rcu_read_unlock in order |
| 114 | * exit the grace period. For classic RCU, a reschedule is required. | 114 | * to exit the grace period. For classic RCU, a reschedule is required. |
| 115 | */ | 115 | */ |
| 116 | static void rcu_lock_break(struct task_struct *g, struct task_struct *t) | 116 | static void rcu_lock_break(struct task_struct *g, struct task_struct *t) |
| 117 | { | 117 | { |
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index d71a987fd2bf..2c9120f0afca 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
| @@ -113,12 +113,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) | |||
| 113 | */ | 113 | */ |
| 114 | static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) | 114 | static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) |
| 115 | { | 115 | { |
| 116 | struct perf_event_context *ctx = bp->ctx; | 116 | struct task_struct *tsk = bp->hw.bp_target; |
| 117 | struct perf_event *iter; | 117 | struct perf_event *iter; |
| 118 | int count = 0; | 118 | int count = 0; |
| 119 | 119 | ||
| 120 | list_for_each_entry(iter, &bp_task_head, hw.bp_list) { | 120 | list_for_each_entry(iter, &bp_task_head, hw.bp_list) { |
| 121 | if (iter->ctx == ctx && find_slot_idx(iter) == type) | 121 | if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type) |
| 122 | count += hw_breakpoint_weight(iter); | 122 | count += hw_breakpoint_weight(iter); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| @@ -134,7 +134,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, | |||
| 134 | enum bp_type_idx type) | 134 | enum bp_type_idx type) |
| 135 | { | 135 | { |
| 136 | int cpu = bp->cpu; | 136 | int cpu = bp->cpu; |
| 137 | struct task_struct *tsk = bp->ctx->task; | 137 | struct task_struct *tsk = bp->hw.bp_target; |
| 138 | 138 | ||
| 139 | if (cpu >= 0) { | 139 | if (cpu >= 0) { |
| 140 | slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu); | 140 | slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu); |
| @@ -213,7 +213,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, | |||
| 213 | int weight) | 213 | int weight) |
| 214 | { | 214 | { |
| 215 | int cpu = bp->cpu; | 215 | int cpu = bp->cpu; |
| 216 | struct task_struct *tsk = bp->ctx->task; | 216 | struct task_struct *tsk = bp->hw.bp_target; |
| 217 | 217 | ||
| 218 | /* Pinned counter cpu profiling */ | 218 | /* Pinned counter cpu profiling */ |
| 219 | if (!tsk) { | 219 | if (!tsk) { |
| @@ -433,7 +433,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, | |||
| 433 | perf_overflow_handler_t triggered, | 433 | perf_overflow_handler_t triggered, |
| 434 | struct task_struct *tsk) | 434 | struct task_struct *tsk) |
| 435 | { | 435 | { |
| 436 | return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); | 436 | return perf_event_create_kernel_counter(attr, -1, tsk, triggered); |
| 437 | } | 437 | } |
| 438 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); | 438 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); |
| 439 | 439 | ||
| @@ -515,7 +515,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, | |||
| 515 | get_online_cpus(); | 515 | get_online_cpus(); |
| 516 | for_each_online_cpu(cpu) { | 516 | for_each_online_cpu(cpu) { |
| 517 | pevent = per_cpu_ptr(cpu_events, cpu); | 517 | pevent = per_cpu_ptr(cpu_events, cpu); |
| 518 | bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered); | 518 | bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered); |
| 519 | 519 | ||
| 520 | *pevent = bp; | 520 | *pevent = bp; |
| 521 | 521 | ||
| @@ -565,6 +565,61 @@ static struct notifier_block hw_breakpoint_exceptions_nb = { | |||
| 565 | .priority = 0x7fffffff | 565 | .priority = 0x7fffffff |
| 566 | }; | 566 | }; |
| 567 | 567 | ||
| 568 | static void bp_perf_event_destroy(struct perf_event *event) | ||
| 569 | { | ||
| 570 | release_bp_slot(event); | ||
| 571 | } | ||
| 572 | |||
| 573 | static int hw_breakpoint_event_init(struct perf_event *bp) | ||
| 574 | { | ||
| 575 | int err; | ||
| 576 | |||
| 577 | if (bp->attr.type != PERF_TYPE_BREAKPOINT) | ||
| 578 | return -ENOENT; | ||
| 579 | |||
| 580 | err = register_perf_hw_breakpoint(bp); | ||
| 581 | if (err) | ||
| 582 | return err; | ||
| 583 | |||
| 584 | bp->destroy = bp_perf_event_destroy; | ||
| 585 | |||
| 586 | return 0; | ||
| 587 | } | ||
| 588 | |||
| 589 | static int hw_breakpoint_add(struct perf_event *bp, int flags) | ||
| 590 | { | ||
| 591 | if (!(flags & PERF_EF_START)) | ||
| 592 | bp->hw.state = PERF_HES_STOPPED; | ||
| 593 | |||
| 594 | return arch_install_hw_breakpoint(bp); | ||
| 595 | } | ||
| 596 | |||
| 597 | static void hw_breakpoint_del(struct perf_event *bp, int flags) | ||
| 598 | { | ||
| 599 | arch_uninstall_hw_breakpoint(bp); | ||
| 600 | } | ||
| 601 | |||
| 602 | static void hw_breakpoint_start(struct perf_event *bp, int flags) | ||
| 603 | { | ||
| 604 | bp->hw.state = 0; | ||
| 605 | } | ||
| 606 | |||
| 607 | static void hw_breakpoint_stop(struct perf_event *bp, int flags) | ||
| 608 | { | ||
| 609 | bp->hw.state = PERF_HES_STOPPED; | ||
| 610 | } | ||
| 611 | |||
| 612 | static struct pmu perf_breakpoint = { | ||
| 613 | .task_ctx_nr = perf_sw_context, /* could eventually get its own */ | ||
| 614 | |||
| 615 | .event_init = hw_breakpoint_event_init, | ||
| 616 | .add = hw_breakpoint_add, | ||
| 617 | .del = hw_breakpoint_del, | ||
| 618 | .start = hw_breakpoint_start, | ||
| 619 | .stop = hw_breakpoint_stop, | ||
| 620 | .read = hw_breakpoint_pmu_read, | ||
| 621 | }; | ||
| 622 | |||
| 568 | static int __init init_hw_breakpoint(void) | 623 | static int __init init_hw_breakpoint(void) |
| 569 | { | 624 | { |
| 570 | unsigned int **task_bp_pinned; | 625 | unsigned int **task_bp_pinned; |
| @@ -586,6 +641,8 @@ static int __init init_hw_breakpoint(void) | |||
| 586 | 641 | ||
| 587 | constraints_initialized = 1; | 642 | constraints_initialized = 1; |
| 588 | 643 | ||
| 644 | perf_pmu_register(&perf_breakpoint); | ||
| 645 | |||
| 589 | return register_die_notifier(&hw_breakpoint_exceptions_nb); | 646 | return register_die_notifier(&hw_breakpoint_exceptions_nb); |
| 590 | 647 | ||
| 591 | err_alloc: | 648 | err_alloc: |
| @@ -601,8 +658,3 @@ static int __init init_hw_breakpoint(void) | |||
| 601 | core_initcall(init_hw_breakpoint); | 658 | core_initcall(init_hw_breakpoint); |
| 602 | 659 | ||
| 603 | 660 | ||
| 604 | struct pmu perf_ops_bp = { | ||
| 605 | .enable = arch_install_hw_breakpoint, | ||
| 606 | .disable = arch_uninstall_hw_breakpoint, | ||
| 607 | .read = hw_breakpoint_pmu_read, | ||
| 608 | }; | ||
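With the old perf_ops_bp enable/disable pair replaced by a real struct pmu registered through perf_pmu_register(), the external breakpoint API is unchanged apart from the kernel-counter call taking a task_struct pointer instead of a pid. A rough sketch of attaching a write watchpoint to a task under the new prototype; the handler and the watched address are placeholders, and the overflow-handler signature is assumed to still carry the nmi argument in this series.

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static void my_wp_handler(struct perf_event *bp, int nmi,
                          struct perf_sample_data *data,
                          struct pt_regs *regs)
{
        printk(KERN_INFO "watched variable was written\n");
}

static struct perf_event *watch_write(struct task_struct *tsk,
                                      unsigned long addr)
{
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);
        attr.bp_addr = addr;
        attr.bp_len  = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_W;

        /* now resolves to perf_event_create_kernel_counter(&attr, -1,
         * tsk, my_wp_handler) rather than passing tsk->pid */
        return register_user_hw_breakpoint(&attr, my_wp_handler, tsk);
}
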
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
new file mode 100644
index 000000000000..f16763ff8481
--- /dev/null
+++ b/kernel/irq_work.c
| @@ -0,0 +1,164 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
| 3 | * | ||
| 4 | * Provides a framework for enqueueing and running callbacks from hardirq | ||
| 5 | * context. The enqueueing is NMI-safe. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/module.h> | ||
| 10 | #include <linux/irq_work.h> | ||
| 11 | #include <linux/hardirq.h> | ||
| 12 | |||
| 13 | /* | ||
| 14 | * An entry can be in one of four states: | ||
| 15 | * | ||
| 16 | * free NULL, 0 -> {claimed} : free to be used | ||
| 17 | * claimed NULL, 3 -> {pending} : claimed to be enqueued | ||
| 18 | * pending next, 3 -> {busy} : queued, pending callback | ||
| 19 | * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed | ||
| 20 | * | ||
| 21 | * We use the lower two bits of the next pointer to keep PENDING and BUSY | ||
| 22 | * flags. | ||
| 23 | */ | ||
| 24 | |||
| 25 | #define IRQ_WORK_PENDING 1UL | ||
| 26 | #define IRQ_WORK_BUSY 2UL | ||
| 27 | #define IRQ_WORK_FLAGS 3UL | ||
| 28 | |||
| 29 | static inline bool irq_work_is_set(struct irq_work *entry, int flags) | ||
| 30 | { | ||
| 31 | return (unsigned long)entry->next & flags; | ||
| 32 | } | ||
| 33 | |||
| 34 | static inline struct irq_work *irq_work_next(struct irq_work *entry) | ||
| 35 | { | ||
| 36 | unsigned long next = (unsigned long)entry->next; | ||
| 37 | next &= ~IRQ_WORK_FLAGS; | ||
| 38 | return (struct irq_work *)next; | ||
| 39 | } | ||
| 40 | |||
| 41 | static inline struct irq_work *next_flags(struct irq_work *entry, int flags) | ||
| 42 | { | ||
| 43 | unsigned long next = (unsigned long)entry; | ||
| 44 | next |= flags; | ||
| 45 | return (struct irq_work *)next; | ||
| 46 | } | ||
| 47 | |||
| 48 | static DEFINE_PER_CPU(struct irq_work *, irq_work_list); | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Claim the entry so that no one else will poke at it. | ||
| 52 | */ | ||
| 53 | static bool irq_work_claim(struct irq_work *entry) | ||
| 54 | { | ||
| 55 | struct irq_work *next, *nflags; | ||
| 56 | |||
| 57 | do { | ||
| 58 | next = entry->next; | ||
| 59 | if ((unsigned long)next & IRQ_WORK_PENDING) | ||
| 60 | return false; | ||
| 61 | nflags = next_flags(next, IRQ_WORK_FLAGS); | ||
| 62 | } while (cmpxchg(&entry->next, next, nflags) != next); | ||
| 63 | |||
| 64 | return true; | ||
| 65 | } | ||
| 66 | |||
| 67 | |||
| 68 | void __weak arch_irq_work_raise(void) | ||
| 69 | { | ||
| 70 | /* | ||
| 71 | * Lame architectures will get the timer tick callback | ||
| 72 | */ | ||
| 73 | } | ||
| 74 | |||
| 75 | /* | ||
| 76 | * Queue the entry and raise the IPI if needed. | ||
| 77 | */ | ||
| 78 | static void __irq_work_queue(struct irq_work *entry) | ||
| 79 | { | ||
| 80 | struct irq_work **head, *next; | ||
| 81 | |||
| 82 | head = &get_cpu_var(irq_work_list); | ||
| 83 | |||
| 84 | do { | ||
| 85 | next = *head; | ||
| 86 | /* Can assign non-atomic because we keep the flags set. */ | ||
| 87 | entry->next = next_flags(next, IRQ_WORK_FLAGS); | ||
| 88 | } while (cmpxchg(head, next, entry) != next); | ||
| 89 | |||
| 90 | /* The list was empty, raise self-interrupt to start processing. */ | ||
| 91 | if (!irq_work_next(entry)) | ||
| 92 | arch_irq_work_raise(); | ||
| 93 | |||
| 94 | put_cpu_var(irq_work_list); | ||
| 95 | } | ||
| 96 | |||
| 97 | /* | ||
| 98 | * Enqueue the irq_work @entry, returns true on success, failure when the | ||
| 99 | * @entry was already enqueued by someone else. | ||
| 100 | * | ||
| 101 | * Can be re-enqueued while the callback is still in progress. | ||
| 102 | */ | ||
| 103 | bool irq_work_queue(struct irq_work *entry) | ||
| 104 | { | ||
| 105 | if (!irq_work_claim(entry)) { | ||
| 106 | /* | ||
| 107 | * Already enqueued, can't do! | ||
| 108 | */ | ||
| 109 | return false; | ||
| 110 | } | ||
| 111 | |||
| 112 | __irq_work_queue(entry); | ||
| 113 | return true; | ||
| 114 | } | ||
| 115 | EXPORT_SYMBOL_GPL(irq_work_queue); | ||
| 116 | |||
| 117 | /* | ||
| 118 | * Run the irq_work entries on this cpu. Must be run from hardirq | ||
| 119 | * context with local IRQs disabled. | ||
| 120 | */ | ||
| 121 | void irq_work_run(void) | ||
| 122 | { | ||
| 123 | struct irq_work *list, **head; | ||
| 124 | |||
| 125 | head = &__get_cpu_var(irq_work_list); | ||
| 126 | if (*head == NULL) | ||
| 127 | return; | ||
| 128 | |||
| 129 | BUG_ON(!in_irq()); | ||
| 130 | BUG_ON(!irqs_disabled()); | ||
| 131 | |||
| 132 | list = xchg(head, NULL); | ||
| 133 | while (list != NULL) { | ||
| 134 | struct irq_work *entry = list; | ||
| 135 | |||
| 136 | list = irq_work_next(list); | ||
| 137 | |||
| 138 | /* | ||
| 139 | * Clear the PENDING bit, after this point the @entry | ||
| 140 | * can be re-used. | ||
| 141 | */ | ||
| 142 | entry->next = next_flags(NULL, IRQ_WORK_BUSY); | ||
| 143 | entry->func(entry); | ||
| 144 | /* | ||
| 145 | * Clear the BUSY bit and return to the free state if | ||
| 146 | * no-one else claimed it meanwhile. | ||
| 147 | */ | ||
| 148 | cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL); | ||
| 149 | } | ||
| 150 | } | ||
| 151 | EXPORT_SYMBOL_GPL(irq_work_run); | ||
| 152 | |||
| 153 | /* | ||
| 154 | * Synchronize against the irq_work @entry, ensures the entry is not | ||
| 155 | * currently in use. | ||
| 156 | */ | ||
| 157 | void irq_work_sync(struct irq_work *entry) | ||
| 158 | { | ||
| 159 | WARN_ON_ONCE(irqs_disabled()); | ||
| 160 | |||
| 161 | while (irq_work_is_set(entry, IRQ_WORK_BUSY)) | ||
| 162 | cpu_relax(); | ||
| 163 | } | ||
| 164 | EXPORT_SYMBOL_GPL(irq_work_sync); | ||
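A typical client declares a static irq_work with a callback and queues it from a context where almost nothing else is safe (an NMI handler, or deep inside the perf interrupt); the callback then runs from the next hard interrupt via irq_work_run(), or from the timer tick on architectures that do not implement arch_irq_work_raise(). A minimal sketch, with the callback and trigger site invented for illustration and the struct layout assumed from the fields used above (func plus next, where a zero-initialised next means "free"):

#include <linux/irq_work.h>

static void my_deferred_work(struct irq_work *work)
{
        /* called from irq_work_run(): hardirq context, IRQs disabled */
        printk(KERN_INFO "deferred work ran\n");
}

static struct irq_work my_work = {
        .func = my_deferred_work,
};

/* e.g. from an NMI handler */
static void my_nmi_event(void)
{
        /* returns false if the entry is already pending */
        irq_work_queue(&my_work);
}
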
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
new file mode 100644
index 000000000000..7be868bf25c6
--- /dev/null
+++ b/kernel/jump_label.c
| @@ -0,0 +1,429 @@ | |||
| 1 | /* | ||
| 2 | * jump label support | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | #include <linux/jump_label.h> | ||
| 8 | #include <linux/memory.h> | ||
| 9 | #include <linux/uaccess.h> | ||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/list.h> | ||
| 12 | #include <linux/jhash.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | #include <linux/sort.h> | ||
| 15 | #include <linux/err.h> | ||
| 16 | |||
| 17 | #ifdef HAVE_JUMP_LABEL | ||
| 18 | |||
| 19 | #define JUMP_LABEL_HASH_BITS 6 | ||
| 20 | #define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) | ||
| 21 | static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; | ||
| 22 | |||
| 23 | /* mutex to protect coming/going of the jump_label table */ | ||
| 24 | static DEFINE_MUTEX(jump_label_mutex); | ||
| 25 | |||
| 26 | struct jump_label_entry { | ||
| 27 | struct hlist_node hlist; | ||
| 28 | struct jump_entry *table; | ||
| 29 | int nr_entries; | ||
| 30 | /* hang modules off here */ | ||
| 31 | struct hlist_head modules; | ||
| 32 | unsigned long key; | ||
| 33 | }; | ||
| 34 | |||
| 35 | struct jump_label_module_entry { | ||
| 36 | struct hlist_node hlist; | ||
| 37 | struct jump_entry *table; | ||
| 38 | int nr_entries; | ||
| 39 | struct module *mod; | ||
| 40 | }; | ||
| 41 | |||
| 42 | static int jump_label_cmp(const void *a, const void *b) | ||
| 43 | { | ||
| 44 | const struct jump_entry *jea = a; | ||
| 45 | const struct jump_entry *jeb = b; | ||
| 46 | |||
| 47 | if (jea->key < jeb->key) | ||
| 48 | return -1; | ||
| 49 | |||
| 50 | if (jea->key > jeb->key) | ||
| 51 | return 1; | ||
| 52 | |||
| 53 | return 0; | ||
| 54 | } | ||
| 55 | |||
| 56 | static void | ||
| 57 | sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) | ||
| 58 | { | ||
| 59 | unsigned long size; | ||
| 60 | |||
| 61 | size = (((unsigned long)stop - (unsigned long)start) | ||
| 62 | / sizeof(struct jump_entry)); | ||
| 63 | sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); | ||
| 64 | } | ||
| 65 | |||
| 66 | static struct jump_label_entry *get_jump_label_entry(jump_label_t key) | ||
| 67 | { | ||
| 68 | struct hlist_head *head; | ||
| 69 | struct hlist_node *node; | ||
| 70 | struct jump_label_entry *e; | ||
| 71 | u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); | ||
| 72 | |||
| 73 | head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; | ||
| 74 | hlist_for_each_entry(e, node, head, hlist) { | ||
| 75 | if (key == e->key) | ||
| 76 | return e; | ||
| 77 | } | ||
| 78 | return NULL; | ||
| 79 | } | ||
| 80 | |||
| 81 | static struct jump_label_entry * | ||
| 82 | add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) | ||
| 83 | { | ||
| 84 | struct hlist_head *head; | ||
| 85 | struct jump_label_entry *e; | ||
| 86 | u32 hash; | ||
| 87 | |||
| 88 | e = get_jump_label_entry(key); | ||
| 89 | if (e) | ||
| 90 | return ERR_PTR(-EEXIST); | ||
| 91 | |||
| 92 | e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); | ||
| 93 | if (!e) | ||
| 94 | return ERR_PTR(-ENOMEM); | ||
| 95 | |||
| 96 | hash = jhash((void *)&key, sizeof(jump_label_t), 0); | ||
| 97 | head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; | ||
| 98 | e->key = key; | ||
| 99 | e->table = table; | ||
| 100 | e->nr_entries = nr_entries; | ||
| 101 | INIT_HLIST_HEAD(&(e->modules)); | ||
| 102 | hlist_add_head(&e->hlist, head); | ||
| 103 | return e; | ||
| 104 | } | ||
| 105 | |||
| 106 | static int | ||
| 107 | build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) | ||
| 108 | { | ||
| 109 | struct jump_entry *iter, *iter_begin; | ||
| 110 | struct jump_label_entry *entry; | ||
| 111 | int count; | ||
| 112 | |||
| 113 | sort_jump_label_entries(start, stop); | ||
| 114 | iter = start; | ||
| 115 | while (iter < stop) { | ||
| 116 | entry = get_jump_label_entry(iter->key); | ||
| 117 | if (!entry) { | ||
| 118 | iter_begin = iter; | ||
| 119 | count = 0; | ||
| 120 | while ((iter < stop) && | ||
| 121 | (iter->key == iter_begin->key)) { | ||
| 122 | iter++; | ||
| 123 | count++; | ||
| 124 | } | ||
| 125 | entry = add_jump_label_entry(iter_begin->key, | ||
| 126 | count, iter_begin); | ||
| 127 | if (IS_ERR(entry)) | ||
| 128 | return PTR_ERR(entry); | ||
| 129 | } else { | ||
| 130 | WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); | ||
| 131 | return -1; | ||
| 132 | } | ||
| 133 | } | ||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | /*** | ||
| 138 | * jump_label_update - update jump label text | ||
| 139 | * @key - key value associated with a jump label | ||
| 140 | * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE | ||
| 141 | * | ||
| 142 | * Will enable/disable the jump for jump label @key, depending on the | ||
| 143 | * value of @type. | ||
| 144 | * | ||
| 145 | */ | ||
| 146 | |||
| 147 | void jump_label_update(unsigned long key, enum jump_label_type type) | ||
| 148 | { | ||
| 149 | struct jump_entry *iter; | ||
| 150 | struct jump_label_entry *entry; | ||
| 151 | struct hlist_node *module_node; | ||
| 152 | struct jump_label_module_entry *e_module; | ||
| 153 | int count; | ||
| 154 | |||
| 155 | mutex_lock(&jump_label_mutex); | ||
| 156 | entry = get_jump_label_entry((jump_label_t)key); | ||
| 157 | if (entry) { | ||
| 158 | count = entry->nr_entries; | ||
| 159 | iter = entry->table; | ||
| 160 | while (count--) { | ||
| 161 | if (kernel_text_address(iter->code)) | ||
| 162 | arch_jump_label_transform(iter, type); | ||
| 163 | iter++; | ||
| 164 | } | ||
| 165 | /* enable/disable jump labels in modules */ | ||
| 166 | hlist_for_each_entry(e_module, module_node, &(entry->modules), | ||
| 167 | hlist) { | ||
| 168 | count = e_module->nr_entries; | ||
| 169 | iter = e_module->table; | ||
| 170 | while (count--) { | ||
| 171 | if (kernel_text_address(iter->code)) | ||
| 172 | arch_jump_label_transform(iter, type); | ||
| 173 | iter++; | ||
| 174 | } | ||
| 175 | } | ||
| 176 | } | ||
| 177 | mutex_unlock(&jump_label_mutex); | ||
| 178 | } | ||
| 179 | |||
| 180 | static int addr_conflict(struct jump_entry *entry, void *start, void *end) | ||
| 181 | { | ||
| 182 | if (entry->code <= (unsigned long)end && | ||
| 183 | entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) | ||
| 184 | return 1; | ||
| 185 | |||
| 186 | return 0; | ||
| 187 | } | ||
| 188 | |||
| 189 | #ifdef CONFIG_MODULES | ||
| 190 | |||
| 191 | static int module_conflict(void *start, void *end) | ||
| 192 | { | ||
| 193 | struct hlist_head *head; | ||
| 194 | struct hlist_node *node, *node_next, *module_node, *module_node_next; | ||
| 195 | struct jump_label_entry *e; | ||
| 196 | struct jump_label_module_entry *e_module; | ||
| 197 | struct jump_entry *iter; | ||
| 198 | int i, count; | ||
| 199 | int conflict = 0; | ||
| 200 | |||
| 201 | for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { | ||
| 202 | head = &jump_label_table[i]; | ||
| 203 | hlist_for_each_entry_safe(e, node, node_next, head, hlist) { | ||
| 204 | hlist_for_each_entry_safe(e_module, module_node, | ||
| 205 | module_node_next, | ||
| 206 | &(e->modules), hlist) { | ||
| 207 | count = e_module->nr_entries; | ||
| 208 | iter = e_module->table; | ||
| 209 | while (count--) { | ||
| 210 | if (addr_conflict(iter, start, end)) { | ||
| 211 | conflict = 1; | ||
| 212 | goto out; | ||
| 213 | } | ||
| 214 | iter++; | ||
| 215 | } | ||
| 216 | } | ||
| 217 | } | ||
| 218 | } | ||
| 219 | out: | ||
| 220 | return conflict; | ||
| 221 | } | ||
| 222 | |||
| 223 | #endif | ||
| 224 | |||
| 225 | /*** | ||
| 226 | * jump_label_text_reserved - check if addr range is reserved | ||
| 227 | * @start: start text addr | ||
| 228 | * @end: end text addr | ||
| 229 | * | ||
| 230 | * checks if the text addr located between @start and @end | ||
| 231 | * overlaps with any of the jump label patch addresses. Code | ||
| 232 | * that wants to modify kernel text should first verify that | ||
| 233 | * it does not overlap with any of the jump label addresses. | ||
| 234 | * | ||
| 235 | * returns 1 if there is an overlap, 0 otherwise | ||
| 236 | */ | ||
| 237 | int jump_label_text_reserved(void *start, void *end) | ||
| 238 | { | ||
| 239 | struct jump_entry *iter; | ||
| 240 | struct jump_entry *iter_start = __start___jump_table; | ||
| 241 | struct jump_entry *iter_stop = __stop___jump_table; | ||
| 242 | int conflict = 0; | ||
| 243 | |||
| 244 | mutex_lock(&jump_label_mutex); | ||
| 245 | iter = iter_start; | ||
| 246 | while (iter < iter_stop) { | ||
| 247 | if (addr_conflict(iter, start, end)) { | ||
| 248 | conflict = 1; | ||
| 249 | goto out; | ||
| 250 | } | ||
| 251 | iter++; | ||
| 252 | } | ||
| 253 | |||
| 254 | /* now check modules */ | ||
| 255 | #ifdef CONFIG_MODULES | ||
| 256 | conflict = module_conflict(start, end); | ||
| 257 | #endif | ||
| 258 | out: | ||
| 259 | mutex_unlock(&jump_label_mutex); | ||
| 260 | return conflict; | ||
| 261 | } | ||
| 262 | |||
| 263 | static __init int init_jump_label(void) | ||
| 264 | { | ||
| 265 | int ret; | ||
| 266 | struct jump_entry *iter_start = __start___jump_table; | ||
| 267 | struct jump_entry *iter_stop = __stop___jump_table; | ||
| 268 | struct jump_entry *iter; | ||
| 269 | |||
| 270 | mutex_lock(&jump_label_mutex); | ||
| 271 | ret = build_jump_label_hashtable(__start___jump_table, | ||
| 272 | __stop___jump_table); | ||
| 273 | iter = iter_start; | ||
| 274 | while (iter < iter_stop) { | ||
| 275 | arch_jump_label_text_poke_early(iter->code); | ||
| 276 | iter++; | ||
| 277 | } | ||
| 278 | mutex_unlock(&jump_label_mutex); | ||
| 279 | return ret; | ||
| 280 | } | ||
| 281 | early_initcall(init_jump_label); | ||
| 282 | |||
| 283 | #ifdef CONFIG_MODULES | ||
| 284 | |||
| 285 | static struct jump_label_module_entry * | ||
| 286 | add_jump_label_module_entry(struct jump_label_entry *entry, | ||
| 287 | struct jump_entry *iter_begin, | ||
| 288 | int count, struct module *mod) | ||
| 289 | { | ||
| 290 | struct jump_label_module_entry *e; | ||
| 291 | |||
| 292 | e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); | ||
| 293 | if (!e) | ||
| 294 | return ERR_PTR(-ENOMEM); | ||
| 295 | e->mod = mod; | ||
| 296 | e->nr_entries = count; | ||
| 297 | e->table = iter_begin; | ||
| 298 | hlist_add_head(&e->hlist, &entry->modules); | ||
| 299 | return e; | ||
| 300 | } | ||
| 301 | |||
| 302 | static int add_jump_label_module(struct module *mod) | ||
| 303 | { | ||
| 304 | struct jump_entry *iter, *iter_begin; | ||
| 305 | struct jump_label_entry *entry; | ||
| 306 | struct jump_label_module_entry *module_entry; | ||
| 307 | int count; | ||
| 308 | |||
| 309 | /* if the module doesn't have jump label entries, just return */ | ||
| 310 | if (!mod->num_jump_entries) | ||
| 311 | return 0; | ||
| 312 | |||
| 313 | sort_jump_label_entries(mod->jump_entries, | ||
| 314 | mod->jump_entries + mod->num_jump_entries); | ||
| 315 | iter = mod->jump_entries; | ||
| 316 | while (iter < mod->jump_entries + mod->num_jump_entries) { | ||
| 317 | entry = get_jump_label_entry(iter->key); | ||
| 318 | iter_begin = iter; | ||
| 319 | count = 0; | ||
| 320 | while ((iter < mod->jump_entries + mod->num_jump_entries) && | ||
| 321 | (iter->key == iter_begin->key)) { | ||
| 322 | iter++; | ||
| 323 | count++; | ||
| 324 | } | ||
| 325 | if (!entry) { | ||
| 326 | entry = add_jump_label_entry(iter_begin->key, 0, NULL); | ||
| 327 | if (IS_ERR(entry)) | ||
| 328 | return PTR_ERR(entry); | ||
| 329 | } | ||
| 330 | module_entry = add_jump_label_module_entry(entry, iter_begin, | ||
| 331 | count, mod); | ||
| 332 | if (IS_ERR(module_entry)) | ||
| 333 | return PTR_ERR(module_entry); | ||
| 334 | } | ||
| 335 | return 0; | ||
| 336 | } | ||
| 337 | |||
| 338 | static void remove_jump_label_module(struct module *mod) | ||
| 339 | { | ||
| 340 | struct hlist_head *head; | ||
| 341 | struct hlist_node *node, *node_next, *module_node, *module_node_next; | ||
| 342 | struct jump_label_entry *e; | ||
| 343 | struct jump_label_module_entry *e_module; | ||
| 344 | int i; | ||
| 345 | |||
| 346 | /* if the module doesn't have jump label entries, just return */ | ||
| 347 | if (!mod->num_jump_entries) | ||
| 348 | return; | ||
| 349 | |||
| 350 | for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { | ||
| 351 | head = &jump_label_table[i]; | ||
| 352 | hlist_for_each_entry_safe(e, node, node_next, head, hlist) { | ||
| 353 | hlist_for_each_entry_safe(e_module, module_node, | ||
| 354 | module_node_next, | ||
| 355 | &(e->modules), hlist) { | ||
| 356 | if (e_module->mod == mod) { | ||
| 357 | hlist_del(&e_module->hlist); | ||
| 358 | kfree(e_module); | ||
| 359 | } | ||
| 360 | } | ||
| 361 | if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { | ||
| 362 | hlist_del(&e->hlist); | ||
| 363 | kfree(e); | ||
| 364 | } | ||
| 365 | } | ||
| 366 | } | ||
| 367 | } | ||
| 368 | |||
| 369 | static int | ||
| 370 | jump_label_module_notify(struct notifier_block *self, unsigned long val, | ||
| 371 | void *data) | ||
| 372 | { | ||
| 373 | struct module *mod = data; | ||
| 374 | int ret = 0; | ||
| 375 | |||
| 376 | switch (val) { | ||
| 377 | case MODULE_STATE_COMING: | ||
| 378 | mutex_lock(&jump_label_mutex); | ||
| 379 | ret = add_jump_label_module(mod); | ||
| 380 | if (ret) | ||
| 381 | remove_jump_label_module(mod); | ||
| 382 | mutex_unlock(&jump_label_mutex); | ||
| 383 | break; | ||
| 384 | case MODULE_STATE_GOING: | ||
| 385 | mutex_lock(&jump_label_mutex); | ||
| 386 | remove_jump_label_module(mod); | ||
| 387 | mutex_unlock(&jump_label_mutex); | ||
| 388 | break; | ||
| 389 | } | ||
| 390 | return ret; | ||
| 391 | } | ||
| 392 | |||
| 393 | /*** | ||
| 394 | * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop() | ||
| 395 | * @mod: module to patch | ||
| 396 | * | ||
| 397 | * Allow for run-time selection of the optimal nops. Before the module | ||
| 398 | * loads patch these with arch_get_jump_label_nop(), which is specified by | ||
| 399 | * the arch specific jump label code. | ||
| 400 | */ | ||
| 401 | void jump_label_apply_nops(struct module *mod) | ||
| 402 | { | ||
| 403 | struct jump_entry *iter; | ||
| 404 | |||
| 405 | /* if the module doesn't have jump label entries, just return */ | ||
| 406 | if (!mod->num_jump_entries) | ||
| 407 | return; | ||
| 408 | |||
| 409 | iter = mod->jump_entries; | ||
| 410 | while (iter < mod->jump_entries + mod->num_jump_entries) { | ||
| 411 | arch_jump_label_text_poke_early(iter->code); | ||
| 412 | iter++; | ||
| 413 | } | ||
| 414 | } | ||
| 415 | |||
| 416 | struct notifier_block jump_label_module_nb = { | ||
| 417 | .notifier_call = jump_label_module_notify, | ||
| 418 | .priority = 0, | ||
| 419 | }; | ||
| 420 | |||
| 421 | static __init int init_jump_label_module(void) | ||
| 422 | { | ||
| 423 | return register_module_notifier(&jump_label_module_nb); | ||
| 424 | } | ||
| 425 | early_initcall(init_jump_label_module); | ||
| 426 | |||
| 427 | #endif /* CONFIG_MODULES */ | ||
| 428 | |||
| 429 | #endif | ||
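The intended consumer (tracepoints are the first user) tests a key at the branch site through the arch asm-goto macro, which is compiled to a NOP by default, and flips every matching jump_entry at runtime with jump_label_update(). A sketch of the control side only, using the JUMP_LABEL_ENABLE/JUMP_LABEL_DISABLE values named in the kerneldoc above; the key variable is illustrative and the fast-path branch macro from the companion header is only hinted at in the comment.

#include <linux/jump_label.h>

/* the branch site would test this through the arch jump label
 * asm-goto macro; its address is what ends up in __jump_table */
static int my_feature_key;

static void my_feature_set(bool enable)
{
        /* patches every jump_entry keyed on &my_feature_key, in the
         * core kernel and in all currently loaded modules */
        jump_label_update((unsigned long)&my_feature_key,
                          enable ? JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
}
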
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 6b5580c57644..01a0700e873f 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
| @@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, | |||
| 365 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); | 365 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); |
| 366 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); | 366 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); |
| 367 | 367 | ||
| 368 | if (n) | ||
| 369 | sg_mark_end(sgl + n - 1); | ||
| 370 | return n; | 368 | return n; |
| 371 | } | 369 | } |
| 372 | 370 | ||
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 282035f3ae96..ec4210c6501e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
| @@ -47,6 +47,7 @@ | |||
| 47 | #include <linux/memory.h> | 47 | #include <linux/memory.h> |
| 48 | #include <linux/ftrace.h> | 48 | #include <linux/ftrace.h> |
| 49 | #include <linux/cpu.h> | 49 | #include <linux/cpu.h> |
| 50 | #include <linux/jump_label.h> | ||
| 50 | 51 | ||
| 51 | #include <asm-generic/sections.h> | 52 | #include <asm-generic/sections.h> |
| 52 | #include <asm/cacheflush.h> | 53 | #include <asm/cacheflush.h> |
| @@ -399,7 +400,7 @@ static inline int kprobe_optready(struct kprobe *p) | |||
| 399 | * Return an optimized kprobe whose optimizing code replaces | 400 | * Return an optimized kprobe whose optimizing code replaces |
| 400 | * instructions including addr (exclude breakpoint). | 401 | * instructions including addr (exclude breakpoint). |
| 401 | */ | 402 | */ |
| 402 | struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) | 403 | static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) |
| 403 | { | 404 | { |
| 404 | int i; | 405 | int i; |
| 405 | struct kprobe *p = NULL; | 406 | struct kprobe *p = NULL; |
| @@ -831,6 +832,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, | |||
| 831 | 832 | ||
| 832 | void __kprobes kretprobe_hash_lock(struct task_struct *tsk, | 833 | void __kprobes kretprobe_hash_lock(struct task_struct *tsk, |
| 833 | struct hlist_head **head, unsigned long *flags) | 834 | struct hlist_head **head, unsigned long *flags) |
| 835 | __acquires(hlist_lock) | ||
| 834 | { | 836 | { |
| 835 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); | 837 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
| 836 | spinlock_t *hlist_lock; | 838 | spinlock_t *hlist_lock; |
| @@ -842,6 +844,7 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk, | |||
| 842 | 844 | ||
| 843 | static void __kprobes kretprobe_table_lock(unsigned long hash, | 845 | static void __kprobes kretprobe_table_lock(unsigned long hash, |
| 844 | unsigned long *flags) | 846 | unsigned long *flags) |
| 847 | __acquires(hlist_lock) | ||
| 845 | { | 848 | { |
| 846 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); | 849 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); |
| 847 | spin_lock_irqsave(hlist_lock, *flags); | 850 | spin_lock_irqsave(hlist_lock, *flags); |
| @@ -849,6 +852,7 @@ static void __kprobes kretprobe_table_lock(unsigned long hash, | |||
| 849 | 852 | ||
| 850 | void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, | 853 | void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, |
| 851 | unsigned long *flags) | 854 | unsigned long *flags) |
| 855 | __releases(hlist_lock) | ||
| 852 | { | 856 | { |
| 853 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); | 857 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
| 854 | spinlock_t *hlist_lock; | 858 | spinlock_t *hlist_lock; |
| @@ -857,7 +861,9 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, | |||
| 857 | spin_unlock_irqrestore(hlist_lock, *flags); | 861 | spin_unlock_irqrestore(hlist_lock, *flags); |
| 858 | } | 862 | } |
| 859 | 863 | ||
| 860 | void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags) | 864 | static void __kprobes kretprobe_table_unlock(unsigned long hash, |
| 865 | unsigned long *flags) | ||
| 866 | __releases(hlist_lock) | ||
| 861 | { | 867 | { |
| 862 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); | 868 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); |
| 863 | spin_unlock_irqrestore(hlist_lock, *flags); | 869 | spin_unlock_irqrestore(hlist_lock, *flags); |
| @@ -1141,7 +1147,8 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
| 1141 | preempt_disable(); | 1147 | preempt_disable(); |
| 1142 | if (!kernel_text_address((unsigned long) p->addr) || | 1148 | if (!kernel_text_address((unsigned long) p->addr) || |
| 1143 | in_kprobes_functions((unsigned long) p->addr) || | 1149 | in_kprobes_functions((unsigned long) p->addr) || |
| 1144 | ftrace_text_reserved(p->addr, p->addr)) { | 1150 | ftrace_text_reserved(p->addr, p->addr) || |
| 1151 | jump_label_text_reserved(p->addr, p->addr)) { | ||
| 1145 | preempt_enable(); | 1152 | preempt_enable(); |
| 1146 | return -EINVAL; | 1153 | return -EINVAL; |
| 1147 | } | 1154 | } |
| @@ -1339,18 +1346,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num) | |||
| 1339 | if (num <= 0) | 1346 | if (num <= 0) |
| 1340 | return -EINVAL; | 1347 | return -EINVAL; |
| 1341 | for (i = 0; i < num; i++) { | 1348 | for (i = 0; i < num; i++) { |
| 1342 | unsigned long addr; | 1349 | unsigned long addr, offset; |
| 1343 | jp = jps[i]; | 1350 | jp = jps[i]; |
| 1344 | addr = arch_deref_entry_point(jp->entry); | 1351 | addr = arch_deref_entry_point(jp->entry); |
| 1345 | 1352 | ||
| 1346 | if (!kernel_text_address(addr)) | 1353 | /* Verify probepoint is a function entry point */ |
| 1347 | ret = -EINVAL; | 1354 | if (kallsyms_lookup_size_offset(addr, NULL, &offset) && |
| 1348 | else { | 1355 | offset == 0) { |
| 1349 | /* Todo: Verify probepoint is a function entry point */ | ||
| 1350 | jp->kp.pre_handler = setjmp_pre_handler; | 1356 | jp->kp.pre_handler = setjmp_pre_handler; |
| 1351 | jp->kp.break_handler = longjmp_break_handler; | 1357 | jp->kp.break_handler = longjmp_break_handler; |
| 1352 | ret = register_kprobe(&jp->kp); | 1358 | ret = register_kprobe(&jp->kp); |
| 1353 | } | 1359 | } else |
| 1360 | ret = -EINVAL; | ||
| 1361 | |||
| 1354 | if (ret < 0) { | 1362 | if (ret < 0) { |
| 1355 | if (i > 0) | 1363 | if (i > 0) |
| 1356 | unregister_jprobes(jps, i); | 1364 | unregister_jprobes(jps, i); |
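For jprobe users the visible change is that the probe point must now resolve to a function entry (offset 0 in kallsyms); registering against the middle of a function fails with -EINVAL up front instead of being accepted under the old TODO. A minimal registration sketch under that rule; the probed symbol is do_fork and the handler mirrors its signature in this kernel series, but both are only illustrative here.

#include <linux/kprobes.h>

/* jprobe handlers share the probed function's signature and must
 * end with jprobe_return() */
static long my_do_fork_probe(unsigned long clone_flags,
                             unsigned long stack_start,
                             struct pt_regs *regs,
                             unsigned long stack_size,
                             int __user *parent_tidptr,
                             int __user *child_tidptr)
{
        printk(KERN_INFO "do_fork: clone_flags=0x%lx\n", clone_flags);
        jprobe_return();
        return 0;
}

static struct jprobe my_jprobe = {
        .entry          = my_do_fork_probe,
        .kp.symbol_name = "do_fork",    /* a function entry, offset 0 */
};

static int __init my_jprobe_init(void)
{
        return register_jprobe(&my_jprobe);
}
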
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f2852a510232..42ba65dff7d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
| @@ -639,6 +639,16 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
| 639 | } | 639 | } |
| 640 | #endif | 640 | #endif |
| 641 | 641 | ||
| 642 | if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { | ||
| 643 | debug_locks_off(); | ||
| 644 | printk(KERN_ERR | ||
| 645 | "BUG: looking up invalid subclass: %u\n", subclass); | ||
| 646 | printk(KERN_ERR | ||
| 647 | "turning off the locking correctness validator.\n"); | ||
| 648 | dump_stack(); | ||
| 649 | return NULL; | ||
| 650 | } | ||
| 651 | |||
| 642 | /* | 652 | /* |
| 643 | * Static locks do not have their class-keys yet - for them the key | 653 | * Static locks do not have their class-keys yet - for them the key |
| 644 | * is the lock object itself: | 654 | * is the lock object itself: |
| @@ -774,7 +784,9 @@ out_unlock_set: | |||
| 774 | raw_local_irq_restore(flags); | 784 | raw_local_irq_restore(flags); |
| 775 | 785 | ||
| 776 | if (!subclass || force) | 786 | if (!subclass || force) |
| 777 | lock->class_cache = class; | 787 | lock->class_cache[0] = class; |
| 788 | else if (subclass < NR_LOCKDEP_CACHING_CLASSES) | ||
| 789 | lock->class_cache[subclass] = class; | ||
| 778 | 790 | ||
| 779 | if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) | 791 | if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) |
| 780 | return NULL; | 792 | return NULL; |
| @@ -2679,7 +2691,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
| 2679 | void lockdep_init_map(struct lockdep_map *lock, const char *name, | 2691 | void lockdep_init_map(struct lockdep_map *lock, const char *name, |
| 2680 | struct lock_class_key *key, int subclass) | 2692 | struct lock_class_key *key, int subclass) |
| 2681 | { | 2693 | { |
| 2682 | lock->class_cache = NULL; | 2694 | int i; |
| 2695 | |||
| 2696 | for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) | ||
| 2697 | lock->class_cache[i] = NULL; | ||
| 2698 | |||
| 2683 | #ifdef CONFIG_LOCK_STAT | 2699 | #ifdef CONFIG_LOCK_STAT |
| 2684 | lock->cpu = raw_smp_processor_id(); | 2700 | lock->cpu = raw_smp_processor_id(); |
| 2685 | #endif | 2701 | #endif |
| @@ -2739,21 +2755,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 2739 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 2755 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
| 2740 | return 0; | 2756 | return 0; |
| 2741 | 2757 | ||
| 2742 | if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { | ||
| 2743 | debug_locks_off(); | ||
| 2744 | printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); | ||
| 2745 | printk("turning off the locking correctness validator.\n"); | ||
| 2746 | dump_stack(); | ||
| 2747 | return 0; | ||
| 2748 | } | ||
| 2749 | |||
| 2750 | if (lock->key == &__lockdep_no_validate__) | 2758 | if (lock->key == &__lockdep_no_validate__) |
| 2751 | check = 1; | 2759 | check = 1; |
| 2752 | 2760 | ||
| 2753 | if (!subclass) | 2761 | if (subclass < NR_LOCKDEP_CACHING_CLASSES) |
| 2754 | class = lock->class_cache; | 2762 | class = lock->class_cache[subclass]; |
| 2755 | /* | 2763 | /* |
| 2756 | * Not cached yet or subclass? | 2764 | * Not cached? |
| 2757 | */ | 2765 | */ |
| 2758 | if (unlikely(!class)) { | 2766 | if (unlikely(!class)) { |
| 2759 | class = register_lock_class(lock, subclass, 0); | 2767 | class = register_lock_class(lock, subclass, 0); |
| @@ -2918,7 +2926,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) | |||
| 2918 | return 1; | 2926 | return 1; |
| 2919 | 2927 | ||
| 2920 | if (hlock->references) { | 2928 | if (hlock->references) { |
| 2921 | struct lock_class *class = lock->class_cache; | 2929 | struct lock_class *class = lock->class_cache[0]; |
| 2922 | 2930 | ||
| 2923 | if (!class) | 2931 | if (!class) |
| 2924 | class = look_up_lock_class(lock, 0); | 2932 | class = look_up_lock_class(lock, 0); |
| @@ -3559,7 +3567,12 @@ void lockdep_reset_lock(struct lockdep_map *lock) | |||
| 3559 | if (list_empty(head)) | 3567 | if (list_empty(head)) |
| 3560 | continue; | 3568 | continue; |
| 3561 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3569 | list_for_each_entry_safe(class, next, head, hash_entry) { |
| 3562 | if (unlikely(class == lock->class_cache)) { | 3570 | int match = 0; |
| 3571 | |||
| 3572 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) | ||
| 3573 | match |= class == lock->class_cache[j]; | ||
| 3574 | |||
| 3575 | if (unlikely(match)) { | ||
| 3563 | if (debug_locks_off_graph_unlock()) | 3576 | if (debug_locks_off_graph_unlock()) |
| 3564 | WARN_ON(1); | 3577 | WARN_ON(1); |
| 3565 | goto out_restore; | 3578 | goto out_restore; |
| @@ -3775,7 +3788,7 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks); | |||
| 3775 | * Careful: only use this function if you are sure that | 3788 | * Careful: only use this function if you are sure that |
| 3776 | * the task cannot run in parallel! | 3789 | * the task cannot run in parallel! |
| 3777 | */ | 3790 | */ |
| 3778 | void __debug_show_held_locks(struct task_struct *task) | 3791 | void debug_show_held_locks(struct task_struct *task) |
| 3779 | { | 3792 | { |
| 3780 | if (unlikely(!debug_locks)) { | 3793 | if (unlikely(!debug_locks)) { |
| 3781 | printk("INFO: lockdep is turned off.\n"); | 3794 | printk("INFO: lockdep is turned off.\n"); |
| @@ -3783,12 +3796,6 @@ void __debug_show_held_locks(struct task_struct *task) | |||
| 3783 | } | 3796 | } |
| 3784 | lockdep_print_held_locks(task); | 3797 | lockdep_print_held_locks(task); |
| 3785 | } | 3798 | } |
| 3786 | EXPORT_SYMBOL_GPL(__debug_show_held_locks); | ||
| 3787 | |||
| 3788 | void debug_show_held_locks(struct task_struct *task) | ||
| 3789 | { | ||
| 3790 | __debug_show_held_locks(task); | ||
| 3791 | } | ||
| 3792 | EXPORT_SYMBOL_GPL(debug_show_held_locks); | 3799 | EXPORT_SYMBOL_GPL(debug_show_held_locks); |
| 3793 | 3800 | ||
| 3794 | void lockdep_sys_exit(void) | 3801 | void lockdep_sys_exit(void) |
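The per-subclass cache mostly benefits the *_nested() lock annotations, which previously always fell through to the full hash lookup because only subclass 0 was cached. A small illustration of the call pattern that now hits lock->class_cache[1]; the node structure and locking order are invented.

#include <linux/spinlock.h>

struct my_node {
        spinlock_t lock;
        struct my_node *parent;
};

static void my_node_init(struct my_node *n)
{
        spin_lock_init(&n->lock);       /* all nodes share one lock class */
}

static void my_lock_pair(struct my_node *child)
{
        spin_lock(&child->parent->lock);
        /* same class at subclass 1: after this patch the class is cached
         * in class_cache[1] instead of being re-hashed on every acquire */
        spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

        /* ... both nodes locked ... */

        spin_unlock(&child->lock);
        spin_unlock(&child->parent->lock);
}
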
diff --git a/kernel/module.c b/kernel/module.c
index d0b5f8db11b4..2df46301a7a4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
| @@ -55,6 +55,7 @@ | |||
| 55 | #include <linux/async.h> | 55 | #include <linux/async.h> |
| 56 | #include <linux/percpu.h> | 56 | #include <linux/percpu.h> |
| 57 | #include <linux/kmemleak.h> | 57 | #include <linux/kmemleak.h> |
| 58 | #include <linux/jump_label.h> | ||
| 58 | 59 | ||
| 59 | #define CREATE_TRACE_POINTS | 60 | #define CREATE_TRACE_POINTS |
| 60 | #include <trace/events/module.h> | 61 | #include <trace/events/module.h> |
| @@ -1537,6 +1538,7 @@ static int __unlink_module(void *_mod) | |||
| 1537 | { | 1538 | { |
| 1538 | struct module *mod = _mod; | 1539 | struct module *mod = _mod; |
| 1539 | list_del(&mod->list); | 1540 | list_del(&mod->list); |
| 1541 | module_bug_cleanup(mod); | ||
| 1540 | return 0; | 1542 | return 0; |
| 1541 | } | 1543 | } |
| 1542 | 1544 | ||
| @@ -2308,6 +2310,11 @@ static void find_module_sections(struct module *mod, struct load_info *info) | |||
| 2308 | sizeof(*mod->tracepoints), | 2310 | sizeof(*mod->tracepoints), |
| 2309 | &mod->num_tracepoints); | 2311 | &mod->num_tracepoints); |
| 2310 | #endif | 2312 | #endif |
| 2313 | #ifdef HAVE_JUMP_LABEL | ||
| 2314 | mod->jump_entries = section_objs(info, "__jump_table", | ||
| 2315 | sizeof(*mod->jump_entries), | ||
| 2316 | &mod->num_jump_entries); | ||
| 2317 | #endif | ||
| 2311 | #ifdef CONFIG_EVENT_TRACING | 2318 | #ifdef CONFIG_EVENT_TRACING |
| 2312 | mod->trace_events = section_objs(info, "_ftrace_events", | 2319 | mod->trace_events = section_objs(info, "_ftrace_events", |
| 2313 | sizeof(*mod->trace_events), | 2320 | sizeof(*mod->trace_events), |
| @@ -2625,6 +2632,7 @@ static struct module *load_module(void __user *umod, | |||
| 2625 | if (err < 0) | 2632 | if (err < 0) |
| 2626 | goto ddebug; | 2633 | goto ddebug; |
| 2627 | 2634 | ||
| 2635 | module_bug_finalize(info.hdr, info.sechdrs, mod); | ||
| 2628 | list_add_rcu(&mod->list, &modules); | 2636 | list_add_rcu(&mod->list, &modules); |
| 2629 | mutex_unlock(&module_mutex); | 2637 | mutex_unlock(&module_mutex); |
| 2630 | 2638 | ||
| @@ -2650,6 +2658,8 @@ static struct module *load_module(void __user *umod, | |||
| 2650 | mutex_lock(&module_mutex); | 2658 | mutex_lock(&module_mutex); |
| 2651 | /* Unlink carefully: kallsyms could be walking list. */ | 2659 | /* Unlink carefully: kallsyms could be walking list. */ |
| 2652 | list_del_rcu(&mod->list); | 2660 | list_del_rcu(&mod->list); |
| 2661 | module_bug_cleanup(mod); | ||
| 2662 | |||
| 2653 | ddebug: | 2663 | ddebug: |
| 2654 | if (!mod->taints) | 2664 | if (!mod->taints) |
| 2655 | dynamic_debug_remove(info.debug); | 2665 | dynamic_debug_remove(info.debug); |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index db5b56064687..f309e8014c78 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
| @@ -31,24 +31,18 @@ | |||
| 31 | #include <linux/kernel_stat.h> | 31 | #include <linux/kernel_stat.h> |
| 32 | #include <linux/perf_event.h> | 32 | #include <linux/perf_event.h> |
| 33 | #include <linux/ftrace_event.h> | 33 | #include <linux/ftrace_event.h> |
| 34 | #include <linux/hw_breakpoint.h> | ||
| 35 | 34 | ||
| 36 | #include <asm/irq_regs.h> | 35 | #include <asm/irq_regs.h> |
| 37 | 36 | ||
| 38 | /* | 37 | atomic_t perf_task_events __read_mostly; |
| 39 | * Each CPU has a list of per CPU events: | ||
| 40 | */ | ||
| 41 | static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); | ||
| 42 | |||
| 43 | int perf_max_events __read_mostly = 1; | ||
| 44 | static int perf_reserved_percpu __read_mostly; | ||
| 45 | static int perf_overcommit __read_mostly = 1; | ||
| 46 | |||
| 47 | static atomic_t nr_events __read_mostly; | ||
| 48 | static atomic_t nr_mmap_events __read_mostly; | 38 | static atomic_t nr_mmap_events __read_mostly; |
| 49 | static atomic_t nr_comm_events __read_mostly; | 39 | static atomic_t nr_comm_events __read_mostly; |
| 50 | static atomic_t nr_task_events __read_mostly; | 40 | static atomic_t nr_task_events __read_mostly; |
| 51 | 41 | ||
| 42 | static LIST_HEAD(pmus); | ||
| 43 | static DEFINE_MUTEX(pmus_lock); | ||
| 44 | static struct srcu_struct pmus_srcu; | ||
| 45 | |||
| 52 | /* | 46 | /* |
| 53 | * perf event paranoia level: | 47 | * perf event paranoia level: |
| 54 | * -1 - not paranoid at all | 48 | * -1 - not paranoid at all |
| @@ -67,36 +61,43 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000; | |||
| 67 | 61 | ||
| 68 | static atomic64_t perf_event_id; | 62 | static atomic64_t perf_event_id; |
| 69 | 63 | ||
| 70 | /* | 64 | void __weak perf_event_print_debug(void) { } |
| 71 | * Lock for (sysadmin-configurable) event reservations: | ||
| 72 | */ | ||
| 73 | static DEFINE_SPINLOCK(perf_resource_lock); | ||
| 74 | 65 | ||
| 75 | /* | 66 | extern __weak const char *perf_pmu_name(void) |
| 76 | * Architecture provided APIs - weak aliases: | ||
| 77 | */ | ||
| 78 | extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
| 79 | { | 67 | { |
| 80 | return NULL; | 68 | return "pmu"; |
| 81 | } | 69 | } |
| 82 | 70 | ||
| 83 | void __weak hw_perf_disable(void) { barrier(); } | 71 | void perf_pmu_disable(struct pmu *pmu) |
| 84 | void __weak hw_perf_enable(void) { barrier(); } | 72 | { |
| 85 | 73 | int *count = this_cpu_ptr(pmu->pmu_disable_count); | |
| 86 | void __weak perf_event_print_debug(void) { } | 74 | if (!(*count)++) |
| 87 | 75 | pmu->pmu_disable(pmu); | |
| 88 | static DEFINE_PER_CPU(int, perf_disable_count); | 76 | } |
| 89 | 77 | ||
| 90 | void perf_disable(void) | 78 | void perf_pmu_enable(struct pmu *pmu) |
| 91 | { | 79 | { |
| 92 | if (!__get_cpu_var(perf_disable_count)++) | 80 | int *count = this_cpu_ptr(pmu->pmu_disable_count); |
| 93 | hw_perf_disable(); | 81 | if (!--(*count)) |
| 82 | pmu->pmu_enable(pmu); | ||
| 94 | } | 83 | } |
| 95 | 84 | ||
| 96 | void perf_enable(void) | 85 | static DEFINE_PER_CPU(struct list_head, rotation_list); |
| 86 | |||
| 87 | /* | ||
| 88 | * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized | ||
| 89 | * because they're strictly cpu affine and rotate_start is called with IRQs | ||
| 90 | * disabled, while rotate_context is called from IRQ context. | ||
| 91 | */ | ||
| 92 | static void perf_pmu_rotate_start(struct pmu *pmu) | ||
| 97 | { | 93 | { |
| 98 | if (!--__get_cpu_var(perf_disable_count)) | 94 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| 99 | hw_perf_enable(); | 95 | struct list_head *head = &__get_cpu_var(rotation_list); |
| 96 | |||
| 97 | WARN_ON(!irqs_disabled()); | ||
| 98 | |||
| 99 | if (list_empty(&cpuctx->rotation_list)) | ||
| 100 | list_add(&cpuctx->rotation_list, head); | ||
| 100 | } | 101 | } |
| 101 | 102 | ||
| 102 | static void get_ctx(struct perf_event_context *ctx) | 103 | static void get_ctx(struct perf_event_context *ctx) |
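
The new perf_pmu_disable()/perf_pmu_enable() pair above replaces the old global perf_disable()/perf_enable() with a per-PMU, per-CPU nesting count: only the outermost disable and the matching final enable touch the hardware, so callers can nest freely. A minimal userspace C sketch of that nesting pattern follows; the single static counter stands in for the kernel's per-cpu pmu_disable_count and every name here is illustrative, not the kernel API.

#include <stdio.h>

/* Stand-ins for the real hardware enable/disable callbacks. */
static void hw_disable(void) { puts("hardware counters stopped"); }
static void hw_enable(void)  { puts("hardware counters running"); }

/* One counter per PMU per CPU in the kernel; a single int suffices here. */
static int disable_count;

static void pmu_disable(void)
{
	if (!disable_count++)		/* only the outermost call touches hardware */
		hw_disable();
}

static void pmu_enable(void)
{
	if (!--disable_count)		/* only the matching outermost enable does */
		hw_enable();
}

int main(void)
{
	pmu_disable();			/* outer section */
	pmu_disable();			/* nested section: no hardware access */
	pmu_enable();			/* still disabled */
	pmu_enable();			/* hardware restarted here */
	return 0;
}
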
| @@ -151,13 +152,13 @@ static u64 primary_event_id(struct perf_event *event) | |||
| 151 | * the context could get moved to another task. | 152 | * the context could get moved to another task. |
| 152 | */ | 153 | */ |
| 153 | static struct perf_event_context * | 154 | static struct perf_event_context * |
| 154 | perf_lock_task_context(struct task_struct *task, unsigned long *flags) | 155 | perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) |
| 155 | { | 156 | { |
| 156 | struct perf_event_context *ctx; | 157 | struct perf_event_context *ctx; |
| 157 | 158 | ||
| 158 | rcu_read_lock(); | 159 | rcu_read_lock(); |
| 159 | retry: | 160 | retry: |
| 160 | ctx = rcu_dereference(task->perf_event_ctxp); | 161 | ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); |
| 161 | if (ctx) { | 162 | if (ctx) { |
| 162 | /* | 163 | /* |
| 163 | * If this context is a clone of another, it might | 164 | * If this context is a clone of another, it might |
| @@ -170,7 +171,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags) | |||
| 170 | * can't get swapped on us any more. | 171 | * can't get swapped on us any more. |
| 171 | */ | 172 | */ |
| 172 | raw_spin_lock_irqsave(&ctx->lock, *flags); | 173 | raw_spin_lock_irqsave(&ctx->lock, *flags); |
| 173 | if (ctx != rcu_dereference(task->perf_event_ctxp)) { | 174 | if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { |
| 174 | raw_spin_unlock_irqrestore(&ctx->lock, *flags); | 175 | raw_spin_unlock_irqrestore(&ctx->lock, *flags); |
| 175 | goto retry; | 176 | goto retry; |
| 176 | } | 177 | } |
| @@ -189,12 +190,13 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags) | |||
| 189 | * can't get swapped to another task. This also increments its | 190 | * can't get swapped to another task. This also increments its |
| 190 | * reference count so that the context can't get freed. | 191 | * reference count so that the context can't get freed. |
| 191 | */ | 192 | */ |
| 192 | static struct perf_event_context *perf_pin_task_context(struct task_struct *task) | 193 | static struct perf_event_context * |
| 194 | perf_pin_task_context(struct task_struct *task, int ctxn) | ||
| 193 | { | 195 | { |
| 194 | struct perf_event_context *ctx; | 196 | struct perf_event_context *ctx; |
| 195 | unsigned long flags; | 197 | unsigned long flags; |
| 196 | 198 | ||
| 197 | ctx = perf_lock_task_context(task, &flags); | 199 | ctx = perf_lock_task_context(task, ctxn, &flags); |
| 198 | if (ctx) { | 200 | if (ctx) { |
| 199 | ++ctx->pin_count; | 201 | ++ctx->pin_count; |
| 200 | raw_spin_unlock_irqrestore(&ctx->lock, flags); | 202 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
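
perf_lock_task_context() and perf_pin_task_context() now take a context index (ctxn) because a task carries one perf context per task-context class instead of a single perf_event_ctxp pointer. The lookup keeps its load/lock/revalidate shape: read the pointer under RCU, take the context lock, re-read the slot, and retry if the context was swapped to another task in between. A simplified userspace sketch of that loop, with a pthread mutex standing in for ctx->lock and a C11 atomic pointer standing in for the RCU-protected slot (all structures and names are illustrative):

#include <pthread.h>
#include <stdatomic.h>

struct ctx {
	pthread_mutex_t lock;
	/* ... event lists, counters ... */
};

#define NR_TASK_CTX 2

struct task {
	_Atomic(struct ctx *) ctxp[NR_TASK_CTX];	/* one slot per context class */
};

/* Lock the ctxn-th context of @t, revalidating that the slot still points
 * at the context we locked; another thread may have swapped it meanwhile. */
static struct ctx *lock_task_context(struct task *t, int ctxn)
{
	struct ctx *c;

retry:
	c = atomic_load(&t->ctxp[ctxn]);
	if (!c)
		return NULL;

	pthread_mutex_lock(&c->lock);
	if (c != atomic_load(&t->ctxp[ctxn])) {
		/* swapped under us: drop the stale lock and try again */
		pthread_mutex_unlock(&c->lock);
		goto retry;
	}
	return c;			/* returned locked */
}

The revalidation is what makes the later pointer swap at context-switch time safe: a caller can never keep operating on a context that has silently moved to another task.
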
| @@ -302,6 +304,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) | |||
| 302 | } | 304 | } |
| 303 | 305 | ||
| 304 | list_add_rcu(&event->event_entry, &ctx->event_list); | 306 | list_add_rcu(&event->event_entry, &ctx->event_list); |
| 307 | if (!ctx->nr_events) | ||
| 308 | perf_pmu_rotate_start(ctx->pmu); | ||
| 305 | ctx->nr_events++; | 309 | ctx->nr_events++; |
| 306 | if (event->attr.inherit_stat) | 310 | if (event->attr.inherit_stat) |
| 307 | ctx->nr_stat++; | 311 | ctx->nr_stat++; |
| @@ -311,7 +315,12 @@ static void perf_group_attach(struct perf_event *event) | |||
| 311 | { | 315 | { |
| 312 | struct perf_event *group_leader = event->group_leader; | 316 | struct perf_event *group_leader = event->group_leader; |
| 313 | 317 | ||
| 314 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP); | 318 | /* |
| 319 | * We can have double attach due to group movement in perf_event_open. | ||
| 320 | */ | ||
| 321 | if (event->attach_state & PERF_ATTACH_GROUP) | ||
| 322 | return; | ||
| 323 | |||
| 315 | event->attach_state |= PERF_ATTACH_GROUP; | 324 | event->attach_state |= PERF_ATTACH_GROUP; |
| 316 | 325 | ||
| 317 | if (group_leader == event) | 326 | if (group_leader == event) |
| @@ -408,8 +417,8 @@ event_filter_match(struct perf_event *event) | |||
| 408 | return event->cpu == -1 || event->cpu == smp_processor_id(); | 417 | return event->cpu == -1 || event->cpu == smp_processor_id(); |
| 409 | } | 418 | } |
| 410 | 419 | ||
| 411 | static void | 420 | static int |
| 412 | event_sched_out(struct perf_event *event, | 421 | __event_sched_out(struct perf_event *event, |
| 413 | struct perf_cpu_context *cpuctx, | 422 | struct perf_cpu_context *cpuctx, |
| 414 | struct perf_event_context *ctx) | 423 | struct perf_event_context *ctx) |
| 415 | { | 424 | { |
| @@ -428,15 +437,14 @@ event_sched_out(struct perf_event *event, | |||
| 428 | } | 437 | } |
| 429 | 438 | ||
| 430 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 439 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 431 | return; | 440 | return 0; |
| 432 | 441 | ||
| 433 | event->state = PERF_EVENT_STATE_INACTIVE; | 442 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 434 | if (event->pending_disable) { | 443 | if (event->pending_disable) { |
| 435 | event->pending_disable = 0; | 444 | event->pending_disable = 0; |
| 436 | event->state = PERF_EVENT_STATE_OFF; | 445 | event->state = PERF_EVENT_STATE_OFF; |
| 437 | } | 446 | } |
| 438 | event->tstamp_stopped = ctx->time; | 447 | event->pmu->del(event, 0); |
| 439 | event->pmu->disable(event); | ||
| 440 | event->oncpu = -1; | 448 | event->oncpu = -1; |
| 441 | 449 | ||
| 442 | if (!is_software_event(event)) | 450 | if (!is_software_event(event)) |
| @@ -444,6 +452,19 @@ event_sched_out(struct perf_event *event, | |||
| 444 | ctx->nr_active--; | 452 | ctx->nr_active--; |
| 445 | if (event->attr.exclusive || !cpuctx->active_oncpu) | 453 | if (event->attr.exclusive || !cpuctx->active_oncpu) |
| 446 | cpuctx->exclusive = 0; | 454 | cpuctx->exclusive = 0; |
| 455 | return 1; | ||
| 456 | } | ||
| 457 | |||
| 458 | static void | ||
| 459 | event_sched_out(struct perf_event *event, | ||
| 460 | struct perf_cpu_context *cpuctx, | ||
| 461 | struct perf_event_context *ctx) | ||
| 462 | { | ||
| 463 | int ret; | ||
| 464 | |||
| 465 | ret = __event_sched_out(event, cpuctx, ctx); | ||
| 466 | if (ret) | ||
| 467 | event->tstamp_stopped = ctx->time; | ||
| 447 | } | 468 | } |
| 448 | 469 | ||
| 449 | static void | 470 | static void |
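
Splitting event_sched_out() into __event_sched_out() plus a thin wrapper lets callers choose whether the stop timestamp is committed: the wrapper records tstamp_stopped only when the event was actually running, while error-unwind paths (seen later in group_sched_in()) call the __ variant directly so timing data is not perturbed by an event that never really ran. A reduced sketch of that split, with invented structure and state names:

#include <stdbool.h>

enum ev_state { EV_ACTIVE, EV_INACTIVE, EV_OFF };

struct event {
	enum ev_state state;
	unsigned long long tstamp_stopped;
};

/* Core teardown: returns true only if the event was actually running. */
static bool __sched_out(struct event *e)
{
	if (e->state != EV_ACTIVE)
		return false;
	e->state = EV_INACTIVE;
	/* ... stop the counter ... */
	return true;
}

/* Normal path: commit the stop timestamp only for events that ran. */
static void sched_out(struct event *e, unsigned long long now)
{
	if (__sched_out(e))
		e->tstamp_stopped = now;
}

/* Error-unwind paths (e.g. a failed group transaction) call __sched_out()
 * directly so tstamp_stopped keeps its pre-attempt value. */
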
| @@ -466,6 +487,12 @@ group_sched_out(struct perf_event *group_event, | |||
| 466 | cpuctx->exclusive = 0; | 487 | cpuctx->exclusive = 0; |
| 467 | } | 488 | } |
| 468 | 489 | ||
| 490 | static inline struct perf_cpu_context * | ||
| 491 | __get_cpu_context(struct perf_event_context *ctx) | ||
| 492 | { | ||
| 493 | return this_cpu_ptr(ctx->pmu->pmu_cpu_context); | ||
| 494 | } | ||
| 495 | |||
| 469 | /* | 496 | /* |
| 470 | * Cross CPU call to remove a performance event | 497 | * Cross CPU call to remove a performance event |
| 471 | * | 498 | * |
| @@ -474,9 +501,9 @@ group_sched_out(struct perf_event *group_event, | |||
| 474 | */ | 501 | */ |
| 475 | static void __perf_event_remove_from_context(void *info) | 502 | static void __perf_event_remove_from_context(void *info) |
| 476 | { | 503 | { |
| 477 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
| 478 | struct perf_event *event = info; | 504 | struct perf_event *event = info; |
| 479 | struct perf_event_context *ctx = event->ctx; | 505 | struct perf_event_context *ctx = event->ctx; |
| 506 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | ||
| 480 | 507 | ||
| 481 | /* | 508 | /* |
| 482 | * If this is a task context, we need to check whether it is | 509 | * If this is a task context, we need to check whether it is |
| @@ -487,27 +514,11 @@ static void __perf_event_remove_from_context(void *info) | |||
| 487 | return; | 514 | return; |
| 488 | 515 | ||
| 489 | raw_spin_lock(&ctx->lock); | 516 | raw_spin_lock(&ctx->lock); |
| 490 | /* | ||
| 491 | * Protect the list operation against NMI by disabling the | ||
| 492 | * events on a global level. | ||
| 493 | */ | ||
| 494 | perf_disable(); | ||
| 495 | 517 | ||
| 496 | event_sched_out(event, cpuctx, ctx); | 518 | event_sched_out(event, cpuctx, ctx); |
| 497 | 519 | ||
| 498 | list_del_event(event, ctx); | 520 | list_del_event(event, ctx); |
| 499 | 521 | ||
| 500 | if (!ctx->task) { | ||
| 501 | /* | ||
| 502 | * Allow more per task events with respect to the | ||
| 503 | * reservation: | ||
| 504 | */ | ||
| 505 | cpuctx->max_pertask = | ||
| 506 | min(perf_max_events - ctx->nr_events, | ||
| 507 | perf_max_events - perf_reserved_percpu); | ||
| 508 | } | ||
| 509 | |||
| 510 | perf_enable(); | ||
| 511 | raw_spin_unlock(&ctx->lock); | 522 | raw_spin_unlock(&ctx->lock); |
| 512 | } | 523 | } |
| 513 | 524 | ||
| @@ -572,8 +583,8 @@ retry: | |||
| 572 | static void __perf_event_disable(void *info) | 583 | static void __perf_event_disable(void *info) |
| 573 | { | 584 | { |
| 574 | struct perf_event *event = info; | 585 | struct perf_event *event = info; |
| 575 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
| 576 | struct perf_event_context *ctx = event->ctx; | 586 | struct perf_event_context *ctx = event->ctx; |
| 587 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | ||
| 577 | 588 | ||
| 578 | /* | 589 | /* |
| 579 | * If this is a per-task event, need to check whether this | 590 | * If this is a per-task event, need to check whether this |
| @@ -628,7 +639,7 @@ void perf_event_disable(struct perf_event *event) | |||
| 628 | return; | 639 | return; |
| 629 | } | 640 | } |
| 630 | 641 | ||
| 631 | retry: | 642 | retry: |
| 632 | task_oncpu_function_call(task, __perf_event_disable, event); | 643 | task_oncpu_function_call(task, __perf_event_disable, event); |
| 633 | 644 | ||
| 634 | raw_spin_lock_irq(&ctx->lock); | 645 | raw_spin_lock_irq(&ctx->lock); |
| @@ -653,7 +664,7 @@ void perf_event_disable(struct perf_event *event) | |||
| 653 | } | 664 | } |
| 654 | 665 | ||
| 655 | static int | 666 | static int |
| 656 | event_sched_in(struct perf_event *event, | 667 | __event_sched_in(struct perf_event *event, |
| 657 | struct perf_cpu_context *cpuctx, | 668 | struct perf_cpu_context *cpuctx, |
| 658 | struct perf_event_context *ctx) | 669 | struct perf_event_context *ctx) |
| 659 | { | 670 | { |
| @@ -667,14 +678,12 @@ event_sched_in(struct perf_event *event, | |||
| 667 | */ | 678 | */ |
| 668 | smp_wmb(); | 679 | smp_wmb(); |
| 669 | 680 | ||
| 670 | if (event->pmu->enable(event)) { | 681 | if (event->pmu->add(event, PERF_EF_START)) { |
| 671 | event->state = PERF_EVENT_STATE_INACTIVE; | 682 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 672 | event->oncpu = -1; | 683 | event->oncpu = -1; |
| 673 | return -EAGAIN; | 684 | return -EAGAIN; |
| 674 | } | 685 | } |
| 675 | 686 | ||
| 676 | event->tstamp_running += ctx->time - event->tstamp_stopped; | ||
| 677 | |||
| 678 | if (!is_software_event(event)) | 687 | if (!is_software_event(event)) |
| 679 | cpuctx->active_oncpu++; | 688 | cpuctx->active_oncpu++; |
| 680 | ctx->nr_active++; | 689 | ctx->nr_active++; |
| @@ -685,28 +694,56 @@ event_sched_in(struct perf_event *event, | |||
| 685 | return 0; | 694 | return 0; |
| 686 | } | 695 | } |
| 687 | 696 | ||
| 697 | static inline int | ||
| 698 | event_sched_in(struct perf_event *event, | ||
| 699 | struct perf_cpu_context *cpuctx, | ||
| 700 | struct perf_event_context *ctx) | ||
| 701 | { | ||
| 702 | int ret = __event_sched_in(event, cpuctx, ctx); | ||
| 703 | if (ret) | ||
| 704 | return ret; | ||
| 705 | event->tstamp_running += ctx->time - event->tstamp_stopped; | ||
| 706 | return 0; | ||
| 707 | } | ||
| 708 | |||
| 709 | static void | ||
| 710 | group_commit_event_sched_in(struct perf_event *group_event, | ||
| 711 | struct perf_cpu_context *cpuctx, | ||
| 712 | struct perf_event_context *ctx) | ||
| 713 | { | ||
| 714 | struct perf_event *event; | ||
| 715 | u64 now = ctx->time; | ||
| 716 | |||
| 717 | group_event->tstamp_running += now - group_event->tstamp_stopped; | ||
| 718 | /* | ||
| 719 | * Schedule in siblings as one group (if any): | ||
| 720 | */ | ||
| 721 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { | ||
| 722 | event->tstamp_running += now - event->tstamp_stopped; | ||
| 723 | } | ||
| 724 | } | ||
| 725 | |||
| 688 | static int | 726 | static int |
| 689 | group_sched_in(struct perf_event *group_event, | 727 | group_sched_in(struct perf_event *group_event, |
| 690 | struct perf_cpu_context *cpuctx, | 728 | struct perf_cpu_context *cpuctx, |
| 691 | struct perf_event_context *ctx) | 729 | struct perf_event_context *ctx) |
| 692 | { | 730 | { |
| 693 | struct perf_event *event, *partial_group = NULL; | 731 | struct perf_event *event, *partial_group = NULL; |
| 694 | const struct pmu *pmu = group_event->pmu; | 732 | struct pmu *pmu = group_event->pmu; |
| 695 | bool txn = false; | ||
| 696 | 733 | ||
| 697 | if (group_event->state == PERF_EVENT_STATE_OFF) | 734 | if (group_event->state == PERF_EVENT_STATE_OFF) |
| 698 | return 0; | 735 | return 0; |
| 699 | 736 | ||
| 700 | /* Check if group transaction available */ | 737 | pmu->start_txn(pmu); |
| 701 | if (pmu->start_txn) | ||
| 702 | txn = true; | ||
| 703 | 738 | ||
| 704 | if (txn) | 739 | /* |
| 705 | pmu->start_txn(pmu); | 740 | * use __event_sched_in() to delay updating tstamp_running |
| 706 | 741 | * until the transaction is committed. In case of failure | |
| 707 | if (event_sched_in(group_event, cpuctx, ctx)) { | 742 | * we will keep an unmodified tstamp_running which is a |
| 708 | if (txn) | 743 | * requirement to get correct timing information |
| 709 | pmu->cancel_txn(pmu); | 744 | */ |
| 745 | if (__event_sched_in(group_event, cpuctx, ctx)) { | ||
| 746 | pmu->cancel_txn(pmu); | ||
| 710 | return -EAGAIN; | 747 | return -EAGAIN; |
| 711 | } | 748 | } |
| 712 | 749 | ||
| @@ -714,29 +751,33 @@ group_sched_in(struct perf_event *group_event, | |||
| 714 | * Schedule in siblings as one group (if any): | 751 | * Schedule in siblings as one group (if any): |
| 715 | */ | 752 | */ |
| 716 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { | 753 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { |
| 717 | if (event_sched_in(event, cpuctx, ctx)) { | 754 | if (__event_sched_in(event, cpuctx, ctx)) { |
| 718 | partial_group = event; | 755 | partial_group = event; |
| 719 | goto group_error; | 756 | goto group_error; |
| 720 | } | 757 | } |
| 721 | } | 758 | } |
| 722 | 759 | ||
| 723 | if (!txn || !pmu->commit_txn(pmu)) | 760 | if (!pmu->commit_txn(pmu)) { |
| 761 | /* commit tstamp_running */ | ||
| 762 | group_commit_event_sched_in(group_event, cpuctx, ctx); | ||
| 724 | return 0; | 763 | return 0; |
| 725 | 764 | } | |
| 726 | group_error: | 765 | group_error: |
| 727 | /* | 766 | /* |
| 728 | * Groups can be scheduled in as one unit only, so undo any | 767 | * Groups can be scheduled in as one unit only, so undo any |
| 729 | * partial group before returning: | 768 | * partial group before returning: |
| 769 | * | ||
| 770 | * use __event_sched_out() to avoid updating tstamp_stopped | ||
| 771 | * because the event never actually ran | ||
| 730 | */ | 772 | */ |
| 731 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { | 773 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { |
| 732 | if (event == partial_group) | 774 | if (event == partial_group) |
| 733 | break; | 775 | break; |
| 734 | event_sched_out(event, cpuctx, ctx); | 776 | __event_sched_out(event, cpuctx, ctx); |
| 735 | } | 777 | } |
| 736 | event_sched_out(group_event, cpuctx, ctx); | 778 | __event_sched_out(group_event, cpuctx, ctx); |
| 737 | 779 | ||
| 738 | if (txn) | 780 | pmu->cancel_txn(pmu); |
| 739 | pmu->cancel_txn(pmu); | ||
| 740 | 781 | ||
| 741 | return -EAGAIN; | 782 | return -EAGAIN; |
| 742 | } | 783 | } |
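
group_sched_in() now relies unconditionally on the PMU transaction methods: start_txn() opens a staging phase, each group member is added without committing hardware resources, and commit_txn() either accepts the whole group or fails so cancel_txn() can discard it, keeping group scheduling all-or-nothing. The toy model below illustrates the shape of that API with a fake two-slot PMU; none of these helpers or fields are the kernel's.

#include <stdio.h>

#define NR_SLOTS 2

struct toy_pmu {
	int used;		/* committed counters */
	int pending;		/* counters staged inside the current transaction */
};

static void start_txn(struct toy_pmu *p)  { p->pending = 0; }
static void cancel_txn(struct toy_pmu *p) { p->pending = 0; }

/* Stage one counter; resources are only checked at commit time. */
static void add_event(struct toy_pmu *p)  { p->pending++; }

/* 0 on success, nonzero if the whole group cannot be scheduled. */
static int commit_txn(struct toy_pmu *p)
{
	if (p->used + p->pending > NR_SLOTS)
		return -1;
	p->used += p->pending;
	p->pending = 0;
	return 0;
}

int main(void)
{
	struct toy_pmu pmu = { 0, 0 };
	int i;

	start_txn(&pmu);
	for (i = 0; i < 3; i++)		/* a 3-member group on a 2-slot PMU */
		add_event(&pmu);

	if (commit_txn(&pmu)) {
		cancel_txn(&pmu);	/* all-or-nothing: undo the whole group */
		puts("group rejected as one unit");
	}
	return 0;
}
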
| @@ -789,10 +830,10 @@ static void add_event_to_ctx(struct perf_event *event, | |||
| 789 | */ | 830 | */ |
| 790 | static void __perf_install_in_context(void *info) | 831 | static void __perf_install_in_context(void *info) |
| 791 | { | 832 | { |
| 792 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
| 793 | struct perf_event *event = info; | 833 | struct perf_event *event = info; |
| 794 | struct perf_event_context *ctx = event->ctx; | 834 | struct perf_event_context *ctx = event->ctx; |
| 795 | struct perf_event *leader = event->group_leader; | 835 | struct perf_event *leader = event->group_leader; |
| 836 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | ||
| 796 | int err; | 837 | int err; |
| 797 | 838 | ||
| 798 | /* | 839 | /* |
| @@ -812,12 +853,6 @@ static void __perf_install_in_context(void *info) | |||
| 812 | ctx->is_active = 1; | 853 | ctx->is_active = 1; |
| 813 | update_context_time(ctx); | 854 | update_context_time(ctx); |
| 814 | 855 | ||
| 815 | /* | ||
| 816 | * Protect the list operation against NMI by disabling the | ||
| 817 | * events on a global level. NOP for non NMI based events. | ||
| 818 | */ | ||
| 819 | perf_disable(); | ||
| 820 | |||
| 821 | add_event_to_ctx(event, ctx); | 856 | add_event_to_ctx(event, ctx); |
| 822 | 857 | ||
| 823 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 858 | if (event->cpu != -1 && event->cpu != smp_processor_id()) |
| @@ -855,12 +890,7 @@ static void __perf_install_in_context(void *info) | |||
| 855 | } | 890 | } |
| 856 | } | 891 | } |
| 857 | 892 | ||
| 858 | if (!err && !ctx->task && cpuctx->max_pertask) | 893 | unlock: |
| 859 | cpuctx->max_pertask--; | ||
| 860 | |||
| 861 | unlock: | ||
| 862 | perf_enable(); | ||
| 863 | |||
| 864 | raw_spin_unlock(&ctx->lock); | 894 | raw_spin_unlock(&ctx->lock); |
| 865 | } | 895 | } |
| 866 | 896 | ||
| @@ -883,6 +913,8 @@ perf_install_in_context(struct perf_event_context *ctx, | |||
| 883 | { | 913 | { |
| 884 | struct task_struct *task = ctx->task; | 914 | struct task_struct *task = ctx->task; |
| 885 | 915 | ||
| 916 | event->ctx = ctx; | ||
| 917 | |||
| 886 | if (!task) { | 918 | if (!task) { |
| 887 | /* | 919 | /* |
| 888 | * Per cpu events are installed via an smp call and | 920 | * Per cpu events are installed via an smp call and |
| @@ -931,10 +963,12 @@ static void __perf_event_mark_enabled(struct perf_event *event, | |||
| 931 | 963 | ||
| 932 | event->state = PERF_EVENT_STATE_INACTIVE; | 964 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 933 | event->tstamp_enabled = ctx->time - event->total_time_enabled; | 965 | event->tstamp_enabled = ctx->time - event->total_time_enabled; |
| 934 | list_for_each_entry(sub, &event->sibling_list, group_entry) | 966 | list_for_each_entry(sub, &event->sibling_list, group_entry) { |
| 935 | if (sub->state >= PERF_EVENT_STATE_INACTIVE) | 967 | if (sub->state >= PERF_EVENT_STATE_INACTIVE) { |
| 936 | sub->tstamp_enabled = | 968 | sub->tstamp_enabled = |
| 937 | ctx->time - sub->total_time_enabled; | 969 | ctx->time - sub->total_time_enabled; |
| 970 | } | ||
| 971 | } | ||
| 938 | } | 972 | } |
| 939 | 973 | ||
| 940 | /* | 974 | /* |
| @@ -943,9 +977,9 @@ static void __perf_event_mark_enabled(struct perf_event *event, | |||
| 943 | static void __perf_event_enable(void *info) | 977 | static void __perf_event_enable(void *info) |
| 944 | { | 978 | { |
| 945 | struct perf_event *event = info; | 979 | struct perf_event *event = info; |
| 946 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
| 947 | struct perf_event_context *ctx = event->ctx; | 980 | struct perf_event_context *ctx = event->ctx; |
| 948 | struct perf_event *leader = event->group_leader; | 981 | struct perf_event *leader = event->group_leader; |
| 982 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | ||
| 949 | int err; | 983 | int err; |
| 950 | 984 | ||
| 951 | /* | 985 | /* |
| @@ -979,12 +1013,10 @@ static void __perf_event_enable(void *info) | |||
| 979 | if (!group_can_go_on(event, cpuctx, 1)) { | 1013 | if (!group_can_go_on(event, cpuctx, 1)) { |
| 980 | err = -EEXIST; | 1014 | err = -EEXIST; |
| 981 | } else { | 1015 | } else { |
| 982 | perf_disable(); | ||
| 983 | if (event == leader) | 1016 | if (event == leader) |
| 984 | err = group_sched_in(event, cpuctx, ctx); | 1017 | err = group_sched_in(event, cpuctx, ctx); |
| 985 | else | 1018 | else |
| 986 | err = event_sched_in(event, cpuctx, ctx); | 1019 | err = event_sched_in(event, cpuctx, ctx); |
| 987 | perf_enable(); | ||
| 988 | } | 1020 | } |
| 989 | 1021 | ||
| 990 | if (err) { | 1022 | if (err) { |
| @@ -1000,7 +1032,7 @@ static void __perf_event_enable(void *info) | |||
| 1000 | } | 1032 | } |
| 1001 | } | 1033 | } |
| 1002 | 1034 | ||
| 1003 | unlock: | 1035 | unlock: |
| 1004 | raw_spin_unlock(&ctx->lock); | 1036 | raw_spin_unlock(&ctx->lock); |
| 1005 | } | 1037 | } |
| 1006 | 1038 | ||
| @@ -1041,7 +1073,7 @@ void perf_event_enable(struct perf_event *event) | |||
| 1041 | if (event->state == PERF_EVENT_STATE_ERROR) | 1073 | if (event->state == PERF_EVENT_STATE_ERROR) |
| 1042 | event->state = PERF_EVENT_STATE_OFF; | 1074 | event->state = PERF_EVENT_STATE_OFF; |
| 1043 | 1075 | ||
| 1044 | retry: | 1076 | retry: |
| 1045 | raw_spin_unlock_irq(&ctx->lock); | 1077 | raw_spin_unlock_irq(&ctx->lock); |
| 1046 | task_oncpu_function_call(task, __perf_event_enable, event); | 1078 | task_oncpu_function_call(task, __perf_event_enable, event); |
| 1047 | 1079 | ||
| @@ -1061,7 +1093,7 @@ void perf_event_enable(struct perf_event *event) | |||
| 1061 | if (event->state == PERF_EVENT_STATE_OFF) | 1093 | if (event->state == PERF_EVENT_STATE_OFF) |
| 1062 | __perf_event_mark_enabled(event, ctx); | 1094 | __perf_event_mark_enabled(event, ctx); |
| 1063 | 1095 | ||
| 1064 | out: | 1096 | out: |
| 1065 | raw_spin_unlock_irq(&ctx->lock); | 1097 | raw_spin_unlock_irq(&ctx->lock); |
| 1066 | } | 1098 | } |
| 1067 | 1099 | ||
| @@ -1092,26 +1124,26 @@ static void ctx_sched_out(struct perf_event_context *ctx, | |||
| 1092 | struct perf_event *event; | 1124 | struct perf_event *event; |
| 1093 | 1125 | ||
| 1094 | raw_spin_lock(&ctx->lock); | 1126 | raw_spin_lock(&ctx->lock); |
| 1127 | perf_pmu_disable(ctx->pmu); | ||
| 1095 | ctx->is_active = 0; | 1128 | ctx->is_active = 0; |
| 1096 | if (likely(!ctx->nr_events)) | 1129 | if (likely(!ctx->nr_events)) |
| 1097 | goto out; | 1130 | goto out; |
| 1098 | update_context_time(ctx); | 1131 | update_context_time(ctx); |
| 1099 | 1132 | ||
| 1100 | perf_disable(); | ||
| 1101 | if (!ctx->nr_active) | 1133 | if (!ctx->nr_active) |
| 1102 | goto out_enable; | 1134 | goto out; |
| 1103 | 1135 | ||
| 1104 | if (event_type & EVENT_PINNED) | 1136 | if (event_type & EVENT_PINNED) { |
| 1105 | list_for_each_entry(event, &ctx->pinned_groups, group_entry) | 1137 | list_for_each_entry(event, &ctx->pinned_groups, group_entry) |
| 1106 | group_sched_out(event, cpuctx, ctx); | 1138 | group_sched_out(event, cpuctx, ctx); |
| 1139 | } | ||
| 1107 | 1140 | ||
| 1108 | if (event_type & EVENT_FLEXIBLE) | 1141 | if (event_type & EVENT_FLEXIBLE) { |
| 1109 | list_for_each_entry(event, &ctx->flexible_groups, group_entry) | 1142 | list_for_each_entry(event, &ctx->flexible_groups, group_entry) |
| 1110 | group_sched_out(event, cpuctx, ctx); | 1143 | group_sched_out(event, cpuctx, ctx); |
| 1111 | 1144 | } | |
| 1112 | out_enable: | 1145 | out: |
| 1113 | perf_enable(); | 1146 | perf_pmu_enable(ctx->pmu); |
| 1114 | out: | ||
| 1115 | raw_spin_unlock(&ctx->lock); | 1147 | raw_spin_unlock(&ctx->lock); |
| 1116 | } | 1148 | } |
| 1117 | 1149 | ||
| @@ -1209,34 +1241,25 @@ static void perf_event_sync_stat(struct perf_event_context *ctx, | |||
| 1209 | } | 1241 | } |
| 1210 | } | 1242 | } |
| 1211 | 1243 | ||
| 1212 | /* | 1244 | void perf_event_context_sched_out(struct task_struct *task, int ctxn, |
| 1213 | * Called from scheduler to remove the events of the current task, | 1245 | struct task_struct *next) |
| 1214 | * with interrupts disabled. | ||
| 1215 | * | ||
| 1216 | * We stop each event and update the event value in event->count. | ||
| 1217 | * | ||
| 1218 | * This does not protect us against NMI, but disable() | ||
| 1219 | * sets the disabled bit in the control field of event _before_ | ||
| 1220 | * accessing the event control register. If a NMI hits, then it will | ||
| 1221 | * not restart the event. | ||
| 1222 | */ | ||
| 1223 | void perf_event_task_sched_out(struct task_struct *task, | ||
| 1224 | struct task_struct *next) | ||
| 1225 | { | 1246 | { |
| 1226 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 1247 | struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; |
| 1227 | struct perf_event_context *ctx = task->perf_event_ctxp; | ||
| 1228 | struct perf_event_context *next_ctx; | 1248 | struct perf_event_context *next_ctx; |
| 1229 | struct perf_event_context *parent; | 1249 | struct perf_event_context *parent; |
| 1250 | struct perf_cpu_context *cpuctx; | ||
| 1230 | int do_switch = 1; | 1251 | int do_switch = 1; |
| 1231 | 1252 | ||
| 1232 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); | 1253 | if (likely(!ctx)) |
| 1254 | return; | ||
| 1233 | 1255 | ||
| 1234 | if (likely(!ctx || !cpuctx->task_ctx)) | 1256 | cpuctx = __get_cpu_context(ctx); |
| 1257 | if (!cpuctx->task_ctx) | ||
| 1235 | return; | 1258 | return; |
| 1236 | 1259 | ||
| 1237 | rcu_read_lock(); | 1260 | rcu_read_lock(); |
| 1238 | parent = rcu_dereference(ctx->parent_ctx); | 1261 | parent = rcu_dereference(ctx->parent_ctx); |
| 1239 | next_ctx = next->perf_event_ctxp; | 1262 | next_ctx = next->perf_event_ctxp[ctxn]; |
| 1240 | if (parent && next_ctx && | 1263 | if (parent && next_ctx && |
| 1241 | rcu_dereference(next_ctx->parent_ctx) == parent) { | 1264 | rcu_dereference(next_ctx->parent_ctx) == parent) { |
| 1242 | /* | 1265 | /* |
| @@ -1255,8 +1278,8 @@ void perf_event_task_sched_out(struct task_struct *task, | |||
| 1255 | * XXX do we need a memory barrier of sorts | 1278 | * XXX do we need a memory barrier of sorts |
| 1256 | * wrt to rcu_dereference() of perf_event_ctxp | 1279 | * wrt to rcu_dereference() of perf_event_ctxp |
| 1257 | */ | 1280 | */ |
| 1258 | task->perf_event_ctxp = next_ctx; | 1281 | task->perf_event_ctxp[ctxn] = next_ctx; |
| 1259 | next->perf_event_ctxp = ctx; | 1282 | next->perf_event_ctxp[ctxn] = ctx; |
| 1260 | ctx->task = next; | 1283 | ctx->task = next; |
| 1261 | next_ctx->task = task; | 1284 | next_ctx->task = task; |
| 1262 | do_switch = 0; | 1285 | do_switch = 0; |
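
When the outgoing and incoming tasks own contexts cloned from the same parent (the common fork inheritance case), the switch-out path avoids a full unschedule/reschedule and simply exchanges the two context pointers and their task back-references, as the hunk above shows. A userspace sketch of that fast-path swap with trimmed-down, illustrative structures; the real code additionally holds both context locks and RCU while swapping.

#include <stdbool.h>

struct ptask;

struct pctx {
	struct pctx *parent;	/* context this one was cloned from, if any */
	struct ptask *task;	/* owner, kept in sync with the swap */
};

struct ptask {
	struct pctx *ctx;
};

/* If both contexts are clones of the same parent, swap them instead of
 * tearing one down and building the other up. Returns true on the fast path. */
static bool switch_ctx_fast(struct ptask *prev, struct ptask *next)
{
	struct pctx *a = prev->ctx, *b = next->ctx;

	if (!a || !b || !a->parent || a->parent != b->parent)
		return false;

	prev->ctx = b;
	next->ctx = a;
	a->task = next;
	b->task = prev;
	return true;
}
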
| @@ -1274,10 +1297,35 @@ void perf_event_task_sched_out(struct task_struct *task, | |||
| 1274 | } | 1297 | } |
| 1275 | } | 1298 | } |
| 1276 | 1299 | ||
| 1300 | #define for_each_task_context_nr(ctxn) \ | ||
| 1301 | for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) | ||
| 1302 | |||
| 1303 | /* | ||
| 1304 | * Called from scheduler to remove the events of the current task, | ||
| 1305 | * with interrupts disabled. | ||
| 1306 | * | ||
| 1307 | * We stop each event and update the event value in event->count. | ||
| 1308 | * | ||
| 1309 | * This does not protect us against NMI, but disable() | ||
| 1310 | * sets the disabled bit in the control field of event _before_ | ||
| 1311 | * accessing the event control register. If a NMI hits, then it will | ||
| 1312 | * not restart the event. | ||
| 1313 | */ | ||
| 1314 | void __perf_event_task_sched_out(struct task_struct *task, | ||
| 1315 | struct task_struct *next) | ||
| 1316 | { | ||
| 1317 | int ctxn; | ||
| 1318 | |||
| 1319 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); | ||
| 1320 | |||
| 1321 | for_each_task_context_nr(ctxn) | ||
| 1322 | perf_event_context_sched_out(task, ctxn, next); | ||
| 1323 | } | ||
| 1324 | |||
| 1277 | static void task_ctx_sched_out(struct perf_event_context *ctx, | 1325 | static void task_ctx_sched_out(struct perf_event_context *ctx, |
| 1278 | enum event_type_t event_type) | 1326 | enum event_type_t event_type) |
| 1279 | { | 1327 | { |
| 1280 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 1328 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| 1281 | 1329 | ||
| 1282 | if (!cpuctx->task_ctx) | 1330 | if (!cpuctx->task_ctx) |
| 1283 | return; | 1331 | return; |
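
__perf_event_task_sched_out() itself becomes a fan-out: it emits the context-switch software event once and then walks every per-task context slot, scheduling out whichever contexts exist. A minimal sketch of that iteration; the slot count and names are invented for illustration.

#define NR_TASK_CTX 2

struct ctx;					/* opaque here */

static void ctx_sched_out_one(struct ctx *c)	/* per-class work */
{
	if (!c)
		return;
	/* ... stop the events hanging off this context ... */
}

static void task_sched_out_all(struct ctx *slots[NR_TASK_CTX])
{
	int n;

	for (n = 0; n < NR_TASK_CTX; n++)
		ctx_sched_out_one(slots[n]);
}
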
| @@ -1292,14 +1340,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx, | |||
| 1292 | /* | 1340 | /* |
| 1293 | * Called with IRQs disabled | 1341 | * Called with IRQs disabled |
| 1294 | */ | 1342 | */ |
| 1295 | static void __perf_event_task_sched_out(struct perf_event_context *ctx) | ||
| 1296 | { | ||
| 1297 | task_ctx_sched_out(ctx, EVENT_ALL); | ||
| 1298 | } | ||
| 1299 | |||
| 1300 | /* | ||
| 1301 | * Called with IRQs disabled | ||
| 1302 | */ | ||
| 1303 | static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, | 1343 | static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, |
| 1304 | enum event_type_t event_type) | 1344 | enum event_type_t event_type) |
| 1305 | { | 1345 | { |
| @@ -1350,9 +1390,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx, | |||
| 1350 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 1390 | if (event->cpu != -1 && event->cpu != smp_processor_id()) |
| 1351 | continue; | 1391 | continue; |
| 1352 | 1392 | ||
| 1353 | if (group_can_go_on(event, cpuctx, can_add_hw)) | 1393 | if (group_can_go_on(event, cpuctx, can_add_hw)) { |
| 1354 | if (group_sched_in(event, cpuctx, ctx)) | 1394 | if (group_sched_in(event, cpuctx, ctx)) |
| 1355 | can_add_hw = 0; | 1395 | can_add_hw = 0; |
| 1396 | } | ||
| 1356 | } | 1397 | } |
| 1357 | } | 1398 | } |
| 1358 | 1399 | ||
| @@ -1368,8 +1409,6 @@ ctx_sched_in(struct perf_event_context *ctx, | |||
| 1368 | 1409 | ||
| 1369 | ctx->timestamp = perf_clock(); | 1410 | ctx->timestamp = perf_clock(); |
| 1370 | 1411 | ||
| 1371 | perf_disable(); | ||
| 1372 | |||
| 1373 | /* | 1412 | /* |
| 1374 | * First go through the list and put on any pinned groups | 1413 | * First go through the list and put on any pinned groups |
| 1375 | * in order to give them the best chance of going on. | 1414 | * in order to give them the best chance of going on. |
| @@ -1381,8 +1420,7 @@ ctx_sched_in(struct perf_event_context *ctx, | |||
| 1381 | if (event_type & EVENT_FLEXIBLE) | 1420 | if (event_type & EVENT_FLEXIBLE) |
| 1382 | ctx_flexible_sched_in(ctx, cpuctx); | 1421 | ctx_flexible_sched_in(ctx, cpuctx); |
| 1383 | 1422 | ||
| 1384 | perf_enable(); | 1423 | out: |
| 1385 | out: | ||
| 1386 | raw_spin_unlock(&ctx->lock); | 1424 | raw_spin_unlock(&ctx->lock); |
| 1387 | } | 1425 | } |
| 1388 | 1426 | ||
| @@ -1394,43 +1432,28 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, | |||
| 1394 | ctx_sched_in(ctx, cpuctx, event_type); | 1432 | ctx_sched_in(ctx, cpuctx, event_type); |
| 1395 | } | 1433 | } |
| 1396 | 1434 | ||
| 1397 | static void task_ctx_sched_in(struct task_struct *task, | 1435 | static void task_ctx_sched_in(struct perf_event_context *ctx, |
| 1398 | enum event_type_t event_type) | 1436 | enum event_type_t event_type) |
| 1399 | { | 1437 | { |
| 1400 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 1438 | struct perf_cpu_context *cpuctx; |
| 1401 | struct perf_event_context *ctx = task->perf_event_ctxp; | ||
| 1402 | 1439 | ||
| 1403 | if (likely(!ctx)) | 1440 | cpuctx = __get_cpu_context(ctx); |
| 1404 | return; | ||
| 1405 | if (cpuctx->task_ctx == ctx) | 1441 | if (cpuctx->task_ctx == ctx) |
| 1406 | return; | 1442 | return; |
| 1443 | |||
| 1407 | ctx_sched_in(ctx, cpuctx, event_type); | 1444 | ctx_sched_in(ctx, cpuctx, event_type); |
| 1408 | cpuctx->task_ctx = ctx; | 1445 | cpuctx->task_ctx = ctx; |
| 1409 | } | 1446 | } |
| 1410 | /* | ||
| 1411 | * Called from scheduler to add the events of the current task | ||
| 1412 | * with interrupts disabled. | ||
| 1413 | * | ||
| 1414 | * We restore the event value and then enable it. | ||
| 1415 | * | ||
| 1416 | * This does not protect us against NMI, but enable() | ||
| 1417 | * sets the enabled bit in the control field of event _before_ | ||
| 1418 | * accessing the event control register. If a NMI hits, then it will | ||
| 1419 | * keep the event running. | ||
| 1420 | */ | ||
| 1421 | void perf_event_task_sched_in(struct task_struct *task) | ||
| 1422 | { | ||
| 1423 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
| 1424 | struct perf_event_context *ctx = task->perf_event_ctxp; | ||
| 1425 | 1447 | ||
| 1426 | if (likely(!ctx)) | 1448 | void perf_event_context_sched_in(struct perf_event_context *ctx) |
| 1427 | return; | 1449 | { |
| 1450 | struct perf_cpu_context *cpuctx; | ||
| 1428 | 1451 | ||
| 1452 | cpuctx = __get_cpu_context(ctx); | ||
| 1429 | if (cpuctx->task_ctx == ctx) | 1453 | if (cpuctx->task_ctx == ctx) |
| 1430 | return; | 1454 | return; |
| 1431 | 1455 | ||
| 1432 | perf_disable(); | 1456 | perf_pmu_disable(ctx->pmu); |
| 1433 | |||
| 1434 | /* | 1457 | /* |
| 1435 | * We want to keep the following priority order: | 1458 | * We want to keep the following priority order: |
| 1436 | * cpu pinned (that don't need to move), task pinned, | 1459 | * cpu pinned (that don't need to move), task pinned, |
| @@ -1444,7 +1467,37 @@ void perf_event_task_sched_in(struct task_struct *task) | |||
| 1444 | 1467 | ||
| 1445 | cpuctx->task_ctx = ctx; | 1468 | cpuctx->task_ctx = ctx; |
| 1446 | 1469 | ||
| 1447 | perf_enable(); | 1470 | /* |
| 1471 | * Since these rotations are per-cpu, we need to ensure the | ||
| 1472 | * cpu-context we got scheduled on is actually rotating. | ||
| 1473 | */ | ||
| 1474 | perf_pmu_rotate_start(ctx->pmu); | ||
| 1475 | perf_pmu_enable(ctx->pmu); | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | /* | ||
| 1479 | * Called from scheduler to add the events of the current task | ||
| 1480 | * with interrupts disabled. | ||
| 1481 | * | ||
| 1482 | * We restore the event value and then enable it. | ||
| 1483 | * | ||
| 1484 | * This does not protect us against NMI, but enable() | ||
| 1485 | * sets the enabled bit in the control field of event _before_ | ||
| 1486 | * accessing the event control register. If a NMI hits, then it will | ||
| 1487 | * keep the event running. | ||
| 1488 | */ | ||
| 1489 | void __perf_event_task_sched_in(struct task_struct *task) | ||
| 1490 | { | ||
| 1491 | struct perf_event_context *ctx; | ||
| 1492 | int ctxn; | ||
| 1493 | |||
| 1494 | for_each_task_context_nr(ctxn) { | ||
| 1495 | ctx = task->perf_event_ctxp[ctxn]; | ||
| 1496 | if (likely(!ctx)) | ||
| 1497 | continue; | ||
| 1498 | |||
| 1499 | perf_event_context_sched_in(ctx); | ||
| 1500 | } | ||
| 1448 | } | 1501 | } |
| 1449 | 1502 | ||
| 1450 | #define MAX_INTERRUPTS (~0ULL) | 1503 | #define MAX_INTERRUPTS (~0ULL) |
| @@ -1524,22 +1577,6 @@ do { \ | |||
| 1524 | return div64_u64(dividend, divisor); | 1577 | return div64_u64(dividend, divisor); |
| 1525 | } | 1578 | } |
| 1526 | 1579 | ||
| 1527 | static void perf_event_stop(struct perf_event *event) | ||
| 1528 | { | ||
| 1529 | if (!event->pmu->stop) | ||
| 1530 | return event->pmu->disable(event); | ||
| 1531 | |||
| 1532 | return event->pmu->stop(event); | ||
| 1533 | } | ||
| 1534 | |||
| 1535 | static int perf_event_start(struct perf_event *event) | ||
| 1536 | { | ||
| 1537 | if (!event->pmu->start) | ||
| 1538 | return event->pmu->enable(event); | ||
| 1539 | |||
| 1540 | return event->pmu->start(event); | ||
| 1541 | } | ||
| 1542 | |||
| 1543 | static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) | 1580 | static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) |
| 1544 | { | 1581 | { |
| 1545 | struct hw_perf_event *hwc = &event->hw; | 1582 | struct hw_perf_event *hwc = &event->hw; |
| @@ -1559,15 +1596,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) | |||
| 1559 | hwc->sample_period = sample_period; | 1596 | hwc->sample_period = sample_period; |
| 1560 | 1597 | ||
| 1561 | if (local64_read(&hwc->period_left) > 8*sample_period) { | 1598 | if (local64_read(&hwc->period_left) > 8*sample_period) { |
| 1562 | perf_disable(); | 1599 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 1563 | perf_event_stop(event); | ||
| 1564 | local64_set(&hwc->period_left, 0); | 1600 | local64_set(&hwc->period_left, 0); |
| 1565 | perf_event_start(event); | 1601 | event->pmu->start(event, PERF_EF_RELOAD); |
| 1566 | perf_enable(); | ||
| 1567 | } | 1602 | } |
| 1568 | } | 1603 | } |
| 1569 | 1604 | ||
| 1570 | static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | 1605 | static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) |
| 1571 | { | 1606 | { |
| 1572 | struct perf_event *event; | 1607 | struct perf_event *event; |
| 1573 | struct hw_perf_event *hwc; | 1608 | struct hw_perf_event *hwc; |
| @@ -1592,23 +1627,19 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | |||
| 1592 | */ | 1627 | */ |
| 1593 | if (interrupts == MAX_INTERRUPTS) { | 1628 | if (interrupts == MAX_INTERRUPTS) { |
| 1594 | perf_log_throttle(event, 1); | 1629 | perf_log_throttle(event, 1); |
| 1595 | perf_disable(); | 1630 | event->pmu->start(event, 0); |
| 1596 | event->pmu->unthrottle(event); | ||
| 1597 | perf_enable(); | ||
| 1598 | } | 1631 | } |
| 1599 | 1632 | ||
| 1600 | if (!event->attr.freq || !event->attr.sample_freq) | 1633 | if (!event->attr.freq || !event->attr.sample_freq) |
| 1601 | continue; | 1634 | continue; |
| 1602 | 1635 | ||
| 1603 | perf_disable(); | ||
| 1604 | event->pmu->read(event); | 1636 | event->pmu->read(event); |
| 1605 | now = local64_read(&event->count); | 1637 | now = local64_read(&event->count); |
| 1606 | delta = now - hwc->freq_count_stamp; | 1638 | delta = now - hwc->freq_count_stamp; |
| 1607 | hwc->freq_count_stamp = now; | 1639 | hwc->freq_count_stamp = now; |
| 1608 | 1640 | ||
| 1609 | if (delta > 0) | 1641 | if (delta > 0) |
| 1610 | perf_adjust_period(event, TICK_NSEC, delta); | 1642 | perf_adjust_period(event, period, delta); |
| 1611 | perf_enable(); | ||
| 1612 | } | 1643 | } |
| 1613 | raw_spin_unlock(&ctx->lock); | 1644 | raw_spin_unlock(&ctx->lock); |
| 1614 | } | 1645 | } |
| @@ -1626,32 +1657,38 @@ static void rotate_ctx(struct perf_event_context *ctx) | |||
| 1626 | raw_spin_unlock(&ctx->lock); | 1657 | raw_spin_unlock(&ctx->lock); |
| 1627 | } | 1658 | } |
| 1628 | 1659 | ||
| 1629 | void perf_event_task_tick(struct task_struct *curr) | 1660 | /* |
| 1661 | * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized | ||
| 1662 | * because they're strictly cpu affine and rotate_start is called with IRQs | ||
| 1663 | * disabled, while rotate_context is called from IRQ context. | ||
| 1664 | */ | ||
| 1665 | static void perf_rotate_context(struct perf_cpu_context *cpuctx) | ||
| 1630 | { | 1666 | { |
| 1631 | struct perf_cpu_context *cpuctx; | 1667 | u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; |
| 1632 | struct perf_event_context *ctx; | 1668 | struct perf_event_context *ctx = NULL; |
| 1633 | int rotate = 0; | 1669 | int rotate = 0, remove = 1; |
| 1634 | |||
| 1635 | if (!atomic_read(&nr_events)) | ||
| 1636 | return; | ||
| 1637 | 1670 | ||
| 1638 | cpuctx = &__get_cpu_var(perf_cpu_context); | 1671 | if (cpuctx->ctx.nr_events) { |
| 1639 | if (cpuctx->ctx.nr_events && | 1672 | remove = 0; |
| 1640 | cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) | 1673 | if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) |
| 1641 | rotate = 1; | 1674 | rotate = 1; |
| 1675 | } | ||
| 1642 | 1676 | ||
| 1643 | ctx = curr->perf_event_ctxp; | 1677 | ctx = cpuctx->task_ctx; |
| 1644 | if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active) | 1678 | if (ctx && ctx->nr_events) { |
| 1645 | rotate = 1; | 1679 | remove = 0; |
| 1680 | if (ctx->nr_events != ctx->nr_active) | ||
| 1681 | rotate = 1; | ||
| 1682 | } | ||
| 1646 | 1683 | ||
| 1647 | perf_ctx_adjust_freq(&cpuctx->ctx); | 1684 | perf_pmu_disable(cpuctx->ctx.pmu); |
| 1685 | perf_ctx_adjust_freq(&cpuctx->ctx, interval); | ||
| 1648 | if (ctx) | 1686 | if (ctx) |
| 1649 | perf_ctx_adjust_freq(ctx); | 1687 | perf_ctx_adjust_freq(ctx, interval); |
| 1650 | 1688 | ||
| 1651 | if (!rotate) | 1689 | if (!rotate) |
| 1652 | return; | 1690 | goto done; |
| 1653 | 1691 | ||
| 1654 | perf_disable(); | ||
| 1655 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); | 1692 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
| 1656 | if (ctx) | 1693 | if (ctx) |
| 1657 | task_ctx_sched_out(ctx, EVENT_FLEXIBLE); | 1694 | task_ctx_sched_out(ctx, EVENT_FLEXIBLE); |
| @@ -1662,8 +1699,27 @@ void perf_event_task_tick(struct task_struct *curr) | |||
| 1662 | 1699 | ||
| 1663 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); | 1700 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); |
| 1664 | if (ctx) | 1701 | if (ctx) |
| 1665 | task_ctx_sched_in(curr, EVENT_FLEXIBLE); | 1702 | task_ctx_sched_in(ctx, EVENT_FLEXIBLE); |
| 1666 | perf_enable(); | 1703 | |
| 1704 | done: | ||
| 1705 | if (remove) | ||
| 1706 | list_del_init(&cpuctx->rotation_list); | ||
| 1707 | |||
| 1708 | perf_pmu_enable(cpuctx->ctx.pmu); | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | void perf_event_task_tick(void) | ||
| 1712 | { | ||
| 1713 | struct list_head *head = &__get_cpu_var(rotation_list); | ||
| 1714 | struct perf_cpu_context *cpuctx, *tmp; | ||
| 1715 | |||
| 1716 | WARN_ON(!irqs_disabled()); | ||
| 1717 | |||
| 1718 | list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { | ||
| 1719 | if (cpuctx->jiffies_interval == 1 || | ||
| 1720 | !(jiffies % cpuctx->jiffies_interval)) | ||
| 1721 | perf_rotate_context(cpuctx); | ||
| 1722 | } | ||
| 1667 | } | 1723 | } |
| 1668 | 1724 | ||
| 1669 | static int event_enable_on_exec(struct perf_event *event, | 1725 | static int event_enable_on_exec(struct perf_event *event, |
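
perf_event_task_tick() no longer iterates events globally; it walks the per-CPU rotation_list and rotates only those cpu-contexts whose jiffies_interval divides the current tick, so slower PMUs can rotate less often. A small standalone sketch of that modulus check (context names and intervals are made up):

#include <stdio.h>

#define NR_CTX 3

struct cpu_ctx {
	const char *name;
	unsigned int jiffies_interval;	/* rotate every N ticks */
};

static void rotate(struct cpu_ctx *c)
{
	printf("rotating %s\n", c->name);
}

/* Per-tick walk: contexts with interval 1 rotate every tick, the others
 * only when the tick count is a multiple of their interval. */
static void tick(struct cpu_ctx ctxs[], int n, unsigned long jiffies)
{
	int i;

	for (i = 0; i < n; i++) {
		if (ctxs[i].jiffies_interval == 1 ||
		    !(jiffies % ctxs[i].jiffies_interval))
			rotate(&ctxs[i]);
	}
}

int main(void)
{
	struct cpu_ctx ctxs[NR_CTX] = {
		{ "cpu-pmu", 1 }, { "sw-pmu", 4 }, { "bp-pmu", 8 },
	};
	unsigned long j;

	for (j = 1; j <= 8; j++)
		tick(ctxs, NR_CTX, j);
	return 0;
}
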
| @@ -1685,20 +1741,18 @@ static int event_enable_on_exec(struct perf_event *event, | |||
| 1685 | * Enable all of a task's events that have been marked enable-on-exec. | 1741 | * Enable all of a task's events that have been marked enable-on-exec. |
| 1686 | * This expects task == current. | 1742 | * This expects task == current. |
| 1687 | */ | 1743 | */ |
| 1688 | static void perf_event_enable_on_exec(struct task_struct *task) | 1744 | static void perf_event_enable_on_exec(struct perf_event_context *ctx) |
| 1689 | { | 1745 | { |
| 1690 | struct perf_event_context *ctx; | ||
| 1691 | struct perf_event *event; | 1746 | struct perf_event *event; |
| 1692 | unsigned long flags; | 1747 | unsigned long flags; |
| 1693 | int enabled = 0; | 1748 | int enabled = 0; |
| 1694 | int ret; | 1749 | int ret; |
| 1695 | 1750 | ||
| 1696 | local_irq_save(flags); | 1751 | local_irq_save(flags); |
| 1697 | ctx = task->perf_event_ctxp; | ||
| 1698 | if (!ctx || !ctx->nr_events) | 1752 | if (!ctx || !ctx->nr_events) |
| 1699 | goto out; | 1753 | goto out; |
| 1700 | 1754 | ||
| 1701 | __perf_event_task_sched_out(ctx); | 1755 | task_ctx_sched_out(ctx, EVENT_ALL); |
| 1702 | 1756 | ||
| 1703 | raw_spin_lock(&ctx->lock); | 1757 | raw_spin_lock(&ctx->lock); |
| 1704 | 1758 | ||
| @@ -1722,8 +1776,8 @@ static void perf_event_enable_on_exec(struct task_struct *task) | |||
| 1722 | 1776 | ||
| 1723 | raw_spin_unlock(&ctx->lock); | 1777 | raw_spin_unlock(&ctx->lock); |
| 1724 | 1778 | ||
| 1725 | perf_event_task_sched_in(task); | 1779 | perf_event_context_sched_in(ctx); |
| 1726 | out: | 1780 | out: |
| 1727 | local_irq_restore(flags); | 1781 | local_irq_restore(flags); |
| 1728 | } | 1782 | } |
| 1729 | 1783 | ||
| @@ -1732,9 +1786,9 @@ static void perf_event_enable_on_exec(struct task_struct *task) | |||
| 1732 | */ | 1786 | */ |
| 1733 | static void __perf_event_read(void *info) | 1787 | static void __perf_event_read(void *info) |
| 1734 | { | 1788 | { |
| 1735 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
| 1736 | struct perf_event *event = info; | 1789 | struct perf_event *event = info; |
| 1737 | struct perf_event_context *ctx = event->ctx; | 1790 | struct perf_event_context *ctx = event->ctx; |
| 1791 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | ||
| 1738 | 1792 | ||
| 1739 | /* | 1793 | /* |
| 1740 | * If this is a task context, we need to check whether it is | 1794 | * If this is a task context, we need to check whether it is |
| @@ -1773,7 +1827,13 @@ static u64 perf_event_read(struct perf_event *event) | |||
| 1773 | unsigned long flags; | 1827 | unsigned long flags; |
| 1774 | 1828 | ||
| 1775 | raw_spin_lock_irqsave(&ctx->lock, flags); | 1829 | raw_spin_lock_irqsave(&ctx->lock, flags); |
| 1776 | update_context_time(ctx); | 1830 | /* |
| 1831 | * may read while context is not active | ||
| 1832 | * (e.g., thread is blocked), in that case | ||
| 1833 | * we cannot update context time | ||
| 1834 | */ | ||
| 1835 | if (ctx->is_active) | ||
| 1836 | update_context_time(ctx); | ||
| 1777 | update_event_times(event); | 1837 | update_event_times(event); |
| 1778 | raw_spin_unlock_irqrestore(&ctx->lock, flags); | 1838 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
| 1779 | } | 1839 | } |
| @@ -1782,11 +1842,219 @@ static u64 perf_event_read(struct perf_event *event) | |||
| 1782 | } | 1842 | } |
| 1783 | 1843 | ||
| 1784 | /* | 1844 | /* |
| 1785 | * Initialize the perf_event context in a task_struct: | 1845 | * Callchain support |
| 1786 | */ | 1846 | */ |
| 1847 | |||
| 1848 | struct callchain_cpus_entries { | ||
| 1849 | struct rcu_head rcu_head; | ||
| 1850 | struct perf_callchain_entry *cpu_entries[0]; | ||
| 1851 | }; | ||
| 1852 | |||
| 1853 | static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); | ||
| 1854 | static atomic_t nr_callchain_events; | ||
| 1855 | static DEFINE_MUTEX(callchain_mutex); | ||
| 1856 | struct callchain_cpus_entries *callchain_cpus_entries; | ||
| 1857 | |||
| 1858 | |||
| 1859 | __weak void perf_callchain_kernel(struct perf_callchain_entry *entry, | ||
| 1860 | struct pt_regs *regs) | ||
| 1861 | { | ||
| 1862 | } | ||
| 1863 | |||
| 1864 | __weak void perf_callchain_user(struct perf_callchain_entry *entry, | ||
| 1865 | struct pt_regs *regs) | ||
| 1866 | { | ||
| 1867 | } | ||
| 1868 | |||
| 1869 | static void release_callchain_buffers_rcu(struct rcu_head *head) | ||
| 1870 | { | ||
| 1871 | struct callchain_cpus_entries *entries; | ||
| 1872 | int cpu; | ||
| 1873 | |||
| 1874 | entries = container_of(head, struct callchain_cpus_entries, rcu_head); | ||
| 1875 | |||
| 1876 | for_each_possible_cpu(cpu) | ||
| 1877 | kfree(entries->cpu_entries[cpu]); | ||
| 1878 | |||
| 1879 | kfree(entries); | ||
| 1880 | } | ||
| 1881 | |||
| 1882 | static void release_callchain_buffers(void) | ||
| 1883 | { | ||
| 1884 | struct callchain_cpus_entries *entries; | ||
| 1885 | |||
| 1886 | entries = callchain_cpus_entries; | ||
| 1887 | rcu_assign_pointer(callchain_cpus_entries, NULL); | ||
| 1888 | call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); | ||
| 1889 | } | ||
| 1890 | |||
| 1891 | static int alloc_callchain_buffers(void) | ||
| 1892 | { | ||
| 1893 | int cpu; | ||
| 1894 | int size; | ||
| 1895 | struct callchain_cpus_entries *entries; | ||
| 1896 | |||
| 1897 | /* | ||
| 1898 | * We can't use the percpu allocation API for data that can be | ||
| 1899 | * accessed from NMI. Use a temporary manual per cpu allocation | ||
| 1900 | * until that gets sorted out. | ||
| 1901 | */ | ||
| 1902 | size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) * | ||
| 1903 | num_possible_cpus(); | ||
| 1904 | |||
| 1905 | entries = kzalloc(size, GFP_KERNEL); | ||
| 1906 | if (!entries) | ||
| 1907 | return -ENOMEM; | ||
| 1908 | |||
| 1909 | size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; | ||
| 1910 | |||
| 1911 | for_each_possible_cpu(cpu) { | ||
| 1912 | entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, | ||
| 1913 | cpu_to_node(cpu)); | ||
| 1914 | if (!entries->cpu_entries[cpu]) | ||
| 1915 | goto fail; | ||
| 1916 | } | ||
| 1917 | |||
| 1918 | rcu_assign_pointer(callchain_cpus_entries, entries); | ||
| 1919 | |||
| 1920 | return 0; | ||
| 1921 | |||
| 1922 | fail: | ||
| 1923 | for_each_possible_cpu(cpu) | ||
| 1924 | kfree(entries->cpu_entries[cpu]); | ||
| 1925 | kfree(entries); | ||
| 1926 | |||
| 1927 | return -ENOMEM; | ||
| 1928 | } | ||
| 1929 | |||
| 1930 | static int get_callchain_buffers(void) | ||
| 1931 | { | ||
| 1932 | int err = 0; | ||
| 1933 | int count; | ||
| 1934 | |||
| 1935 | mutex_lock(&callchain_mutex); | ||
| 1936 | |||
| 1937 | count = atomic_inc_return(&nr_callchain_events); | ||
| 1938 | if (WARN_ON_ONCE(count < 1)) { | ||
| 1939 | err = -EINVAL; | ||
| 1940 | goto exit; | ||
| 1941 | } | ||
| 1942 | |||
| 1943 | if (count > 1) { | ||
| 1944 | /* If the allocation failed, give up */ | ||
| 1945 | if (!callchain_cpus_entries) | ||
| 1946 | err = -ENOMEM; | ||
| 1947 | goto exit; | ||
| 1948 | } | ||
| 1949 | |||
| 1950 | err = alloc_callchain_buffers(); | ||
| 1951 | if (err) | ||
| 1952 | release_callchain_buffers(); | ||
| 1953 | exit: | ||
| 1954 | mutex_unlock(&callchain_mutex); | ||
| 1955 | |||
| 1956 | return err; | ||
| 1957 | } | ||
| 1958 | |||
| 1959 | static void put_callchain_buffers(void) | ||
| 1960 | { | ||
| 1961 | if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { | ||
| 1962 | release_callchain_buffers(); | ||
| 1963 | mutex_unlock(&callchain_mutex); | ||
| 1964 | } | ||
| 1965 | } | ||
| 1966 | |||
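
The callchain buffers are shared by every callchain-sampling event: the first user allocates the per-CPU entries, later users only raise nr_callchain_events, and the last put releases everything, with the actual free deferred through RCU so concurrent (even NMI-level) readers can finish. A simplified userspace model of that first-user-allocates / last-user-frees refcount; plain malloc/free replace the per-CPU and RCU machinery and all names are illustrative.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cc_lock = PTHREAD_MUTEX_INITIALIZER;
static int cc_users;
static void *cc_buffers;	/* stands in for the per-cpu entry arrays */

/* First user allocates; later users piggyback on the existing buffers.
 * On failure we drop our reference so callers only put on success. */
static int get_callchain_buffers_model(void)
{
	int err = 0;

	pthread_mutex_lock(&cc_lock);
	if (++cc_users == 1) {
		cc_buffers = malloc(4096);
		if (!cc_buffers) {
			err = -1;
			cc_users--;
		}
	} else if (!cc_buffers) {
		err = -1;		/* the original allocation failed: give up */
		cc_users--;
	}
	pthread_mutex_unlock(&cc_lock);
	return err;
}

/* Last user frees. The kernel defers the free through RCU so NMI-context
 * readers can drain; plain free() keeps this sketch simple. */
static void put_callchain_buffers_model(void)
{
	pthread_mutex_lock(&cc_lock);
	if (--cc_users == 0) {
		free(cc_buffers);
		cc_buffers = NULL;
	}
	pthread_mutex_unlock(&cc_lock);
}
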
| 1967 | static int get_recursion_context(int *recursion) | ||
| 1968 | { | ||
| 1969 | int rctx; | ||
| 1970 | |||
| 1971 | if (in_nmi()) | ||
| 1972 | rctx = 3; | ||
| 1973 | else if (in_irq()) | ||
| 1974 | rctx = 2; | ||
| 1975 | else if (in_softirq()) | ||
| 1976 | rctx = 1; | ||
| 1977 | else | ||
| 1978 | rctx = 0; | ||
| 1979 | |||
| 1980 | if (recursion[rctx]) | ||
| 1981 | return -1; | ||
| 1982 | |||
| 1983 | recursion[rctx]++; | ||
| 1984 | barrier(); | ||
| 1985 | |||
| 1986 | return rctx; | ||
| 1987 | } | ||
| 1988 | |||
| 1989 | static inline void put_recursion_context(int *recursion, int rctx) | ||
| 1990 | { | ||
| 1991 | barrier(); | ||
| 1992 | recursion[rctx]--; | ||
| 1993 | } | ||
| 1994 | |||
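
get_recursion_context() gives each interruption level (task, softirq, hardirq, NMI) its own slot in a per-CPU recursion array, so a callchain capture interrupted by a higher level can still proceed while a second capture at the same level is refused. A compact model using a thread-local array in place of the per-CPU one; deriving the level from in_nmi()/in_irq()/in_softirq() is left out and the names are illustrative.

enum { CTX_TASK, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI, NR_CTX_LEVELS };

/* One flag per interruption level; thread-local stands in for per-cpu. */
static _Thread_local int recursion[NR_CTX_LEVELS];

/* Returns the slot index for the current level, or -1 if a capture is
 * already in flight at this level (i.e. we would recurse into ourselves). */
static int get_recursion_context_model(int level)
{
	if (recursion[level])
		return -1;
	recursion[level]++;
	return level;
}

static void put_recursion_context_model(int level)
{
	recursion[level]--;
}

With this scheme an NMI landing in the middle of a task-level capture gets its own entry buffer, while re-entry at the same level simply drops the sample instead of corrupting the in-use entry.
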
| 1995 | static struct perf_callchain_entry *get_callchain_entry(int *rctx) | ||
| 1996 | { | ||
| 1997 | int cpu; | ||
| 1998 | struct callchain_cpus_entries *entries; | ||
| 1999 | |||
| 2000 | *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); | ||
| 2001 | if (*rctx == -1) | ||
| 2002 | return NULL; | ||
| 2003 | |||
| 2004 | entries = rcu_dereference(callchain_cpus_entries); | ||
| 2005 | if (!entries) | ||
| 2006 | return NULL; | ||
| 2007 | |||
| 2008 | cpu = smp_processor_id(); | ||
| 2009 | |||
| 2010 | return &entries->cpu_entries[cpu][*rctx]; | ||
| 2011 | } | ||
| 2012 | |||
| 1787 | static void | 2013 | static void |
| 1788 | __perf_event_init_context(struct perf_event_context *ctx, | 2014 | put_callchain_entry(int rctx) |
| 1789 | struct task_struct *task) | 2015 | { |
| 2016 | put_recursion_context(__get_cpu_var(callchain_recursion), rctx); | ||
| 2017 | } | ||
| 2018 | |||
| 2019 | static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
| 2020 | { | ||
| 2021 | int rctx; | ||
| 2022 | struct perf_callchain_entry *entry; | ||
| 2023 | |||
| 2024 | |||
| 2025 | entry = get_callchain_entry(&rctx); | ||
| 2026 | if (rctx == -1) | ||
| 2027 | return NULL; | ||
| 2028 | |||
| 2029 | if (!entry) | ||
| 2030 | goto exit_put; | ||
| 2031 | |||
| 2032 | entry->nr = 0; | ||
| 2033 | |||
| 2034 | if (!user_mode(regs)) { | ||
| 2035 | perf_callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
| 2036 | perf_callchain_kernel(entry, regs); | ||
| 2037 | if (current->mm) | ||
| 2038 | regs = task_pt_regs(current); | ||
| 2039 | else | ||
| 2040 | regs = NULL; | ||
| 2041 | } | ||
| 2042 | |||
| 2043 | if (regs) { | ||
| 2044 | perf_callchain_store(entry, PERF_CONTEXT_USER); | ||
| 2045 | perf_callchain_user(entry, regs); | ||
| 2046 | } | ||
| 2047 | |||
| 2048 | exit_put: | ||
| 2049 | put_callchain_entry(rctx); | ||
| 2050 | |||
| 2051 | return entry; | ||
| 2052 | } | ||
| 2053 | |||
| 2054 | /* | ||
| 2055 | * Initialize the perf_event context in a task_struct: | ||
| 2056 | */ | ||
| 2057 | static void __perf_event_init_context(struct perf_event_context *ctx) | ||
| 1790 | { | 2058 | { |
| 1791 | raw_spin_lock_init(&ctx->lock); | 2059 | raw_spin_lock_init(&ctx->lock); |
| 1792 | mutex_init(&ctx->mutex); | 2060 | mutex_init(&ctx->mutex); |
| @@ -1794,45 +2062,38 @@ __perf_event_init_context(struct perf_event_context *ctx, | |||
| 1794 | INIT_LIST_HEAD(&ctx->flexible_groups); | 2062 | INIT_LIST_HEAD(&ctx->flexible_groups); |
| 1795 | INIT_LIST_HEAD(&ctx->event_list); | 2063 | INIT_LIST_HEAD(&ctx->event_list); |
| 1796 | atomic_set(&ctx->refcount, 1); | 2064 | atomic_set(&ctx->refcount, 1); |
| 1797 | ctx->task = task; | ||
| 1798 | } | 2065 | } |
| 1799 | 2066 | ||
| 1800 | static struct perf_event_context *find_get_context(pid_t pid, int cpu) | 2067 | static struct perf_event_context * |
| 2068 | alloc_perf_context(struct pmu *pmu, struct task_struct *task) | ||
| 1801 | { | 2069 | { |
| 1802 | struct perf_event_context *ctx; | 2070 | struct perf_event_context *ctx; |
| 1803 | struct perf_cpu_context *cpuctx; | ||
| 1804 | struct task_struct *task; | ||
| 1805 | unsigned long flags; | ||
| 1806 | int err; | ||
| 1807 | |||
| 1808 | if (pid == -1 && cpu != -1) { | ||
| 1809 | /* Must be root to operate on a CPU event: */ | ||
| 1810 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
| 1811 | return ERR_PTR(-EACCES); | ||
| 1812 | 2071 | ||
| 1813 | if (cpu < 0 || cpu >= nr_cpumask_bits) | 2072 | ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); |
| 1814 | return ERR_PTR(-EINVAL); | 2073 | if (!ctx) |
| 2074 | return NULL; | ||
| 1815 | 2075 | ||
| 1816 | /* | 2076 | __perf_event_init_context(ctx); |
| 1817 | * We could be clever and allow to attach a event to an | 2077 | if (task) { |
| 1818 | * offline CPU and activate it when the CPU comes up, but | 2078 | ctx->task = task; |
| 1819 | * that's for later. | 2079 | get_task_struct(task); |
| 1820 | */ | 2080 | } |
| 1821 | if (!cpu_online(cpu)) | 2081 | ctx->pmu = pmu; |
| 1822 | return ERR_PTR(-ENODEV); | ||
| 1823 | 2082 | ||
| 1824 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 2083 | return ctx; |
| 1825 | ctx = &cpuctx->ctx; | 2084 | } |
| 1826 | get_ctx(ctx); | ||
| 1827 | 2085 | ||
| 1828 | return ctx; | 2086 | static struct task_struct * |
| 1829 | } | 2087 | find_lively_task_by_vpid(pid_t vpid) |
| 2088 | { | ||
| 2089 | struct task_struct *task; | ||
| 2090 | int err; | ||
| 1830 | 2091 | ||
| 1831 | rcu_read_lock(); | 2092 | rcu_read_lock(); |
| 1832 | if (!pid) | 2093 | if (!vpid) |
| 1833 | task = current; | 2094 | task = current; |
| 1834 | else | 2095 | else |
| 1835 | task = find_task_by_vpid(pid); | 2096 | task = find_task_by_vpid(vpid); |
| 1836 | if (task) | 2097 | if (task) |
| 1837 | get_task_struct(task); | 2098 | get_task_struct(task); |
| 1838 | rcu_read_unlock(); | 2099 | rcu_read_unlock(); |
| @@ -1852,36 +2113,78 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) | |||
| 1852 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) | 2113 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) |
| 1853 | goto errout; | 2114 | goto errout; |
| 1854 | 2115 | ||
| 1855 | retry: | 2116 | return task; |
| 1856 | ctx = perf_lock_task_context(task, &flags); | 2117 | errout: |
| 2118 | put_task_struct(task); | ||
| 2119 | return ERR_PTR(err); | ||
| 2120 | |||
| 2121 | } | ||
| 2122 | |||
| 2123 | static struct perf_event_context * | ||
| 2124 | find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) | ||
| 2125 | { | ||
| 2126 | struct perf_event_context *ctx; | ||
| 2127 | struct perf_cpu_context *cpuctx; | ||
| 2128 | unsigned long flags; | ||
| 2129 | int ctxn, err; | ||
| 2130 | |||
| 2131 | if (!task && cpu != -1) { | ||
| 2132 | /* Must be root to operate on a CPU event: */ | ||
| 2133 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
| 2134 | return ERR_PTR(-EACCES); | ||
| 2135 | |||
| 2136 | if (cpu < 0 || cpu >= nr_cpumask_bits) | ||
| 2137 | return ERR_PTR(-EINVAL); | ||
| 2138 | |||
| 2139 | /* | ||
| 2140 | * We could be clever and allow to attach a event to an | ||
| 2141 | * offline CPU and activate it when the CPU comes up, but | ||
| 2142 | * that's for later. | ||
| 2143 | */ | ||
| 2144 | if (!cpu_online(cpu)) | ||
| 2145 | return ERR_PTR(-ENODEV); | ||
| 2146 | |||
| 2147 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); | ||
| 2148 | ctx = &cpuctx->ctx; | ||
| 2149 | get_ctx(ctx); | ||
| 2150 | |||
| 2151 | return ctx; | ||
| 2152 | } | ||
| 2153 | |||
| 2154 | err = -EINVAL; | ||
| 2155 | ctxn = pmu->task_ctx_nr; | ||
| 2156 | if (ctxn < 0) | ||
| 2157 | goto errout; | ||
| 2158 | |||
| 2159 | retry: | ||
| 2160 | ctx = perf_lock_task_context(task, ctxn, &flags); | ||
| 1857 | if (ctx) { | 2161 | if (ctx) { |
| 1858 | unclone_ctx(ctx); | 2162 | unclone_ctx(ctx); |
| 1859 | raw_spin_unlock_irqrestore(&ctx->lock, flags); | 2163 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
| 1860 | } | 2164 | } |
| 1861 | 2165 | ||
| 1862 | if (!ctx) { | 2166 | if (!ctx) { |
| 1863 | ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); | 2167 | ctx = alloc_perf_context(pmu, task); |
| 1864 | err = -ENOMEM; | 2168 | err = -ENOMEM; |
| 1865 | if (!ctx) | 2169 | if (!ctx) |
| 1866 | goto errout; | 2170 | goto errout; |
| 1867 | __perf_event_init_context(ctx, task); | 2171 | |
| 1868 | get_ctx(ctx); | 2172 | get_ctx(ctx); |
| 1869 | if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) { | 2173 | |
| 2174 | if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) { | ||
| 1870 | /* | 2175 | /* |
| 1871 | * We raced with some other task; use | 2176 | * We raced with some other task; use |
| 1872 | * the context they set. | 2177 | * the context they set. |
| 1873 | */ | 2178 | */ |
| 2179 | put_task_struct(task); | ||
| 1874 | kfree(ctx); | 2180 | kfree(ctx); |
| 1875 | goto retry; | 2181 | goto retry; |
| 1876 | } | 2182 | } |
| 1877 | get_task_struct(task); | ||
| 1878 | } | 2183 | } |
| 1879 | 2184 | ||
| 1880 | put_task_struct(task); | ||
| 1881 | return ctx; | 2185 | return ctx; |
| 1882 | 2186 | ||
| 1883 | errout: | 2187 | errout: |
| 1884 | put_task_struct(task); | ||
| 1885 | return ERR_PTR(err); | 2188 | return ERR_PTR(err); |
| 1886 | } | 2189 | } |
| 1887 | 2190 | ||
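Editor's note: find_get_context() is reorganized around the new alloc_perf_context() and find_lively_task_by_vpid() helpers and now picks the per-pmu context slot via pmu->task_ctx_nr, but it keeps the lockless installation protocol: the first cmpxchg() into task->perf_event_ctxp[ctxn] wins, the loser frees its allocation and retries. A reduced sketch of that publish-once idiom (install_once and struct thing are placeholders, not kernel code):

/* Sketch of the cmpxchg() "publish once, retry on race" idiom used above;
 * 'slot' stands in for &task->perf_event_ctxp[ctxn]. Assumes <linux/slab.h>. */
struct thing *install_once(struct thing **slot)
{
	struct thing *new, *cur;

retry:
	cur = *slot;			/* the kernel code does an RCU-safe lookup here */
	if (cur)
		return cur;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	if (cmpxchg(slot, NULL, new) != NULL) {
		/* somebody else published theirs first: drop ours and retry */
		kfree(new);
		goto retry;
	}

	return new;
}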
| @@ -1898,21 +2201,23 @@ static void free_event_rcu(struct rcu_head *head) | |||
| 1898 | kfree(event); | 2201 | kfree(event); |
| 1899 | } | 2202 | } |
| 1900 | 2203 | ||
| 1901 | static void perf_pending_sync(struct perf_event *event); | ||
| 1902 | static void perf_buffer_put(struct perf_buffer *buffer); | 2204 | static void perf_buffer_put(struct perf_buffer *buffer); |
| 1903 | 2205 | ||
| 1904 | static void free_event(struct perf_event *event) | 2206 | static void free_event(struct perf_event *event) |
| 1905 | { | 2207 | { |
| 1906 | perf_pending_sync(event); | 2208 | irq_work_sync(&event->pending); |
| 1907 | 2209 | ||
| 1908 | if (!event->parent) { | 2210 | if (!event->parent) { |
| 1909 | atomic_dec(&nr_events); | 2211 | if (event->attach_state & PERF_ATTACH_TASK) |
| 2212 | jump_label_dec(&perf_task_events); | ||
| 1910 | if (event->attr.mmap || event->attr.mmap_data) | 2213 | if (event->attr.mmap || event->attr.mmap_data) |
| 1911 | atomic_dec(&nr_mmap_events); | 2214 | atomic_dec(&nr_mmap_events); |
| 1912 | if (event->attr.comm) | 2215 | if (event->attr.comm) |
| 1913 | atomic_dec(&nr_comm_events); | 2216 | atomic_dec(&nr_comm_events); |
| 1914 | if (event->attr.task) | 2217 | if (event->attr.task) |
| 1915 | atomic_dec(&nr_task_events); | 2218 | atomic_dec(&nr_task_events); |
| 2219 | if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) | ||
| 2220 | put_callchain_buffers(); | ||
| 1916 | } | 2221 | } |
| 1917 | 2222 | ||
| 1918 | if (event->buffer) { | 2223 | if (event->buffer) { |
| @@ -1923,7 +2228,9 @@ static void free_event(struct perf_event *event) | |||
| 1923 | if (event->destroy) | 2228 | if (event->destroy) |
| 1924 | event->destroy(event); | 2229 | event->destroy(event); |
| 1925 | 2230 | ||
| 1926 | put_ctx(event->ctx); | 2231 | if (event->ctx) |
| 2232 | put_ctx(event->ctx); | ||
| 2233 | |||
| 1927 | call_rcu(&event->rcu_head, free_event_rcu); | 2234 | call_rcu(&event->rcu_head, free_event_rcu); |
| 1928 | } | 2235 | } |
| 1929 | 2236 | ||
| @@ -2202,15 +2509,13 @@ static void perf_event_for_each(struct perf_event *event, | |||
| 2202 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | 2509 | static int perf_event_period(struct perf_event *event, u64 __user *arg) |
| 2203 | { | 2510 | { |
| 2204 | struct perf_event_context *ctx = event->ctx; | 2511 | struct perf_event_context *ctx = event->ctx; |
| 2205 | unsigned long size; | ||
| 2206 | int ret = 0; | 2512 | int ret = 0; |
| 2207 | u64 value; | 2513 | u64 value; |
| 2208 | 2514 | ||
| 2209 | if (!event->attr.sample_period) | 2515 | if (!event->attr.sample_period) |
| 2210 | return -EINVAL; | 2516 | return -EINVAL; |
| 2211 | 2517 | ||
| 2212 | size = copy_from_user(&value, arg, sizeof(value)); | 2518 | if (copy_from_user(&value, arg, sizeof(value))) |
| 2213 | if (size != sizeof(value)) | ||
| 2214 | return -EFAULT; | 2519 | return -EFAULT; |
| 2215 | 2520 | ||
| 2216 | if (!value) | 2521 | if (!value) |
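Editor's note: the perf_event_period() hunk fixes a copy_from_user() misuse. copy_from_user() returns the number of bytes it could not copy (0 on success), not the number copied, so comparing the result against sizeof(value) inverted the test: a successful copy was rejected with -EFAULT while a completely failed copy slipped through. The conventional form adopted above, wrapped in a hypothetical helper for illustration:

/* Correct pattern: any non-zero return (bytes left uncopied) is a fault.
 * read_u64_from_user() is a hypothetical name, not kernel API. */
static int read_u64_from_user(u64 __user *arg, u64 *value)
{
	if (copy_from_user(value, arg, sizeof(*value)))
		return -EFAULT;

	return 0;
}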
| @@ -2344,6 +2649,9 @@ int perf_event_task_disable(void) | |||
| 2344 | 2649 | ||
| 2345 | static int perf_event_index(struct perf_event *event) | 2650 | static int perf_event_index(struct perf_event *event) |
| 2346 | { | 2651 | { |
| 2652 | if (event->hw.state & PERF_HES_STOPPED) | ||
| 2653 | return 0; | ||
| 2654 | |||
| 2347 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 2655 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 2348 | return 0; | 2656 | return 0; |
| 2349 | 2657 | ||
| @@ -2847,16 +3155,7 @@ void perf_event_wakeup(struct perf_event *event) | |||
| 2847 | } | 3155 | } |
| 2848 | } | 3156 | } |
| 2849 | 3157 | ||
| 2850 | /* | 3158 | static void perf_pending_event(struct irq_work *entry) |
| 2851 | * Pending wakeups | ||
| 2852 | * | ||
| 2853 | * Handle the case where we need to wakeup up from NMI (or rq->lock) context. | ||
| 2854 | * | ||
| 2855 | * The NMI bit means we cannot possibly take locks. Therefore, maintain a | ||
| 2856 | * single linked list and use cmpxchg() to add entries lockless. | ||
| 2857 | */ | ||
| 2858 | |||
| 2859 | static void perf_pending_event(struct perf_pending_entry *entry) | ||
| 2860 | { | 3159 | { |
| 2861 | struct perf_event *event = container_of(entry, | 3160 | struct perf_event *event = container_of(entry, |
| 2862 | struct perf_event, pending); | 3161 | struct perf_event, pending); |
| @@ -2872,99 +3171,6 @@ static void perf_pending_event(struct perf_pending_entry *entry) | |||
| 2872 | } | 3171 | } |
| 2873 | } | 3172 | } |
| 2874 | 3173 | ||
| 2875 | #define PENDING_TAIL ((struct perf_pending_entry *)-1UL) | ||
| 2876 | |||
| 2877 | static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { | ||
| 2878 | PENDING_TAIL, | ||
| 2879 | }; | ||
| 2880 | |||
| 2881 | static void perf_pending_queue(struct perf_pending_entry *entry, | ||
| 2882 | void (*func)(struct perf_pending_entry *)) | ||
| 2883 | { | ||
| 2884 | struct perf_pending_entry **head; | ||
| 2885 | |||
| 2886 | if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) | ||
| 2887 | return; | ||
| 2888 | |||
| 2889 | entry->func = func; | ||
| 2890 | |||
| 2891 | head = &get_cpu_var(perf_pending_head); | ||
| 2892 | |||
| 2893 | do { | ||
| 2894 | entry->next = *head; | ||
| 2895 | } while (cmpxchg(head, entry->next, entry) != entry->next); | ||
| 2896 | |||
| 2897 | set_perf_event_pending(); | ||
| 2898 | |||
| 2899 | put_cpu_var(perf_pending_head); | ||
| 2900 | } | ||
| 2901 | |||
| 2902 | static int __perf_pending_run(void) | ||
| 2903 | { | ||
| 2904 | struct perf_pending_entry *list; | ||
| 2905 | int nr = 0; | ||
| 2906 | |||
| 2907 | list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); | ||
| 2908 | while (list != PENDING_TAIL) { | ||
| 2909 | void (*func)(struct perf_pending_entry *); | ||
| 2910 | struct perf_pending_entry *entry = list; | ||
| 2911 | |||
| 2912 | list = list->next; | ||
| 2913 | |||
| 2914 | func = entry->func; | ||
| 2915 | entry->next = NULL; | ||
| 2916 | /* | ||
| 2917 | * Ensure we observe the unqueue before we issue the wakeup, | ||
| 2918 | * so that we won't be waiting forever. | ||
| 2919 | * -- see perf_not_pending(). | ||
| 2920 | */ | ||
| 2921 | smp_wmb(); | ||
| 2922 | |||
| 2923 | func(entry); | ||
| 2924 | nr++; | ||
| 2925 | } | ||
| 2926 | |||
| 2927 | return nr; | ||
| 2928 | } | ||
| 2929 | |||
| 2930 | static inline int perf_not_pending(struct perf_event *event) | ||
| 2931 | { | ||
| 2932 | /* | ||
| 2933 | * If we flush on whatever cpu we run, there is a chance we don't | ||
| 2934 | * need to wait. | ||
| 2935 | */ | ||
| 2936 | get_cpu(); | ||
| 2937 | __perf_pending_run(); | ||
| 2938 | put_cpu(); | ||
| 2939 | |||
| 2940 | /* | ||
| 2941 | * Ensure we see the proper queue state before going to sleep | ||
| 2942 | * so that we do not miss the wakeup. -- see perf_pending_handle() | ||
| 2943 | */ | ||
| 2944 | smp_rmb(); | ||
| 2945 | return event->pending.next == NULL; | ||
| 2946 | } | ||
| 2947 | |||
| 2948 | static void perf_pending_sync(struct perf_event *event) | ||
| 2949 | { | ||
| 2950 | wait_event(event->waitq, perf_not_pending(event)); | ||
| 2951 | } | ||
| 2952 | |||
| 2953 | void perf_event_do_pending(void) | ||
| 2954 | { | ||
| 2955 | __perf_pending_run(); | ||
| 2956 | } | ||
| 2957 | |||
| 2958 | /* | ||
| 2959 | * Callchain support -- arch specific | ||
| 2960 | */ | ||
| 2961 | |||
| 2962 | __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
| 2963 | { | ||
| 2964 | return NULL; | ||
| 2965 | } | ||
| 2966 | |||
| 2967 | |||
| 2968 | /* | 3174 | /* |
| 2969 | * We assume there is only KVM supporting the callbacks. | 3175 | * We assume there is only KVM supporting the callbacks. |
| 2970 | * Later on, we might change it to a list if there is | 3176 | * Later on, we might change it to a list if there is |
| @@ -3014,8 +3220,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle) | |||
| 3014 | 3220 | ||
| 3015 | if (handle->nmi) { | 3221 | if (handle->nmi) { |
| 3016 | handle->event->pending_wakeup = 1; | 3222 | handle->event->pending_wakeup = 1; |
| 3017 | perf_pending_queue(&handle->event->pending, | 3223 | irq_work_queue(&handle->event->pending); |
| 3018 | perf_pending_event); | ||
| 3019 | } else | 3224 | } else |
| 3020 | perf_event_wakeup(handle->event); | 3225 | perf_event_wakeup(handle->event); |
| 3021 | } | 3226 | } |
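Editor's note: perf_output_wakeup() (and __perf_event_overflow() further down) now hand NMI-context wakeups to the generic irq_work layer; the hand-rolled per-CPU pending list and perf_pending_sync() are deleted in the earlier hunks, with free_event() calling irq_work_sync() instead. A stand-alone sketch of the same pattern; init_irq_work() from <linux/irq_work.h> is assumed, and the callback and wrapper names are hypothetical:

#include <linux/irq_work.h>

static struct irq_work my_work;

static void my_deferred_wakeup(struct irq_work *work)
{
	/* runs later from hard-irq context, where taking locks is allowed */
}

static void my_setup(void)
{
	init_irq_work(&my_work, my_deferred_wakeup);
}

static void my_nmi_handler(void)
{
	/* NMI-safe: lockless enqueue plus a self-IPI to run the callback */
	irq_work_queue(&my_work);
}

static void my_teardown(void)
{
	/* wait for any queued instance of the callback before freeing */
	irq_work_sync(&my_work);
}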
| @@ -3071,7 +3276,7 @@ again: | |||
| 3071 | if (handle->wakeup != local_read(&buffer->wakeup)) | 3276 | if (handle->wakeup != local_read(&buffer->wakeup)) |
| 3072 | perf_output_wakeup(handle); | 3277 | perf_output_wakeup(handle); |
| 3073 | 3278 | ||
| 3074 | out: | 3279 | out: |
| 3075 | preempt_enable(); | 3280 | preempt_enable(); |
| 3076 | } | 3281 | } |
| 3077 | 3282 | ||
| @@ -3459,14 +3664,20 @@ static void perf_event_output(struct perf_event *event, int nmi, | |||
| 3459 | struct perf_output_handle handle; | 3664 | struct perf_output_handle handle; |
| 3460 | struct perf_event_header header; | 3665 | struct perf_event_header header; |
| 3461 | 3666 | ||
| 3667 | /* protect the callchain buffers */ | ||
| 3668 | rcu_read_lock(); | ||
| 3669 | |||
| 3462 | perf_prepare_sample(&header, data, event, regs); | 3670 | perf_prepare_sample(&header, data, event, regs); |
| 3463 | 3671 | ||
| 3464 | if (perf_output_begin(&handle, event, header.size, nmi, 1)) | 3672 | if (perf_output_begin(&handle, event, header.size, nmi, 1)) |
| 3465 | return; | 3673 | goto exit; |
| 3466 | 3674 | ||
| 3467 | perf_output_sample(&handle, &header, data, event); | 3675 | perf_output_sample(&handle, &header, data, event); |
| 3468 | 3676 | ||
| 3469 | perf_output_end(&handle); | 3677 | perf_output_end(&handle); |
| 3678 | |||
| 3679 | exit: | ||
| 3680 | rcu_read_unlock(); | ||
| 3470 | } | 3681 | } |
| 3471 | 3682 | ||
| 3472 | /* | 3683 | /* |
| @@ -3580,16 +3791,27 @@ static void perf_event_task_ctx(struct perf_event_context *ctx, | |||
| 3580 | static void perf_event_task_event(struct perf_task_event *task_event) | 3791 | static void perf_event_task_event(struct perf_task_event *task_event) |
| 3581 | { | 3792 | { |
| 3582 | struct perf_cpu_context *cpuctx; | 3793 | struct perf_cpu_context *cpuctx; |
| 3583 | struct perf_event_context *ctx = task_event->task_ctx; | 3794 | struct perf_event_context *ctx; |
| 3795 | struct pmu *pmu; | ||
| 3796 | int ctxn; | ||
| 3584 | 3797 | ||
| 3585 | rcu_read_lock(); | 3798 | rcu_read_lock(); |
| 3586 | cpuctx = &get_cpu_var(perf_cpu_context); | 3799 | list_for_each_entry_rcu(pmu, &pmus, entry) { |
| 3587 | perf_event_task_ctx(&cpuctx->ctx, task_event); | 3800 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); |
| 3588 | if (!ctx) | 3801 | perf_event_task_ctx(&cpuctx->ctx, task_event); |
| 3589 | ctx = rcu_dereference(current->perf_event_ctxp); | 3802 | |
| 3590 | if (ctx) | 3803 | ctx = task_event->task_ctx; |
| 3591 | perf_event_task_ctx(ctx, task_event); | 3804 | if (!ctx) { |
| 3592 | put_cpu_var(perf_cpu_context); | 3805 | ctxn = pmu->task_ctx_nr; |
| 3806 | if (ctxn < 0) | ||
| 3807 | goto next; | ||
| 3808 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
| 3809 | } | ||
| 3810 | if (ctx) | ||
| 3811 | perf_event_task_ctx(ctx, task_event); | ||
| 3812 | next: | ||
| 3813 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
| 3814 | } | ||
| 3593 | rcu_read_unlock(); | 3815 | rcu_read_unlock(); |
| 3594 | } | 3816 | } |
| 3595 | 3817 | ||
| @@ -3694,8 +3916,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | |||
| 3694 | { | 3916 | { |
| 3695 | struct perf_cpu_context *cpuctx; | 3917 | struct perf_cpu_context *cpuctx; |
| 3696 | struct perf_event_context *ctx; | 3918 | struct perf_event_context *ctx; |
| 3697 | unsigned int size; | ||
| 3698 | char comm[TASK_COMM_LEN]; | 3919 | char comm[TASK_COMM_LEN]; |
| 3920 | unsigned int size; | ||
| 3921 | struct pmu *pmu; | ||
| 3922 | int ctxn; | ||
| 3699 | 3923 | ||
| 3700 | memset(comm, 0, sizeof(comm)); | 3924 | memset(comm, 0, sizeof(comm)); |
| 3701 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); | 3925 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); |
| @@ -3707,21 +3931,36 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | |||
| 3707 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; | 3931 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; |
| 3708 | 3932 | ||
| 3709 | rcu_read_lock(); | 3933 | rcu_read_lock(); |
| 3710 | cpuctx = &get_cpu_var(perf_cpu_context); | 3934 | list_for_each_entry_rcu(pmu, &pmus, entry) { |
| 3711 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); | 3935 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); |
| 3712 | ctx = rcu_dereference(current->perf_event_ctxp); | 3936 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); |
| 3713 | if (ctx) | 3937 | |
| 3714 | perf_event_comm_ctx(ctx, comm_event); | 3938 | ctxn = pmu->task_ctx_nr; |
| 3715 | put_cpu_var(perf_cpu_context); | 3939 | if (ctxn < 0) |
| 3940 | goto next; | ||
| 3941 | |||
| 3942 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
| 3943 | if (ctx) | ||
| 3944 | perf_event_comm_ctx(ctx, comm_event); | ||
| 3945 | next: | ||
| 3946 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
| 3947 | } | ||
| 3716 | rcu_read_unlock(); | 3948 | rcu_read_unlock(); |
| 3717 | } | 3949 | } |
| 3718 | 3950 | ||
| 3719 | void perf_event_comm(struct task_struct *task) | 3951 | void perf_event_comm(struct task_struct *task) |
| 3720 | { | 3952 | { |
| 3721 | struct perf_comm_event comm_event; | 3953 | struct perf_comm_event comm_event; |
| 3954 | struct perf_event_context *ctx; | ||
| 3955 | int ctxn; | ||
| 3722 | 3956 | ||
| 3723 | if (task->perf_event_ctxp) | 3957 | for_each_task_context_nr(ctxn) { |
| 3724 | perf_event_enable_on_exec(task); | 3958 | ctx = task->perf_event_ctxp[ctxn]; |
| 3959 | if (!ctx) | ||
| 3960 | continue; | ||
| 3961 | |||
| 3962 | perf_event_enable_on_exec(ctx); | ||
| 3963 | } | ||
| 3725 | 3964 | ||
| 3726 | if (!atomic_read(&nr_comm_events)) | 3965 | if (!atomic_read(&nr_comm_events)) |
| 3727 | return; | 3966 | return; |
| @@ -3823,6 +4062,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | |||
| 3823 | char tmp[16]; | 4062 | char tmp[16]; |
| 3824 | char *buf = NULL; | 4063 | char *buf = NULL; |
| 3825 | const char *name; | 4064 | const char *name; |
| 4065 | struct pmu *pmu; | ||
| 4066 | int ctxn; | ||
| 3826 | 4067 | ||
| 3827 | memset(tmp, 0, sizeof(tmp)); | 4068 | memset(tmp, 0, sizeof(tmp)); |
| 3828 | 4069 | ||
| @@ -3875,12 +4116,23 @@ got_name: | |||
| 3875 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; | 4116 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; |
| 3876 | 4117 | ||
| 3877 | rcu_read_lock(); | 4118 | rcu_read_lock(); |
| 3878 | cpuctx = &get_cpu_var(perf_cpu_context); | 4119 | list_for_each_entry_rcu(pmu, &pmus, entry) { |
| 3879 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); | 4120 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); |
| 3880 | ctx = rcu_dereference(current->perf_event_ctxp); | 4121 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, |
| 3881 | if (ctx) | 4122 | vma->vm_flags & VM_EXEC); |
| 3882 | perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC); | 4123 | |
| 3883 | put_cpu_var(perf_cpu_context); | 4124 | ctxn = pmu->task_ctx_nr; |
| 4125 | if (ctxn < 0) | ||
| 4126 | goto next; | ||
| 4127 | |||
| 4128 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | ||
| 4129 | if (ctx) { | ||
| 4130 | perf_event_mmap_ctx(ctx, mmap_event, | ||
| 4131 | vma->vm_flags & VM_EXEC); | ||
| 4132 | } | ||
| 4133 | next: | ||
| 4134 | put_cpu_ptr(pmu->pmu_cpu_context); | ||
| 4135 | } | ||
| 3884 | rcu_read_unlock(); | 4136 | rcu_read_unlock(); |
| 3885 | 4137 | ||
| 3886 | kfree(buf); | 4138 | kfree(buf); |
| @@ -3962,8 +4214,6 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, | |||
| 3962 | struct hw_perf_event *hwc = &event->hw; | 4214 | struct hw_perf_event *hwc = &event->hw; |
| 3963 | int ret = 0; | 4215 | int ret = 0; |
| 3964 | 4216 | ||
| 3965 | throttle = (throttle && event->pmu->unthrottle != NULL); | ||
| 3966 | |||
| 3967 | if (!throttle) { | 4217 | if (!throttle) { |
| 3968 | hwc->interrupts++; | 4218 | hwc->interrupts++; |
| 3969 | } else { | 4219 | } else { |
| @@ -4006,8 +4256,7 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, | |||
| 4006 | event->pending_kill = POLL_HUP; | 4256 | event->pending_kill = POLL_HUP; |
| 4007 | if (nmi) { | 4257 | if (nmi) { |
| 4008 | event->pending_disable = 1; | 4258 | event->pending_disable = 1; |
| 4009 | perf_pending_queue(&event->pending, | 4259 | irq_work_queue(&event->pending); |
| 4010 | perf_pending_event); | ||
| 4011 | } else | 4260 | } else |
| 4012 | perf_event_disable(event); | 4261 | perf_event_disable(event); |
| 4013 | } | 4262 | } |
| @@ -4031,6 +4280,17 @@ int perf_event_overflow(struct perf_event *event, int nmi, | |||
| 4031 | * Generic software event infrastructure | 4280 | * Generic software event infrastructure |
| 4032 | */ | 4281 | */ |
| 4033 | 4282 | ||
| 4283 | struct swevent_htable { | ||
| 4284 | struct swevent_hlist *swevent_hlist; | ||
| 4285 | struct mutex hlist_mutex; | ||
| 4286 | int hlist_refcount; | ||
| 4287 | |||
| 4288 | /* Recursion avoidance in each contexts */ | ||
| 4289 | int recursion[PERF_NR_CONTEXTS]; | ||
| 4290 | }; | ||
| 4291 | |||
| 4292 | static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); | ||
| 4293 | |||
| 4034 | /* | 4294 | /* |
| 4035 | * We directly increment event->count and keep a second value in | 4295 | * We directly increment event->count and keep a second value in |
| 4036 | * event->hw.period_left to count intervals. This period event | 4296 | * event->hw.period_left to count intervals. This period event |
| @@ -4088,7 +4348,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow, | |||
| 4088 | } | 4348 | } |
| 4089 | } | 4349 | } |
| 4090 | 4350 | ||
| 4091 | static void perf_swevent_add(struct perf_event *event, u64 nr, | 4351 | static void perf_swevent_event(struct perf_event *event, u64 nr, |
| 4092 | int nmi, struct perf_sample_data *data, | 4352 | int nmi, struct perf_sample_data *data, |
| 4093 | struct pt_regs *regs) | 4353 | struct pt_regs *regs) |
| 4094 | { | 4354 | { |
| @@ -4114,6 +4374,9 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, | |||
| 4114 | static int perf_exclude_event(struct perf_event *event, | 4374 | static int perf_exclude_event(struct perf_event *event, |
| 4115 | struct pt_regs *regs) | 4375 | struct pt_regs *regs) |
| 4116 | { | 4376 | { |
| 4377 | if (event->hw.state & PERF_HES_STOPPED) | ||
| 4378 | return 0; | ||
| 4379 | |||
| 4117 | if (regs) { | 4380 | if (regs) { |
| 4118 | if (event->attr.exclude_user && user_mode(regs)) | 4381 | if (event->attr.exclude_user && user_mode(regs)) |
| 4119 | return 1; | 4382 | return 1; |
| @@ -4160,11 +4423,11 @@ __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) | |||
| 4160 | 4423 | ||
| 4161 | /* For the read side: events when they trigger */ | 4424 | /* For the read side: events when they trigger */ |
| 4162 | static inline struct hlist_head * | 4425 | static inline struct hlist_head * |
| 4163 | find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id) | 4426 | find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) |
| 4164 | { | 4427 | { |
| 4165 | struct swevent_hlist *hlist; | 4428 | struct swevent_hlist *hlist; |
| 4166 | 4429 | ||
| 4167 | hlist = rcu_dereference(ctx->swevent_hlist); | 4430 | hlist = rcu_dereference(swhash->swevent_hlist); |
| 4168 | if (!hlist) | 4431 | if (!hlist) |
| 4169 | return NULL; | 4432 | return NULL; |
| 4170 | 4433 | ||
| @@ -4173,7 +4436,7 @@ find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id) | |||
| 4173 | 4436 | ||
| 4174 | /* For the event head insertion and removal in the hlist */ | 4437 | /* For the event head insertion and removal in the hlist */ |
| 4175 | static inline struct hlist_head * | 4438 | static inline struct hlist_head * |
| 4176 | find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event) | 4439 | find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) |
| 4177 | { | 4440 | { |
| 4178 | struct swevent_hlist *hlist; | 4441 | struct swevent_hlist *hlist; |
| 4179 | u32 event_id = event->attr.config; | 4442 | u32 event_id = event->attr.config; |
| @@ -4184,7 +4447,7 @@ find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event) | |||
| 4184 | * and release. Which makes the protected version suitable here. | 4447 | * and release. Which makes the protected version suitable here. |
| 4185 | * The context lock guarantees that. | 4448 | * The context lock guarantees that. |
| 4186 | */ | 4449 | */ |
| 4187 | hlist = rcu_dereference_protected(ctx->swevent_hlist, | 4450 | hlist = rcu_dereference_protected(swhash->swevent_hlist, |
| 4188 | lockdep_is_held(&event->ctx->lock)); | 4451 | lockdep_is_held(&event->ctx->lock)); |
| 4189 | if (!hlist) | 4452 | if (!hlist) |
| 4190 | return NULL; | 4453 | return NULL; |
| @@ -4197,23 +4460,19 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | |||
| 4197 | struct perf_sample_data *data, | 4460 | struct perf_sample_data *data, |
| 4198 | struct pt_regs *regs) | 4461 | struct pt_regs *regs) |
| 4199 | { | 4462 | { |
| 4200 | struct perf_cpu_context *cpuctx; | 4463 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); |
| 4201 | struct perf_event *event; | 4464 | struct perf_event *event; |
| 4202 | struct hlist_node *node; | 4465 | struct hlist_node *node; |
| 4203 | struct hlist_head *head; | 4466 | struct hlist_head *head; |
| 4204 | 4467 | ||
| 4205 | cpuctx = &__get_cpu_var(perf_cpu_context); | ||
| 4206 | |||
| 4207 | rcu_read_lock(); | 4468 | rcu_read_lock(); |
| 4208 | 4469 | head = find_swevent_head_rcu(swhash, type, event_id); | |
| 4209 | head = find_swevent_head_rcu(cpuctx, type, event_id); | ||
| 4210 | |||
| 4211 | if (!head) | 4470 | if (!head) |
| 4212 | goto end; | 4471 | goto end; |
| 4213 | 4472 | ||
| 4214 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { | 4473 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { |
| 4215 | if (perf_swevent_match(event, type, event_id, data, regs)) | 4474 | if (perf_swevent_match(event, type, event_id, data, regs)) |
| 4216 | perf_swevent_add(event, nr, nmi, data, regs); | 4475 | perf_swevent_event(event, nr, nmi, data, regs); |
| 4217 | } | 4476 | } |
| 4218 | end: | 4477 | end: |
| 4219 | rcu_read_unlock(); | 4478 | rcu_read_unlock(); |
| @@ -4221,33 +4480,17 @@ end: | |||
| 4221 | 4480 | ||
| 4222 | int perf_swevent_get_recursion_context(void) | 4481 | int perf_swevent_get_recursion_context(void) |
| 4223 | { | 4482 | { |
| 4224 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 4483 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); |
| 4225 | int rctx; | ||
| 4226 | |||
| 4227 | if (in_nmi()) | ||
| 4228 | rctx = 3; | ||
| 4229 | else if (in_irq()) | ||
| 4230 | rctx = 2; | ||
| 4231 | else if (in_softirq()) | ||
| 4232 | rctx = 1; | ||
| 4233 | else | ||
| 4234 | rctx = 0; | ||
| 4235 | |||
| 4236 | if (cpuctx->recursion[rctx]) | ||
| 4237 | return -1; | ||
| 4238 | |||
| 4239 | cpuctx->recursion[rctx]++; | ||
| 4240 | barrier(); | ||
| 4241 | 4484 | ||
| 4242 | return rctx; | 4485 | return get_recursion_context(swhash->recursion); |
| 4243 | } | 4486 | } |
| 4244 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); | 4487 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); |
| 4245 | 4488 | ||
| 4246 | void inline perf_swevent_put_recursion_context(int rctx) | 4489 | void inline perf_swevent_put_recursion_context(int rctx) |
| 4247 | { | 4490 | { |
| 4248 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 4491 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); |
| 4249 | barrier(); | 4492 | |
| 4250 | cpuctx->recursion[rctx]--; | 4493 | put_recursion_context(swhash->recursion, rctx); |
| 4251 | } | 4494 | } |
| 4252 | 4495 | ||
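Editor's note: the open-coded in_nmi()/in_irq()/in_softirq() recursion bookkeeping moves into get_recursion_context()/put_recursion_context(), which this hunk only calls. A plausible reconstruction of those helpers, based purely on the code deleted above; the real implementations live elsewhere in this series and may differ:

/* Reconstruction of the factored-out helpers -- not the authoritative source. */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;	/* already inside a swevent in this context */

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}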
| 4253 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | 4496 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, |
| @@ -4273,20 +4516,20 @@ static void perf_swevent_read(struct perf_event *event) | |||
| 4273 | { | 4516 | { |
| 4274 | } | 4517 | } |
| 4275 | 4518 | ||
| 4276 | static int perf_swevent_enable(struct perf_event *event) | 4519 | static int perf_swevent_add(struct perf_event *event, int flags) |
| 4277 | { | 4520 | { |
| 4521 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | ||
| 4278 | struct hw_perf_event *hwc = &event->hw; | 4522 | struct hw_perf_event *hwc = &event->hw; |
| 4279 | struct perf_cpu_context *cpuctx; | ||
| 4280 | struct hlist_head *head; | 4523 | struct hlist_head *head; |
| 4281 | 4524 | ||
| 4282 | cpuctx = &__get_cpu_var(perf_cpu_context); | ||
| 4283 | |||
| 4284 | if (hwc->sample_period) { | 4525 | if (hwc->sample_period) { |
| 4285 | hwc->last_period = hwc->sample_period; | 4526 | hwc->last_period = hwc->sample_period; |
| 4286 | perf_swevent_set_period(event); | 4527 | perf_swevent_set_period(event); |
| 4287 | } | 4528 | } |
| 4288 | 4529 | ||
| 4289 | head = find_swevent_head(cpuctx, event); | 4530 | hwc->state = !(flags & PERF_EF_START); |
| 4531 | |||
| 4532 | head = find_swevent_head(swhash, event); | ||
| 4290 | if (WARN_ON_ONCE(!head)) | 4533 | if (WARN_ON_ONCE(!head)) |
| 4291 | return -EINVAL; | 4534 | return -EINVAL; |
| 4292 | 4535 | ||
| @@ -4295,202 +4538,27 @@ static int perf_swevent_enable(struct perf_event *event) | |||
| 4295 | return 0; | 4538 | return 0; |
| 4296 | } | 4539 | } |
| 4297 | 4540 | ||
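Editor's note: perf_swevent_enable()/disable() become perf_swevent_add()/del(), and the new start()/stop() callbacks plus the PERF_HES_STOPPED bit in hw.state let the core program an event without counting yet: add() leaves the event stopped unless PERF_EF_START is passed, and the sampling paths drop events whose hw.state has PERF_HES_STOPPED set. An illustrative, non-authoritative view of how a caller drives the new callbacks (the real scheduling code is outside this hunk):

/* Illustrative driver of the new pmu callbacks; error handling omitted. */
static void example_event_lifecycle(struct perf_event *event)
{
	/* program the counter and start it counting right away */
	if (event->pmu->add(event, PERF_EF_START))
		return;

	/* pause it, folding the current hardware count into event->count */
	event->pmu->stop(event, PERF_EF_UPDATE);

	/* resume counting */
	event->pmu->start(event, 0);

	/* unprogram the counter entirely */
	event->pmu->del(event, 0);
}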
| 4298 | static void perf_swevent_disable(struct perf_event *event) | 4541 | static void perf_swevent_del(struct perf_event *event, int flags) |
| 4299 | { | 4542 | { |
| 4300 | hlist_del_rcu(&event->hlist_entry); | 4543 | hlist_del_rcu(&event->hlist_entry); |
| 4301 | } | 4544 | } |
| 4302 | 4545 | ||
| 4303 | static void perf_swevent_void(struct perf_event *event) | 4546 | static void perf_swevent_start(struct perf_event *event, int flags) |
| 4304 | { | ||
| 4305 | } | ||
| 4306 | |||
| 4307 | static int perf_swevent_int(struct perf_event *event) | ||
| 4308 | { | ||
| 4309 | return 0; | ||
| 4310 | } | ||
| 4311 | |||
| 4312 | static const struct pmu perf_ops_generic = { | ||
| 4313 | .enable = perf_swevent_enable, | ||
| 4314 | .disable = perf_swevent_disable, | ||
| 4315 | .start = perf_swevent_int, | ||
| 4316 | .stop = perf_swevent_void, | ||
| 4317 | .read = perf_swevent_read, | ||
| 4318 | .unthrottle = perf_swevent_void, /* hwc->interrupts already reset */ | ||
| 4319 | }; | ||
| 4320 | |||
| 4321 | /* | ||
| 4322 | * hrtimer based swevent callback | ||
| 4323 | */ | ||
| 4324 | |||
| 4325 | static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | ||
| 4326 | { | 4547 | { |
| 4327 | enum hrtimer_restart ret = HRTIMER_RESTART; | 4548 | event->hw.state = 0; |
| 4328 | struct perf_sample_data data; | ||
| 4329 | struct pt_regs *regs; | ||
| 4330 | struct perf_event *event; | ||
| 4331 | u64 period; | ||
| 4332 | |||
| 4333 | event = container_of(hrtimer, struct perf_event, hw.hrtimer); | ||
| 4334 | event->pmu->read(event); | ||
| 4335 | |||
| 4336 | perf_sample_data_init(&data, 0); | ||
| 4337 | data.period = event->hw.last_period; | ||
| 4338 | regs = get_irq_regs(); | ||
| 4339 | |||
| 4340 | if (regs && !perf_exclude_event(event, regs)) { | ||
| 4341 | if (!(event->attr.exclude_idle && current->pid == 0)) | ||
| 4342 | if (perf_event_overflow(event, 0, &data, regs)) | ||
| 4343 | ret = HRTIMER_NORESTART; | ||
| 4344 | } | ||
| 4345 | |||
| 4346 | period = max_t(u64, 10000, event->hw.sample_period); | ||
| 4347 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
| 4348 | |||
| 4349 | return ret; | ||
| 4350 | } | 4549 | } |
| 4351 | 4550 | ||
| 4352 | static void perf_swevent_start_hrtimer(struct perf_event *event) | 4551 | static void perf_swevent_stop(struct perf_event *event, int flags) |
| 4353 | { | 4552 | { |
| 4354 | struct hw_perf_event *hwc = &event->hw; | 4553 | event->hw.state = PERF_HES_STOPPED; |
| 4355 | |||
| 4356 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
| 4357 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
| 4358 | if (hwc->sample_period) { | ||
| 4359 | u64 period; | ||
| 4360 | |||
| 4361 | if (hwc->remaining) { | ||
| 4362 | if (hwc->remaining < 0) | ||
| 4363 | period = 10000; | ||
| 4364 | else | ||
| 4365 | period = hwc->remaining; | ||
| 4366 | hwc->remaining = 0; | ||
| 4367 | } else { | ||
| 4368 | period = max_t(u64, 10000, hwc->sample_period); | ||
| 4369 | } | ||
| 4370 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
| 4371 | ns_to_ktime(period), 0, | ||
| 4372 | HRTIMER_MODE_REL, 0); | ||
| 4373 | } | ||
| 4374 | } | ||
| 4375 | |||
| 4376 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) | ||
| 4377 | { | ||
| 4378 | struct hw_perf_event *hwc = &event->hw; | ||
| 4379 | |||
| 4380 | if (hwc->sample_period) { | ||
| 4381 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | ||
| 4382 | hwc->remaining = ktime_to_ns(remaining); | ||
| 4383 | |||
| 4384 | hrtimer_cancel(&hwc->hrtimer); | ||
| 4385 | } | ||
| 4386 | } | ||
| 4387 | |||
| 4388 | /* | ||
| 4389 | * Software event: cpu wall time clock | ||
| 4390 | */ | ||
| 4391 | |||
| 4392 | static void cpu_clock_perf_event_update(struct perf_event *event) | ||
| 4393 | { | ||
| 4394 | int cpu = raw_smp_processor_id(); | ||
| 4395 | s64 prev; | ||
| 4396 | u64 now; | ||
| 4397 | |||
| 4398 | now = cpu_clock(cpu); | ||
| 4399 | prev = local64_xchg(&event->hw.prev_count, now); | ||
| 4400 | local64_add(now - prev, &event->count); | ||
| 4401 | } | ||
| 4402 | |||
| 4403 | static int cpu_clock_perf_event_enable(struct perf_event *event) | ||
| 4404 | { | ||
| 4405 | struct hw_perf_event *hwc = &event->hw; | ||
| 4406 | int cpu = raw_smp_processor_id(); | ||
| 4407 | |||
| 4408 | local64_set(&hwc->prev_count, cpu_clock(cpu)); | ||
| 4409 | perf_swevent_start_hrtimer(event); | ||
| 4410 | |||
| 4411 | return 0; | ||
| 4412 | } | ||
| 4413 | |||
| 4414 | static void cpu_clock_perf_event_disable(struct perf_event *event) | ||
| 4415 | { | ||
| 4416 | perf_swevent_cancel_hrtimer(event); | ||
| 4417 | cpu_clock_perf_event_update(event); | ||
| 4418 | } | ||
| 4419 | |||
| 4420 | static void cpu_clock_perf_event_read(struct perf_event *event) | ||
| 4421 | { | ||
| 4422 | cpu_clock_perf_event_update(event); | ||
| 4423 | } | ||
| 4424 | |||
| 4425 | static const struct pmu perf_ops_cpu_clock = { | ||
| 4426 | .enable = cpu_clock_perf_event_enable, | ||
| 4427 | .disable = cpu_clock_perf_event_disable, | ||
| 4428 | .read = cpu_clock_perf_event_read, | ||
| 4429 | }; | ||
| 4430 | |||
| 4431 | /* | ||
| 4432 | * Software event: task time clock | ||
| 4433 | */ | ||
| 4434 | |||
| 4435 | static void task_clock_perf_event_update(struct perf_event *event, u64 now) | ||
| 4436 | { | ||
| 4437 | u64 prev; | ||
| 4438 | s64 delta; | ||
| 4439 | |||
| 4440 | prev = local64_xchg(&event->hw.prev_count, now); | ||
| 4441 | delta = now - prev; | ||
| 4442 | local64_add(delta, &event->count); | ||
| 4443 | } | ||
| 4444 | |||
| 4445 | static int task_clock_perf_event_enable(struct perf_event *event) | ||
| 4446 | { | ||
| 4447 | struct hw_perf_event *hwc = &event->hw; | ||
| 4448 | u64 now; | ||
| 4449 | |||
| 4450 | now = event->ctx->time; | ||
| 4451 | |||
| 4452 | local64_set(&hwc->prev_count, now); | ||
| 4453 | |||
| 4454 | perf_swevent_start_hrtimer(event); | ||
| 4455 | |||
| 4456 | return 0; | ||
| 4457 | } | ||
| 4458 | |||
| 4459 | static void task_clock_perf_event_disable(struct perf_event *event) | ||
| 4460 | { | ||
| 4461 | perf_swevent_cancel_hrtimer(event); | ||
| 4462 | task_clock_perf_event_update(event, event->ctx->time); | ||
| 4463 | |||
| 4464 | } | ||
| 4465 | |||
| 4466 | static void task_clock_perf_event_read(struct perf_event *event) | ||
| 4467 | { | ||
| 4468 | u64 time; | ||
| 4469 | |||
| 4470 | if (!in_nmi()) { | ||
| 4471 | update_context_time(event->ctx); | ||
| 4472 | time = event->ctx->time; | ||
| 4473 | } else { | ||
| 4474 | u64 now = perf_clock(); | ||
| 4475 | u64 delta = now - event->ctx->timestamp; | ||
| 4476 | time = event->ctx->time + delta; | ||
| 4477 | } | ||
| 4478 | |||
| 4479 | task_clock_perf_event_update(event, time); | ||
| 4480 | } | 4554 | } |
| 4481 | 4555 | ||
| 4482 | static const struct pmu perf_ops_task_clock = { | ||
| 4483 | .enable = task_clock_perf_event_enable, | ||
| 4484 | .disable = task_clock_perf_event_disable, | ||
| 4485 | .read = task_clock_perf_event_read, | ||
| 4486 | }; | ||
| 4487 | |||
| 4488 | /* Deref the hlist from the update side */ | 4556 | /* Deref the hlist from the update side */ |
| 4489 | static inline struct swevent_hlist * | 4557 | static inline struct swevent_hlist * |
| 4490 | swevent_hlist_deref(struct perf_cpu_context *cpuctx) | 4558 | swevent_hlist_deref(struct swevent_htable *swhash) |
| 4491 | { | 4559 | { |
| 4492 | return rcu_dereference_protected(cpuctx->swevent_hlist, | 4560 | return rcu_dereference_protected(swhash->swevent_hlist, |
| 4493 | lockdep_is_held(&cpuctx->hlist_mutex)); | 4561 | lockdep_is_held(&swhash->hlist_mutex)); |
| 4494 | } | 4562 | } |
| 4495 | 4563 | ||
| 4496 | static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) | 4564 | static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) |
| @@ -4501,27 +4569,27 @@ static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) | |||
| 4501 | kfree(hlist); | 4569 | kfree(hlist); |
| 4502 | } | 4570 | } |
| 4503 | 4571 | ||
| 4504 | static void swevent_hlist_release(struct perf_cpu_context *cpuctx) | 4572 | static void swevent_hlist_release(struct swevent_htable *swhash) |
| 4505 | { | 4573 | { |
| 4506 | struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx); | 4574 | struct swevent_hlist *hlist = swevent_hlist_deref(swhash); |
| 4507 | 4575 | ||
| 4508 | if (!hlist) | 4576 | if (!hlist) |
| 4509 | return; | 4577 | return; |
| 4510 | 4578 | ||
| 4511 | rcu_assign_pointer(cpuctx->swevent_hlist, NULL); | 4579 | rcu_assign_pointer(swhash->swevent_hlist, NULL); |
| 4512 | call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu); | 4580 | call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu); |
| 4513 | } | 4581 | } |
| 4514 | 4582 | ||
| 4515 | static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) | 4583 | static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) |
| 4516 | { | 4584 | { |
| 4517 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | 4585 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
| 4518 | 4586 | ||
| 4519 | mutex_lock(&cpuctx->hlist_mutex); | 4587 | mutex_lock(&swhash->hlist_mutex); |
| 4520 | 4588 | ||
| 4521 | if (!--cpuctx->hlist_refcount) | 4589 | if (!--swhash->hlist_refcount) |
| 4522 | swevent_hlist_release(cpuctx); | 4590 | swevent_hlist_release(swhash); |
| 4523 | 4591 | ||
| 4524 | mutex_unlock(&cpuctx->hlist_mutex); | 4592 | mutex_unlock(&swhash->hlist_mutex); |
| 4525 | } | 4593 | } |
| 4526 | 4594 | ||
| 4527 | static void swevent_hlist_put(struct perf_event *event) | 4595 | static void swevent_hlist_put(struct perf_event *event) |
| @@ -4539,12 +4607,12 @@ static void swevent_hlist_put(struct perf_event *event) | |||
| 4539 | 4607 | ||
| 4540 | static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) | 4608 | static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) |
| 4541 | { | 4609 | { |
| 4542 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | 4610 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
| 4543 | int err = 0; | 4611 | int err = 0; |
| 4544 | 4612 | ||
| 4545 | mutex_lock(&cpuctx->hlist_mutex); | 4613 | mutex_lock(&swhash->hlist_mutex); |
| 4546 | 4614 | ||
| 4547 | if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) { | 4615 | if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { |
| 4548 | struct swevent_hlist *hlist; | 4616 | struct swevent_hlist *hlist; |
| 4549 | 4617 | ||
| 4550 | hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); | 4618 | hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); |
| @@ -4552,11 +4620,11 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) | |||
| 4552 | err = -ENOMEM; | 4620 | err = -ENOMEM; |
| 4553 | goto exit; | 4621 | goto exit; |
| 4554 | } | 4622 | } |
| 4555 | rcu_assign_pointer(cpuctx->swevent_hlist, hlist); | 4623 | rcu_assign_pointer(swhash->swevent_hlist, hlist); |
| 4556 | } | 4624 | } |
| 4557 | cpuctx->hlist_refcount++; | 4625 | swhash->hlist_refcount++; |
| 4558 | exit: | 4626 | exit: |
| 4559 | mutex_unlock(&cpuctx->hlist_mutex); | 4627 | mutex_unlock(&swhash->hlist_mutex); |
| 4560 | 4628 | ||
| 4561 | return err; | 4629 | return err; |
| 4562 | } | 4630 | } |
| @@ -4580,7 +4648,7 @@ static int swevent_hlist_get(struct perf_event *event) | |||
| 4580 | put_online_cpus(); | 4648 | put_online_cpus(); |
| 4581 | 4649 | ||
| 4582 | return 0; | 4650 | return 0; |
| 4583 | fail: | 4651 | fail: |
| 4584 | for_each_possible_cpu(cpu) { | 4652 | for_each_possible_cpu(cpu) { |
| 4585 | if (cpu == failed_cpu) | 4653 | if (cpu == failed_cpu) |
| 4586 | break; | 4654 | break; |
| @@ -4591,17 +4659,64 @@ static int swevent_hlist_get(struct perf_event *event) | |||
| 4591 | return err; | 4659 | return err; |
| 4592 | } | 4660 | } |
| 4593 | 4661 | ||
| 4594 | #ifdef CONFIG_EVENT_TRACING | 4662 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
| 4663 | |||
| 4664 | static void sw_perf_event_destroy(struct perf_event *event) | ||
| 4665 | { | ||
| 4666 | u64 event_id = event->attr.config; | ||
| 4667 | |||
| 4668 | WARN_ON(event->parent); | ||
| 4669 | |||
| 4670 | jump_label_dec(&perf_swevent_enabled[event_id]); | ||
| 4671 | swevent_hlist_put(event); | ||
| 4672 | } | ||
| 4673 | |||
| 4674 | static int perf_swevent_init(struct perf_event *event) | ||
| 4675 | { | ||
| 4676 | int event_id = event->attr.config; | ||
| 4677 | |||
| 4678 | if (event->attr.type != PERF_TYPE_SOFTWARE) | ||
| 4679 | return -ENOENT; | ||
| 4680 | |||
| 4681 | switch (event_id) { | ||
| 4682 | case PERF_COUNT_SW_CPU_CLOCK: | ||
| 4683 | case PERF_COUNT_SW_TASK_CLOCK: | ||
| 4684 | return -ENOENT; | ||
| 4685 | |||
| 4686 | default: | ||
| 4687 | break; | ||
| 4688 | } | ||
| 4689 | |||
| 4690 | if (event_id > PERF_COUNT_SW_MAX) | ||
| 4691 | return -ENOENT; | ||
| 4595 | 4692 | ||
| 4596 | static const struct pmu perf_ops_tracepoint = { | 4693 | if (!event->parent) { |
| 4597 | .enable = perf_trace_enable, | 4694 | int err; |
| 4598 | .disable = perf_trace_disable, | 4695 | |
| 4599 | .start = perf_swevent_int, | 4696 | err = swevent_hlist_get(event); |
| 4600 | .stop = perf_swevent_void, | 4697 | if (err) |
| 4698 | return err; | ||
| 4699 | |||
| 4700 | jump_label_inc(&perf_swevent_enabled[event_id]); | ||
| 4701 | event->destroy = sw_perf_event_destroy; | ||
| 4702 | } | ||
| 4703 | |||
| 4704 | return 0; | ||
| 4705 | } | ||
| 4706 | |||
| 4707 | static struct pmu perf_swevent = { | ||
| 4708 | .task_ctx_nr = perf_sw_context, | ||
| 4709 | |||
| 4710 | .event_init = perf_swevent_init, | ||
| 4711 | .add = perf_swevent_add, | ||
| 4712 | .del = perf_swevent_del, | ||
| 4713 | .start = perf_swevent_start, | ||
| 4714 | .stop = perf_swevent_stop, | ||
| 4601 | .read = perf_swevent_read, | 4715 | .read = perf_swevent_read, |
| 4602 | .unthrottle = perf_swevent_void, | ||
| 4603 | }; | 4716 | }; |
| 4604 | 4717 | ||
| 4718 | #ifdef CONFIG_EVENT_TRACING | ||
| 4719 | |||
| 4605 | static int perf_tp_filter_match(struct perf_event *event, | 4720 | static int perf_tp_filter_match(struct perf_event *event, |
| 4606 | struct perf_sample_data *data) | 4721 | struct perf_sample_data *data) |
| 4607 | { | 4722 | { |
| @@ -4645,7 +4760,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, | |||
| 4645 | 4760 | ||
| 4646 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { | 4761 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { |
| 4647 | if (perf_tp_event_match(event, &data, regs)) | 4762 | if (perf_tp_event_match(event, &data, regs)) |
| 4648 | perf_swevent_add(event, count, 1, &data, regs); | 4763 | perf_swevent_event(event, count, 1, &data, regs); |
| 4649 | } | 4764 | } |
| 4650 | 4765 | ||
| 4651 | perf_swevent_put_recursion_context(rctx); | 4766 | perf_swevent_put_recursion_context(rctx); |
| @@ -4657,10 +4772,13 @@ static void tp_perf_event_destroy(struct perf_event *event) | |||
| 4657 | perf_trace_destroy(event); | 4772 | perf_trace_destroy(event); |
| 4658 | } | 4773 | } |
| 4659 | 4774 | ||
| 4660 | static const struct pmu *tp_perf_event_init(struct perf_event *event) | 4775 | static int perf_tp_event_init(struct perf_event *event) |
| 4661 | { | 4776 | { |
| 4662 | int err; | 4777 | int err; |
| 4663 | 4778 | ||
| 4779 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | ||
| 4780 | return -ENOENT; | ||
| 4781 | |||
| 4664 | /* | 4782 | /* |
| 4665 | * Raw tracepoint data is a severe data leak, only allow root to | 4783 | * Raw tracepoint data is a severe data leak, only allow root to |
| 4666 | * have these. | 4784 | * have these. |
| @@ -4668,15 +4786,31 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) | |||
| 4668 | if ((event->attr.sample_type & PERF_SAMPLE_RAW) && | 4786 | if ((event->attr.sample_type & PERF_SAMPLE_RAW) && |
| 4669 | perf_paranoid_tracepoint_raw() && | 4787 | perf_paranoid_tracepoint_raw() && |
| 4670 | !capable(CAP_SYS_ADMIN)) | 4788 | !capable(CAP_SYS_ADMIN)) |
| 4671 | return ERR_PTR(-EPERM); | 4789 | return -EPERM; |
| 4672 | 4790 | ||
| 4673 | err = perf_trace_init(event); | 4791 | err = perf_trace_init(event); |
| 4674 | if (err) | 4792 | if (err) |
| 4675 | return NULL; | 4793 | return err; |
| 4676 | 4794 | ||
| 4677 | event->destroy = tp_perf_event_destroy; | 4795 | event->destroy = tp_perf_event_destroy; |
| 4678 | 4796 | ||
| 4679 | return &perf_ops_tracepoint; | 4797 | return 0; |
| 4798 | } | ||
| 4799 | |||
| 4800 | static struct pmu perf_tracepoint = { | ||
| 4801 | .task_ctx_nr = perf_sw_context, | ||
| 4802 | |||
| 4803 | .event_init = perf_tp_event_init, | ||
| 4804 | .add = perf_trace_add, | ||
| 4805 | .del = perf_trace_del, | ||
| 4806 | .start = perf_swevent_start, | ||
| 4807 | .stop = perf_swevent_stop, | ||
| 4808 | .read = perf_swevent_read, | ||
| 4809 | }; | ||
| 4810 | |||
| 4811 | static inline void perf_tp_register(void) | ||
| 4812 | { | ||
| 4813 | perf_pmu_register(&perf_tracepoint); | ||
| 4680 | } | 4814 | } |
| 4681 | 4815 | ||
| 4682 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | 4816 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) |
| @@ -4704,9 +4838,8 @@ static void perf_event_free_filter(struct perf_event *event) | |||
| 4704 | 4838 | ||
| 4705 | #else | 4839 | #else |
| 4706 | 4840 | ||
| 4707 | static const struct pmu *tp_perf_event_init(struct perf_event *event) | 4841 | static inline void perf_tp_register(void) |
| 4708 | { | 4842 | { |
| 4709 | return NULL; | ||
| 4710 | } | 4843 | } |
| 4711 | 4844 | ||
| 4712 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | 4845 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) |
| @@ -4721,105 +4854,389 @@ static void perf_event_free_filter(struct perf_event *event) | |||
| 4721 | #endif /* CONFIG_EVENT_TRACING */ | 4854 | #endif /* CONFIG_EVENT_TRACING */ |
| 4722 | 4855 | ||
| 4723 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 4856 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
| 4724 | static void bp_perf_event_destroy(struct perf_event *event) | 4857 | void perf_bp_event(struct perf_event *bp, void *data) |
| 4725 | { | 4858 | { |
| 4726 | release_bp_slot(event); | 4859 | struct perf_sample_data sample; |
| 4860 | struct pt_regs *regs = data; | ||
| 4861 | |||
| 4862 | perf_sample_data_init(&sample, bp->attr.bp_addr); | ||
| 4863 | |||
| 4864 | if (!bp->hw.state && !perf_exclude_event(bp, regs)) | ||
| 4865 | perf_swevent_event(bp, 1, 1, &sample, regs); | ||
| 4727 | } | 4866 | } |
| 4867 | #endif | ||
| 4868 | |||
| 4869 | /* | ||
| 4870 | * hrtimer based swevent callback | ||
| 4871 | */ | ||
| 4728 | 4872 | ||
| 4729 | static const struct pmu *bp_perf_event_init(struct perf_event *bp) | 4873 | static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) |
| 4730 | { | 4874 | { |
| 4731 | int err; | 4875 | enum hrtimer_restart ret = HRTIMER_RESTART; |
| 4876 | struct perf_sample_data data; | ||
| 4877 | struct pt_regs *regs; | ||
| 4878 | struct perf_event *event; | ||
| 4879 | u64 period; | ||
| 4732 | 4880 | ||
| 4733 | err = register_perf_hw_breakpoint(bp); | 4881 | event = container_of(hrtimer, struct perf_event, hw.hrtimer); |
| 4734 | if (err) | 4882 | event->pmu->read(event); |
| 4735 | return ERR_PTR(err); | 4883 | |
| 4884 | perf_sample_data_init(&data, 0); | ||
| 4885 | data.period = event->hw.last_period; | ||
| 4886 | regs = get_irq_regs(); | ||
| 4887 | |||
| 4888 | if (regs && !perf_exclude_event(event, regs)) { | ||
| 4889 | if (!(event->attr.exclude_idle && current->pid == 0)) | ||
| 4890 | if (perf_event_overflow(event, 0, &data, regs)) | ||
| 4891 | ret = HRTIMER_NORESTART; | ||
| 4892 | } | ||
| 4736 | 4893 | ||
| 4737 | bp->destroy = bp_perf_event_destroy; | 4894 | period = max_t(u64, 10000, event->hw.sample_period); |
| 4895 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
| 4738 | 4896 | ||
| 4739 | return &perf_ops_bp; | 4897 | return ret; |
| 4740 | } | 4898 | } |
| 4741 | 4899 | ||
| 4742 | void perf_bp_event(struct perf_event *bp, void *data) | 4900 | static void perf_swevent_start_hrtimer(struct perf_event *event) |
| 4743 | { | 4901 | { |
| 4744 | struct perf_sample_data sample; | 4902 | struct hw_perf_event *hwc = &event->hw; |
| 4745 | struct pt_regs *regs = data; | ||
| 4746 | 4903 | ||
| 4747 | perf_sample_data_init(&sample, bp->attr.bp_addr); | 4904 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 4905 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
| 4906 | if (hwc->sample_period) { | ||
| 4907 | s64 period = local64_read(&hwc->period_left); | ||
| 4908 | |||
| 4909 | if (period) { | ||
| 4910 | if (period < 0) | ||
| 4911 | period = 10000; | ||
| 4748 | 4912 | ||
| 4749 | if (!perf_exclude_event(bp, regs)) | 4913 | local64_set(&hwc->period_left, 0); |
| 4750 | perf_swevent_add(bp, 1, 1, &sample, regs); | 4914 | } else { |
| 4915 | period = max_t(u64, 10000, hwc->sample_period); | ||
| 4916 | } | ||
| 4917 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
| 4918 | ns_to_ktime(period), 0, | ||
| 4919 | HRTIMER_MODE_REL_PINNED, 0); | ||
| 4920 | } | ||
| 4751 | } | 4921 | } |
| 4752 | #else | 4922 | |
| 4753 | static const struct pmu *bp_perf_event_init(struct perf_event *bp) | 4923 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) |
| 4754 | { | 4924 | { |
| 4755 | return NULL; | 4925 | struct hw_perf_event *hwc = &event->hw; |
| 4926 | |||
| 4927 | if (hwc->sample_period) { | ||
| 4928 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | ||
| 4929 | local64_set(&hwc->period_left, ktime_to_ns(remaining)); | ||
| 4930 | |||
| 4931 | hrtimer_cancel(&hwc->hrtimer); | ||
| 4932 | } | ||
| 4756 | } | 4933 | } |
| 4757 | 4934 | ||
| 4758 | void perf_bp_event(struct perf_event *bp, void *regs) | 4935 | /* |
| 4936 | * Software event: cpu wall time clock | ||
| 4937 | */ | ||
| 4938 | |||
| 4939 | static void cpu_clock_event_update(struct perf_event *event) | ||
| 4759 | { | 4940 | { |
| 4941 | s64 prev; | ||
| 4942 | u64 now; | ||
| 4943 | |||
| 4944 | now = local_clock(); | ||
| 4945 | prev = local64_xchg(&event->hw.prev_count, now); | ||
| 4946 | local64_add(now - prev, &event->count); | ||
| 4760 | } | 4947 | } |
| 4761 | #endif | ||
| 4762 | 4948 | ||
| 4763 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 4949 | static void cpu_clock_event_start(struct perf_event *event, int flags) |
| 4950 | { | ||
| 4951 | local64_set(&event->hw.prev_count, local_clock()); | ||
| 4952 | perf_swevent_start_hrtimer(event); | ||
| 4953 | } | ||
| 4764 | 4954 | ||
| 4765 | static void sw_perf_event_destroy(struct perf_event *event) | 4955 | static void cpu_clock_event_stop(struct perf_event *event, int flags) |
| 4766 | { | 4956 | { |
| 4767 | u64 event_id = event->attr.config; | 4957 | perf_swevent_cancel_hrtimer(event); |
| 4958 | cpu_clock_event_update(event); | ||
| 4959 | } | ||
| 4768 | 4960 | ||
| 4769 | WARN_ON(event->parent); | 4961 | static int cpu_clock_event_add(struct perf_event *event, int flags) |
| 4962 | { | ||
| 4963 | if (flags & PERF_EF_START) | ||
| 4964 | cpu_clock_event_start(event, flags); | ||
| 4770 | 4965 | ||
| 4771 | atomic_dec(&perf_swevent_enabled[event_id]); | 4966 | return 0; |
| 4772 | swevent_hlist_put(event); | ||
| 4773 | } | 4967 | } |
| 4774 | 4968 | ||
| 4775 | static const struct pmu *sw_perf_event_init(struct perf_event *event) | 4969 | static void cpu_clock_event_del(struct perf_event *event, int flags) |
| 4776 | { | 4970 | { |
| 4777 | const struct pmu *pmu = NULL; | 4971 | cpu_clock_event_stop(event, flags); |
| 4778 | u64 event_id = event->attr.config; | 4972 | } |
| 4973 | |||
| 4974 | static void cpu_clock_event_read(struct perf_event *event) | ||
| 4975 | { | ||
| 4976 | cpu_clock_event_update(event); | ||
| 4977 | } | ||
| 4978 | |||
| 4979 | static int cpu_clock_event_init(struct perf_event *event) | ||
| 4980 | { | ||
| 4981 | if (event->attr.type != PERF_TYPE_SOFTWARE) | ||
| 4982 | return -ENOENT; | ||
| 4983 | |||
| 4984 | if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) | ||
| 4985 | return -ENOENT; | ||
| 4986 | |||
| 4987 | return 0; | ||
| 4988 | } | ||
| 4989 | |||
| 4990 | static struct pmu perf_cpu_clock = { | ||
| 4991 | .task_ctx_nr = perf_sw_context, | ||
| 4992 | |||
| 4993 | .event_init = cpu_clock_event_init, | ||
| 4994 | .add = cpu_clock_event_add, | ||
| 4995 | .del = cpu_clock_event_del, | ||
| 4996 | .start = cpu_clock_event_start, | ||
| 4997 | .stop = cpu_clock_event_stop, | ||
| 4998 | .read = cpu_clock_event_read, | ||
| 4999 | }; | ||
| 5000 | |||
| 5001 | /* | ||
| 5002 | * Software event: task time clock | ||
| 5003 | */ | ||
| 5004 | |||
| 5005 | static void task_clock_event_update(struct perf_event *event, u64 now) | ||
| 5006 | { | ||
| 5007 | u64 prev; | ||
| 5008 | s64 delta; | ||
| 5009 | |||
| 5010 | prev = local64_xchg(&event->hw.prev_count, now); | ||
| 5011 | delta = now - prev; | ||
| 5012 | local64_add(delta, &event->count); | ||
| 5013 | } | ||
| 5014 | |||
| 5015 | static void task_clock_event_start(struct perf_event *event, int flags) | ||
| 5016 | { | ||
| 5017 | local64_set(&event->hw.prev_count, event->ctx->time); | ||
| 5018 | perf_swevent_start_hrtimer(event); | ||
| 5019 | } | ||
| 4779 | 5020 | ||
| 5021 | static void task_clock_event_stop(struct perf_event *event, int flags) | ||
| 5022 | { | ||
| 5023 | perf_swevent_cancel_hrtimer(event); | ||
| 5024 | task_clock_event_update(event, event->ctx->time); | ||
| 5025 | } | ||
| 5026 | |||
| 5027 | static int task_clock_event_add(struct perf_event *event, int flags) | ||
| 5028 | { | ||
| 5029 | if (flags & PERF_EF_START) | ||
| 5030 | task_clock_event_start(event, flags); | ||
| 5031 | |||
| 5032 | return 0; | ||
| 5033 | } | ||
| 5034 | |||
| 5035 | static void task_clock_event_del(struct perf_event *event, int flags) | ||
| 5036 | { | ||
| 5037 | task_clock_event_stop(event, PERF_EF_UPDATE); | ||
| 5038 | } | ||
| 5039 | |||
| 5040 | static void task_clock_event_read(struct perf_event *event) | ||
| 5041 | { | ||
| 5042 | u64 time; | ||
| 5043 | |||
| 5044 | if (!in_nmi()) { | ||
| 5045 | update_context_time(event->ctx); | ||
| 5046 | time = event->ctx->time; | ||
| 5047 | } else { | ||
| 5048 | u64 now = perf_clock(); | ||
| 5049 | u64 delta = now - event->ctx->timestamp; | ||
| 5050 | time = event->ctx->time + delta; | ||
| 5051 | } | ||
| 5052 | |||
| 5053 | task_clock_event_update(event, time); | ||
| 5054 | } | ||
| 5055 | |||
| 5056 | static int task_clock_event_init(struct perf_event *event) | ||
| 5057 | { | ||
| 5058 | if (event->attr.type != PERF_TYPE_SOFTWARE) | ||
| 5059 | return -ENOENT; | ||
| 5060 | |||
| 5061 | if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) | ||
| 5062 | return -ENOENT; | ||
| 5063 | |||
| 5064 | return 0; | ||
| 5065 | } | ||
| 5066 | |||
| 5067 | static struct pmu perf_task_clock = { | ||
| 5068 | .task_ctx_nr = perf_sw_context, | ||
| 5069 | |||
| 5070 | .event_init = task_clock_event_init, | ||
| 5071 | .add = task_clock_event_add, | ||
| 5072 | .del = task_clock_event_del, | ||
| 5073 | .start = task_clock_event_start, | ||
| 5074 | .stop = task_clock_event_stop, | ||
| 5075 | .read = task_clock_event_read, | ||
| 5076 | }; | ||
| 5077 | |||
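Editor's note: each of the new event_init() callbacks (perf_swevent_init, perf_tp_event_init, cpu_clock_event_init, task_clock_event_init) rejects events that are not its own with -ENOENT, replacing the old static switch in sw_perf_event_init(). That return value is what lets the core probe the registered pmus one by one. A hedged reconstruction of that probing loop follows; the real perf_init_event() is not part of this hunk:

/* Reconstruction of the implied pmu selection: -ENOENT means "not my
 * event, try the next pmu"; any other error aborts the search. */
static struct pmu *example_init_event(struct perf_event *event)
{
	struct pmu *pmu;
	int ret;

	/* caller is assumed to hold rcu_read_lock() or pmus_lock */
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ret = pmu->event_init(event);
		if (!ret)
			return pmu;		/* this pmu claimed the event */
		if (ret != -ENOENT)
			return ERR_PTR(ret);	/* hard error, stop searching */
	}

	return ERR_PTR(-ENOENT);
}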
| 5078 | static void perf_pmu_nop_void(struct pmu *pmu) | ||
| 5079 | { | ||
| 5080 | } | ||
| 5081 | |||
| 5082 | static int perf_pmu_nop_int(struct pmu *pmu) | ||
| 5083 | { | ||
| 5084 | return 0; | ||
| 5085 | } | ||
| 5086 | |||
| 5087 | static void perf_pmu_start_txn(struct pmu *pmu) | ||
| 5088 | { | ||
| 5089 | perf_pmu_disable(pmu); | ||
| 5090 | } | ||
| 5091 | |||
| 5092 | static int perf_pmu_commit_txn(struct pmu *pmu) | ||
| 5093 | { | ||
| 5094 | perf_pmu_enable(pmu); | ||
| 5095 | return 0; | ||
| 5096 | } | ||
| 5097 | |||
| 5098 | static void perf_pmu_cancel_txn(struct pmu *pmu) | ||
| 5099 | { | ||
| 5100 | perf_pmu_enable(pmu); | ||
| 5101 | } | ||
| 5102 | |||
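Editor's note: perf_pmu_start_txn()/commit_txn()/cancel_txn() are the default transaction hooks that perf_pmu_register() below installs for pmus which provide pmu_enable()/pmu_disable() but no transaction support of their own; they simply bracket the group in a disable/enable pair. Roughly how the group-scheduling path is expected to use them (a sketch only; the real caller lives elsewhere in this file):

/* Sketch of transactional group scheduling over the new hooks. */
static int example_group_sched_in(struct pmu *pmu)
{
	int err = 0;

	pmu->start_txn(pmu);

	/* ... pmu->add() each member of the group, setting err on failure ... */

	if (!err && !pmu->commit_txn(pmu))
		return 0;		/* the hardware accepted the whole group */

	/* ... pmu->del() the members that were added ... */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}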
| 5103 | /* | ||
| 5104 | * Ensures all contexts with the same task_ctx_nr have the same | ||
| 5105 | * pmu_cpu_context too. | ||
| 5106 | */ | ||
| 5107 | static void *find_pmu_context(int ctxn) | ||
| 5108 | { | ||
| 5109 | struct pmu *pmu; | ||
| 5110 | |||
| 5111 | if (ctxn < 0) | ||
| 5112 | return NULL; | ||
| 5113 | |||
| 5114 | list_for_each_entry(pmu, &pmus, entry) { | ||
| 5115 | if (pmu->task_ctx_nr == ctxn) | ||
| 5116 | return pmu->pmu_cpu_context; | ||
| 5117 | } | ||
| 5118 | |||
| 5119 | return NULL; | ||
| 5120 | } | ||
| 5121 | |||
| 5122 | static void free_pmu_context(void * __percpu cpu_context) | ||
| 5123 | { | ||
| 5124 | struct pmu *pmu; | ||
| 5125 | |||
| 5126 | mutex_lock(&pmus_lock); | ||
| 4780 | /* | 5127 | /* |
| 4781 | * Software events (currently) can't in general distinguish | 5128 | * Like a real lame refcount. |
| 4782 | * between user, kernel and hypervisor events. | ||
| 4783 | * However, context switches and cpu migrations are considered | ||
| 4784 | * to be kernel events, and page faults are never hypervisor | ||
| 4785 | * events. | ||
| 4786 | */ | 5129 | */ |
| 4787 | switch (event_id) { | 5130 | list_for_each_entry(pmu, &pmus, entry) { |
| 4788 | case PERF_COUNT_SW_CPU_CLOCK: | 5131 | if (pmu->pmu_cpu_context == cpu_context) |
| 4789 | pmu = &perf_ops_cpu_clock; | 5132 | goto out; |
| 5133 | } | ||
| 4790 | 5134 | ||
| 4791 | break; | 5135 | free_percpu(cpu_context); |
| 4792 | case PERF_COUNT_SW_TASK_CLOCK: | 5136 | out: |
| 4793 | /* | 5137 | mutex_unlock(&pmus_lock); |
| 4794 | * If the user instantiates this as a per-cpu event, | 5138 | } |
| 4795 | * use the cpu_clock event instead. | ||
| 4796 | */ | ||
| 4797 | if (event->ctx->task) | ||
| 4798 | pmu = &perf_ops_task_clock; | ||
| 4799 | else | ||
| 4800 | pmu = &perf_ops_cpu_clock; | ||
| 4801 | 5139 | ||
| 4802 | break; | 5140 | int perf_pmu_register(struct pmu *pmu) |
| 4803 | case PERF_COUNT_SW_PAGE_FAULTS: | 5141 | { |
| 4804 | case PERF_COUNT_SW_PAGE_FAULTS_MIN: | 5142 | int cpu, ret; |
| 4805 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: | 5143 | |
| 4806 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | 5144 | mutex_lock(&pmus_lock); |
| 4807 | case PERF_COUNT_SW_CPU_MIGRATIONS: | 5145 | ret = -ENOMEM; |
| 4808 | case PERF_COUNT_SW_ALIGNMENT_FAULTS: | 5146 | pmu->pmu_disable_count = alloc_percpu(int); |
| 4809 | case PERF_COUNT_SW_EMULATION_FAULTS: | 5147 | if (!pmu->pmu_disable_count) |
| 4810 | if (!event->parent) { | 5148 | goto unlock; |
| 4811 | int err; | 5149 | |
| 4812 | 5150 | pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); | |
| 4813 | err = swevent_hlist_get(event); | 5151 | if (pmu->pmu_cpu_context) |
| 4814 | if (err) | 5152 | goto got_cpu_context; |
| 4815 | return ERR_PTR(err); | 5153 | |
| 5154 | pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); | ||
| 5155 | if (!pmu->pmu_cpu_context) | ||
| 5156 | goto free_pdc; | ||
| 5157 | |||
| 5158 | for_each_possible_cpu(cpu) { | ||
| 5159 | struct perf_cpu_context *cpuctx; | ||
| 4816 | 5160 | ||
| 4817 | atomic_inc(&perf_swevent_enabled[event_id]); | 5161 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
| 4818 | event->destroy = sw_perf_event_destroy; | 5162 | __perf_event_init_context(&cpuctx->ctx); |
| 5163 | cpuctx->ctx.type = cpu_context; | ||
| 5164 | cpuctx->ctx.pmu = pmu; | ||
| 5165 | cpuctx->jiffies_interval = 1; | ||
| 5166 | INIT_LIST_HEAD(&cpuctx->rotation_list); | ||
| 5167 | } | ||
| 5168 | |||
| 5169 | got_cpu_context: | ||
| 5170 | if (!pmu->start_txn) { | ||
| 5171 | if (pmu->pmu_enable) { | ||
| 5172 | /* | ||
| 5173 | * If we have pmu_enable/pmu_disable calls, install | ||
| 5174 | * transaction stubs that use that to try and batch | ||
| 5175 | * hardware accesses. | ||
| 5176 | */ | ||
| 5177 | pmu->start_txn = perf_pmu_start_txn; | ||
| 5178 | pmu->commit_txn = perf_pmu_commit_txn; | ||
| 5179 | pmu->cancel_txn = perf_pmu_cancel_txn; | ||
| 5180 | } else { | ||
| 5181 | pmu->start_txn = perf_pmu_nop_void; | ||
| 5182 | pmu->commit_txn = perf_pmu_nop_int; | ||
| 5183 | pmu->cancel_txn = perf_pmu_nop_void; | ||
| 5184 | } | ||
| 5185 | } | ||
| 5186 | |||
| 5187 | if (!pmu->pmu_enable) { | ||
| 5188 | pmu->pmu_enable = perf_pmu_nop_void; | ||
| 5189 | pmu->pmu_disable = perf_pmu_nop_void; | ||
| 5190 | } | ||
| 5191 | |||
| 5192 | list_add_rcu(&pmu->entry, &pmus); | ||
| 5193 | ret = 0; | ||
| 5194 | unlock: | ||
| 5195 | mutex_unlock(&pmus_lock); | ||
| 5196 | |||
| 5197 | return ret; | ||
| 5198 | |||
| 5199 | free_pdc: | ||
| 5200 | free_percpu(pmu->pmu_disable_count); | ||
| 5201 | goto unlock; | ||
| 5202 | } | ||
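With perf_pmu_register() in place, a PMU is no longer selected by a hard-coded switch on attr->type in the core; it registers itself and claims events from its event_init() callback. A hypothetical out-of-tree registration would look roughly like the sketch below; only perf_pmu_register() and the struct pmu field names follow this patch, everything else is placeholder.

static int sketch_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;			/* not ours: let another pmu claim it */
	return 0;
}

static struct pmu sketch_pmu = {
	.task_ctx_nr	= perf_sw_context,	/* share the software task context, as above */
	.event_init	= sketch_event_init,
	/* .add, .del, .start, .stop, .read filled in as for perf_task_clock above */
};

static int __init sketch_pmu_module_init(void)
{
	return perf_pmu_register(&sketch_pmu);
}

The matching teardown is perf_pmu_unregister(&sketch_pmu), defined just below.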
| 5203 | |||
| 5204 | void perf_pmu_unregister(struct pmu *pmu) | ||
| 5205 | { | ||
| 5206 | mutex_lock(&pmus_lock); | ||
| 5207 | list_del_rcu(&pmu->entry); | ||
| 5208 | mutex_unlock(&pmus_lock); | ||
| 5209 | |||
| 5210 | /* | ||
| 5211 | * We dereference the pmu list under both SRCU and regular RCU, so | ||
| 5212 | * synchronize against both of those. | ||
| 5213 | */ | ||
| 5214 | synchronize_srcu(&pmus_srcu); | ||
| 5215 | synchronize_rcu(); | ||
| 5216 | |||
| 5217 | free_percpu(pmu->pmu_disable_count); | ||
| 5218 | free_pmu_context(pmu->pmu_cpu_context); | ||
| 5219 | } | ||
| 5220 | |||
| 5221 | struct pmu *perf_init_event(struct perf_event *event) | ||
| 5222 | { | ||
| 5223 | struct pmu *pmu = NULL; | ||
| 5224 | int idx; | ||
| 5225 | |||
| 5226 | idx = srcu_read_lock(&pmus_srcu); | ||
| 5227 | list_for_each_entry_rcu(pmu, &pmus, entry) { | ||
| 5228 | int ret = pmu->event_init(event); | ||
| 5229 | if (!ret) | ||
| 5230 | goto unlock; | ||
| 5231 | |||
| 5232 | if (ret != -ENOENT) { | ||
| 5233 | pmu = ERR_PTR(ret); | ||
| 5234 | goto unlock; | ||
| 4819 | } | 5235 | } |
| 4820 | pmu = &perf_ops_generic; | ||
| 4821 | break; | ||
| 4822 | } | 5236 | } |
| 5237 | pmu = ERR_PTR(-ENOENT); | ||
| 5238 | unlock: | ||
| 5239 | srcu_read_unlock(&pmus_srcu, idx); | ||
| 4823 | 5240 | ||
| 4824 | return pmu; | 5241 | return pmu; |
| 4825 | } | 5242 | } |
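The walk above relies on a small convention: event_init() returns -ENOENT to mean "not my event, try the next pmu" and any other error to abort the search, while success hands the event to that pmu. The consumer side looks roughly like this minimal sketch (the helper and its error handling are illustrative; the real consumer is perf_event_alloc() in the next hunk):

static struct pmu *sketch_claim_event(struct perf_event *event)
{
	struct pmu *pmu = perf_init_event(event);

	if (IS_ERR(pmu))
		return NULL;		/* PTR_ERR(pmu) is -ENOENT if nobody claimed it */

	event->pmu = pmu;
	return pmu;
}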
| @@ -4828,20 +5245,18 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event) | |||
| 4828 | * Allocate and initialize an event structure | 5245 | * Allocate and initialize an event structure |
| 4829 | */ | 5246 | */ |
| 4830 | static struct perf_event * | 5247 | static struct perf_event * |
| 4831 | perf_event_alloc(struct perf_event_attr *attr, | 5248 | perf_event_alloc(struct perf_event_attr *attr, int cpu, |
| 4832 | int cpu, | 5249 | struct task_struct *task, |
| 4833 | struct perf_event_context *ctx, | 5250 | struct perf_event *group_leader, |
| 4834 | struct perf_event *group_leader, | 5251 | struct perf_event *parent_event, |
| 4835 | struct perf_event *parent_event, | 5252 | perf_overflow_handler_t overflow_handler) |
| 4836 | perf_overflow_handler_t overflow_handler, | 5253 | { |
| 4837 | gfp_t gfpflags) | 5254 | struct pmu *pmu; |
| 4838 | { | ||
| 4839 | const struct pmu *pmu; | ||
| 4840 | struct perf_event *event; | 5255 | struct perf_event *event; |
| 4841 | struct hw_perf_event *hwc; | 5256 | struct hw_perf_event *hwc; |
| 4842 | long err; | 5257 | long err; |
| 4843 | 5258 | ||
| 4844 | event = kzalloc(sizeof(*event), gfpflags); | 5259 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
| 4845 | if (!event) | 5260 | if (!event) |
| 4846 | return ERR_PTR(-ENOMEM); | 5261 | return ERR_PTR(-ENOMEM); |
| 4847 | 5262 | ||
| @@ -4859,6 +5274,7 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
| 4859 | INIT_LIST_HEAD(&event->event_entry); | 5274 | INIT_LIST_HEAD(&event->event_entry); |
| 4860 | INIT_LIST_HEAD(&event->sibling_list); | 5275 | INIT_LIST_HEAD(&event->sibling_list); |
| 4861 | init_waitqueue_head(&event->waitq); | 5276 | init_waitqueue_head(&event->waitq); |
| 5277 | init_irq_work(&event->pending, perf_pending_event); | ||
| 4862 | 5278 | ||
| 4863 | mutex_init(&event->mmap_mutex); | 5279 | mutex_init(&event->mmap_mutex); |
| 4864 | 5280 | ||
| @@ -4866,7 +5282,6 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
| 4866 | event->attr = *attr; | 5282 | event->attr = *attr; |
| 4867 | event->group_leader = group_leader; | 5283 | event->group_leader = group_leader; |
| 4868 | event->pmu = NULL; | 5284 | event->pmu = NULL; |
| 4869 | event->ctx = ctx; | ||
| 4870 | event->oncpu = -1; | 5285 | event->oncpu = -1; |
| 4871 | 5286 | ||
| 4872 | event->parent = parent_event; | 5287 | event->parent = parent_event; |
| @@ -4876,6 +5291,17 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
| 4876 | 5291 | ||
| 4877 | event->state = PERF_EVENT_STATE_INACTIVE; | 5292 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 4878 | 5293 | ||
| 5294 | if (task) { | ||
| 5295 | event->attach_state = PERF_ATTACH_TASK; | ||
| 5296 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
| 5297 | /* | ||
| 5298 | * hw_breakpoint is a bit difficult here.. | ||
| 5299 | */ | ||
| 5300 | if (attr->type == PERF_TYPE_BREAKPOINT) | ||
| 5301 | event->hw.bp_target = task; | ||
| 5302 | #endif | ||
| 5303 | } | ||
| 5304 | |||
| 4879 | if (!overflow_handler && parent_event) | 5305 | if (!overflow_handler && parent_event) |
| 4880 | overflow_handler = parent_event->overflow_handler; | 5306 | overflow_handler = parent_event->overflow_handler; |
| 4881 | 5307 | ||
| @@ -4900,29 +5326,8 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
| 4900 | if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) | 5326 | if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) |
| 4901 | goto done; | 5327 | goto done; |
| 4902 | 5328 | ||
| 4903 | switch (attr->type) { | 5329 | pmu = perf_init_event(event); |
| 4904 | case PERF_TYPE_RAW: | ||
| 4905 | case PERF_TYPE_HARDWARE: | ||
| 4906 | case PERF_TYPE_HW_CACHE: | ||
| 4907 | pmu = hw_perf_event_init(event); | ||
| 4908 | break; | ||
| 4909 | 5330 | ||
| 4910 | case PERF_TYPE_SOFTWARE: | ||
| 4911 | pmu = sw_perf_event_init(event); | ||
| 4912 | break; | ||
| 4913 | |||
| 4914 | case PERF_TYPE_TRACEPOINT: | ||
| 4915 | pmu = tp_perf_event_init(event); | ||
| 4916 | break; | ||
| 4917 | |||
| 4918 | case PERF_TYPE_BREAKPOINT: | ||
| 4919 | pmu = bp_perf_event_init(event); | ||
| 4920 | break; | ||
| 4921 | |||
| 4922 | |||
| 4923 | default: | ||
| 4924 | break; | ||
| 4925 | } | ||
| 4926 | done: | 5331 | done: |
| 4927 | err = 0; | 5332 | err = 0; |
| 4928 | if (!pmu) | 5333 | if (!pmu) |
| @@ -4940,13 +5345,21 @@ done: | |||
| 4940 | event->pmu = pmu; | 5345 | event->pmu = pmu; |
| 4941 | 5346 | ||
| 4942 | if (!event->parent) { | 5347 | if (!event->parent) { |
| 4943 | atomic_inc(&nr_events); | 5348 | if (event->attach_state & PERF_ATTACH_TASK) |
| 5349 | jump_label_inc(&perf_task_events); | ||
| 4944 | if (event->attr.mmap || event->attr.mmap_data) | 5350 | if (event->attr.mmap || event->attr.mmap_data) |
| 4945 | atomic_inc(&nr_mmap_events); | 5351 | atomic_inc(&nr_mmap_events); |
| 4946 | if (event->attr.comm) | 5352 | if (event->attr.comm) |
| 4947 | atomic_inc(&nr_comm_events); | 5353 | atomic_inc(&nr_comm_events); |
| 4948 | if (event->attr.task) | 5354 | if (event->attr.task) |
| 4949 | atomic_inc(&nr_task_events); | 5355 | atomic_inc(&nr_task_events); |
| 5356 | if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { | ||
| 5357 | err = get_callchain_buffers(); | ||
| 5358 | if (err) { | ||
| 5359 | free_event(event); | ||
| 5360 | return ERR_PTR(err); | ||
| 5361 | } | ||
| 5362 | } | ||
| 4950 | } | 5363 | } |
| 4951 | 5364 | ||
| 4952 | return event; | 5365 | return event; |
| @@ -5094,12 +5507,16 @@ SYSCALL_DEFINE5(perf_event_open, | |||
| 5094 | struct perf_event_attr __user *, attr_uptr, | 5507 | struct perf_event_attr __user *, attr_uptr, |
| 5095 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) | 5508 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) |
| 5096 | { | 5509 | { |
| 5097 | struct perf_event *event, *group_leader = NULL, *output_event = NULL; | 5510 | struct perf_event *group_leader = NULL, *output_event = NULL; |
| 5511 | struct perf_event *event, *sibling; | ||
| 5098 | struct perf_event_attr attr; | 5512 | struct perf_event_attr attr; |
| 5099 | struct perf_event_context *ctx; | 5513 | struct perf_event_context *ctx; |
| 5100 | struct file *event_file = NULL; | 5514 | struct file *event_file = NULL; |
| 5101 | struct file *group_file = NULL; | 5515 | struct file *group_file = NULL; |
| 5516 | struct task_struct *task = NULL; | ||
| 5517 | struct pmu *pmu; | ||
| 5102 | int event_fd; | 5518 | int event_fd; |
| 5519 | int move_group = 0; | ||
| 5103 | int fput_needed = 0; | 5520 | int fput_needed = 0; |
| 5104 | int err; | 5521 | int err; |
| 5105 | 5522 | ||
| @@ -5125,20 +5542,11 @@ SYSCALL_DEFINE5(perf_event_open, | |||
| 5125 | if (event_fd < 0) | 5542 | if (event_fd < 0) |
| 5126 | return event_fd; | 5543 | return event_fd; |
| 5127 | 5544 | ||
| 5128 | /* | ||
| 5129 | * Get the target context (task or percpu): | ||
| 5130 | */ | ||
| 5131 | ctx = find_get_context(pid, cpu); | ||
| 5132 | if (IS_ERR(ctx)) { | ||
| 5133 | err = PTR_ERR(ctx); | ||
| 5134 | goto err_fd; | ||
| 5135 | } | ||
| 5136 | |||
| 5137 | if (group_fd != -1) { | 5545 | if (group_fd != -1) { |
| 5138 | group_leader = perf_fget_light(group_fd, &fput_needed); | 5546 | group_leader = perf_fget_light(group_fd, &fput_needed); |
| 5139 | if (IS_ERR(group_leader)) { | 5547 | if (IS_ERR(group_leader)) { |
| 5140 | err = PTR_ERR(group_leader); | 5548 | err = PTR_ERR(group_leader); |
| 5141 | goto err_put_context; | 5549 | goto err_fd; |
| 5142 | } | 5550 | } |
| 5143 | group_file = group_leader->filp; | 5551 | group_file = group_leader->filp; |
| 5144 | if (flags & PERF_FLAG_FD_OUTPUT) | 5552 | if (flags & PERF_FLAG_FD_OUTPUT) |
| @@ -5147,6 +5555,58 @@ SYSCALL_DEFINE5(perf_event_open, | |||
| 5147 | group_leader = NULL; | 5555 | group_leader = NULL; |
| 5148 | } | 5556 | } |
| 5149 | 5557 | ||
| 5558 | if (pid != -1) { | ||
| 5559 | task = find_lively_task_by_vpid(pid); | ||
| 5560 | if (IS_ERR(task)) { | ||
| 5561 | err = PTR_ERR(task); | ||
| 5562 | goto err_group_fd; | ||
| 5563 | } | ||
| 5564 | } | ||
| 5565 | |||
| 5566 | event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL); | ||
| 5567 | if (IS_ERR(event)) { | ||
| 5568 | err = PTR_ERR(event); | ||
| 5569 | goto err_task; | ||
| 5570 | } | ||
| 5571 | |||
| 5572 | /* | ||
| 5573 | * Special case software events and allow them to be part of | ||
| 5574 | * any hardware group. | ||
| 5575 | */ | ||
| 5576 | pmu = event->pmu; | ||
| 5577 | |||
| 5578 | if (group_leader && | ||
| 5579 | (is_software_event(event) != is_software_event(group_leader))) { | ||
| 5580 | if (is_software_event(event)) { | ||
| 5581 | /* | ||
| 5582 | * If event and group_leader are not both a software | ||
| 5583 | * event, and event is, then group leader is not. | ||
| 5584 | * | ||
| 5585 | * Allow the addition of software events to !software | ||
| 5586 | * groups, this is safe because software events never | ||
| 5587 | * fail to schedule. | ||
| 5588 | */ | ||
| 5589 | pmu = group_leader->pmu; | ||
| 5590 | } else if (is_software_event(group_leader) && | ||
| 5591 | (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { | ||
| 5592 | /* | ||
| 5593 | * In case the group is a pure software group, and we | ||
| 5594 | * try to add a hardware event, move the whole group to | ||
| 5595 | * the hardware context. | ||
| 5596 | */ | ||
| 5597 | move_group = 1; | ||
| 5598 | } | ||
| 5599 | } | ||
| 5600 | |||
| 5601 | /* | ||
| 5602 | * Get the target context (task or percpu): | ||
| 5603 | */ | ||
| 5604 | ctx = find_get_context(pmu, task, cpu); | ||
| 5605 | if (IS_ERR(ctx)) { | ||
| 5606 | err = PTR_ERR(ctx); | ||
| 5607 | goto err_alloc; | ||
| 5608 | } | ||
| 5609 | |||
| 5150 | /* | 5610 | /* |
| 5151 | * Look up the group leader (we will attach this event to it): | 5611 | * Look up the group leader (we will attach this event to it): |
| 5152 | */ | 5612 | */ |
| @@ -5158,42 +5618,66 @@ SYSCALL_DEFINE5(perf_event_open, | |||
| 5158 | * becoming part of another group-sibling): | 5618 | * becoming part of another group-sibling): |
| 5159 | */ | 5619 | */ |
| 5160 | if (group_leader->group_leader != group_leader) | 5620 | if (group_leader->group_leader != group_leader) |
| 5161 | goto err_put_context; | 5621 | goto err_context; |
| 5162 | /* | 5622 | /* |
| 5163 | * Do not allow attaching to a group in a different | 5623 | * Do not allow attaching to a group in a different |
| 5164 | * task or CPU context: | 5624 | * task or CPU context: |
| 5165 | */ | 5625 | */ |
| 5166 | if (group_leader->ctx != ctx) | 5626 | if (move_group) { |
| 5167 | goto err_put_context; | 5627 | if (group_leader->ctx->type != ctx->type) |
| 5628 | goto err_context; | ||
| 5629 | } else { | ||
| 5630 | if (group_leader->ctx != ctx) | ||
| 5631 | goto err_context; | ||
| 5632 | } | ||
| 5633 | |||
| 5168 | /* | 5634 | /* |
| 5169 | * Only a group leader can be exclusive or pinned | 5635 | * Only a group leader can be exclusive or pinned |
| 5170 | */ | 5636 | */ |
| 5171 | if (attr.exclusive || attr.pinned) | 5637 | if (attr.exclusive || attr.pinned) |
| 5172 | goto err_put_context; | 5638 | goto err_context; |
| 5173 | } | ||
| 5174 | |||
| 5175 | event = perf_event_alloc(&attr, cpu, ctx, group_leader, | ||
| 5176 | NULL, NULL, GFP_KERNEL); | ||
| 5177 | if (IS_ERR(event)) { | ||
| 5178 | err = PTR_ERR(event); | ||
| 5179 | goto err_put_context; | ||
| 5180 | } | 5639 | } |
| 5181 | 5640 | ||
| 5182 | if (output_event) { | 5641 | if (output_event) { |
| 5183 | err = perf_event_set_output(event, output_event); | 5642 | err = perf_event_set_output(event, output_event); |
| 5184 | if (err) | 5643 | if (err) |
| 5185 | goto err_free_put_context; | 5644 | goto err_context; |
| 5186 | } | 5645 | } |
| 5187 | 5646 | ||
| 5188 | event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); | 5647 | event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); |
| 5189 | if (IS_ERR(event_file)) { | 5648 | if (IS_ERR(event_file)) { |
| 5190 | err = PTR_ERR(event_file); | 5649 | err = PTR_ERR(event_file); |
| 5191 | goto err_free_put_context; | 5650 | goto err_context; |
| 5651 | } | ||
| 5652 | |||
| 5653 | if (move_group) { | ||
| 5654 | struct perf_event_context *gctx = group_leader->ctx; | ||
| 5655 | |||
| 5656 | mutex_lock(&gctx->mutex); | ||
| 5657 | perf_event_remove_from_context(group_leader); | ||
| 5658 | list_for_each_entry(sibling, &group_leader->sibling_list, | ||
| 5659 | group_entry) { | ||
| 5660 | perf_event_remove_from_context(sibling); | ||
| 5661 | put_ctx(gctx); | ||
| 5662 | } | ||
| 5663 | mutex_unlock(&gctx->mutex); | ||
| 5664 | put_ctx(gctx); | ||
| 5192 | } | 5665 | } |
| 5193 | 5666 | ||
| 5194 | event->filp = event_file; | 5667 | event->filp = event_file; |
| 5195 | WARN_ON_ONCE(ctx->parent_ctx); | 5668 | WARN_ON_ONCE(ctx->parent_ctx); |
| 5196 | mutex_lock(&ctx->mutex); | 5669 | mutex_lock(&ctx->mutex); |
| 5670 | |||
| 5671 | if (move_group) { | ||
| 5672 | perf_install_in_context(ctx, group_leader, cpu); | ||
| 5673 | get_ctx(ctx); | ||
| 5674 | list_for_each_entry(sibling, &group_leader->sibling_list, | ||
| 5675 | group_entry) { | ||
| 5676 | perf_install_in_context(ctx, sibling, cpu); | ||
| 5677 | get_ctx(ctx); | ||
| 5678 | } | ||
| 5679 | } | ||
| 5680 | |||
| 5197 | perf_install_in_context(ctx, event, cpu); | 5681 | perf_install_in_context(ctx, event, cpu); |
| 5198 | ++ctx->generation; | 5682 | ++ctx->generation; |
| 5199 | mutex_unlock(&ctx->mutex); | 5683 | mutex_unlock(&ctx->mutex); |
| @@ -5214,11 +5698,15 @@ SYSCALL_DEFINE5(perf_event_open, | |||
| 5214 | fd_install(event_fd, event_file); | 5698 | fd_install(event_fd, event_file); |
| 5215 | return event_fd; | 5699 | return event_fd; |
| 5216 | 5700 | ||
| 5217 | err_free_put_context: | 5701 | err_context: |
| 5702 | put_ctx(ctx); | ||
| 5703 | err_alloc: | ||
| 5218 | free_event(event); | 5704 | free_event(event); |
| 5219 | err_put_context: | 5705 | err_task: |
| 5706 | if (task) | ||
| 5707 | put_task_struct(task); | ||
| 5708 | err_group_fd: | ||
| 5220 | fput_light(group_file, fput_needed); | 5709 | fput_light(group_file, fput_needed); |
| 5221 | put_ctx(ctx); | ||
| 5222 | err_fd: | 5710 | err_fd: |
| 5223 | put_unused_fd(event_fd); | 5711 | put_unused_fd(event_fd); |
| 5224 | return err; | 5712 | return err; |
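None of this reshuffling changes the system-call ABI; user space is untouched. For orientation, a minimal (hypothetical) caller of the syscall still looks like this:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_task_clock(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	/* attr, pid, cpu = -1 (any), group_fd = -1 (no group), flags = 0 */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}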
| @@ -5229,32 +5717,31 @@ err_fd: | |||
| 5229 | * | 5717 | * |
| 5230 | * @attr: attributes of the counter to create | 5718 | * @attr: attributes of the counter to create |
| 5231 | * @cpu: cpu in which the counter is bound | 5719 | * @cpu: cpu in which the counter is bound |
| 5232 | * @pid: task to profile | 5720 | * @task: task to profile (NULL for percpu) |
| 5233 | */ | 5721 | */ |
| 5234 | struct perf_event * | 5722 | struct perf_event * |
| 5235 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | 5723 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, |
| 5236 | pid_t pid, | 5724 | struct task_struct *task, |
| 5237 | perf_overflow_handler_t overflow_handler) | 5725 | perf_overflow_handler_t overflow_handler) |
| 5238 | { | 5726 | { |
| 5239 | struct perf_event *event; | ||
| 5240 | struct perf_event_context *ctx; | 5727 | struct perf_event_context *ctx; |
| 5728 | struct perf_event *event; | ||
| 5241 | int err; | 5729 | int err; |
| 5242 | 5730 | ||
| 5243 | /* | 5731 | /* |
| 5244 | * Get the target context (task or percpu): | 5732 | * Get the target context (task or percpu): |
| 5245 | */ | 5733 | */ |
| 5246 | 5734 | ||
| 5247 | ctx = find_get_context(pid, cpu); | 5735 | event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler); |
| 5248 | if (IS_ERR(ctx)) { | ||
| 5249 | err = PTR_ERR(ctx); | ||
| 5250 | goto err_exit; | ||
| 5251 | } | ||
| 5252 | |||
| 5253 | event = perf_event_alloc(attr, cpu, ctx, NULL, | ||
| 5254 | NULL, overflow_handler, GFP_KERNEL); | ||
| 5255 | if (IS_ERR(event)) { | 5736 | if (IS_ERR(event)) { |
| 5256 | err = PTR_ERR(event); | 5737 | err = PTR_ERR(event); |
| 5257 | goto err_put_context; | 5738 | goto err; |
| 5739 | } | ||
| 5740 | |||
| 5741 | ctx = find_get_context(event->pmu, task, cpu); | ||
| 5742 | if (IS_ERR(ctx)) { | ||
| 5743 | err = PTR_ERR(ctx); | ||
| 5744 | goto err_free; | ||
| 5258 | } | 5745 | } |
| 5259 | 5746 | ||
| 5260 | event->filp = NULL; | 5747 | event->filp = NULL; |
| @@ -5272,112 +5759,13 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | |||
| 5272 | 5759 | ||
| 5273 | return event; | 5760 | return event; |
| 5274 | 5761 | ||
| 5275 | err_put_context: | 5762 | err_free: |
| 5276 | put_ctx(ctx); | 5763 | free_event(event); |
| 5277 | err_exit: | 5764 | err: |
| 5278 | return ERR_PTR(err); | 5765 | return ERR_PTR(err); |
| 5279 | } | 5766 | } |
| 5280 | EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); | 5767 | EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); |
| 5281 | 5768 | ||
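In-kernel users of perf_event_create_kernel_counter() (hw_breakpoint is the obvious one) now pass a task_struct, or NULL for a per-cpu counter, instead of a pid. A hedged sketch of an adapted caller; the attribute values and the NULL overflow handler are illustrative only:

static struct perf_event *sketch_counter_on(struct task_struct *task)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_TASK_CLOCK,
		.size	= sizeof(struct perf_event_attr),
	};

	/*
	 * task != NULL requests a per-task counter on any cpu (-1);
	 * task == NULL with a valid cpu would give a per-cpu counter.
	 */
	return perf_event_create_kernel_counter(&attr, -1, task, NULL);
}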
| 5282 | /* | ||
| 5283 | * inherit an event from parent task to child task: | ||
| 5284 | */ | ||
| 5285 | static struct perf_event * | ||
| 5286 | inherit_event(struct perf_event *parent_event, | ||
| 5287 | struct task_struct *parent, | ||
| 5288 | struct perf_event_context *parent_ctx, | ||
| 5289 | struct task_struct *child, | ||
| 5290 | struct perf_event *group_leader, | ||
| 5291 | struct perf_event_context *child_ctx) | ||
| 5292 | { | ||
| 5293 | struct perf_event *child_event; | ||
| 5294 | |||
| 5295 | /* | ||
| 5296 | * Instead of creating recursive hierarchies of events, | ||
| 5297 | * we link inherited events back to the original parent, | ||
| 5298 | * which has a filp for sure, which we use as the reference | ||
| 5299 | * count: | ||
| 5300 | */ | ||
| 5301 | if (parent_event->parent) | ||
| 5302 | parent_event = parent_event->parent; | ||
| 5303 | |||
| 5304 | child_event = perf_event_alloc(&parent_event->attr, | ||
| 5305 | parent_event->cpu, child_ctx, | ||
| 5306 | group_leader, parent_event, | ||
| 5307 | NULL, GFP_KERNEL); | ||
| 5308 | if (IS_ERR(child_event)) | ||
| 5309 | return child_event; | ||
| 5310 | get_ctx(child_ctx); | ||
| 5311 | |||
| 5312 | /* | ||
| 5313 | * Make the child state follow the state of the parent event, | ||
| 5314 | * not its attr.disabled bit. We hold the parent's mutex, | ||
| 5315 | * so we won't race with perf_event_{en, dis}able_family. | ||
| 5316 | */ | ||
| 5317 | if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) | ||
| 5318 | child_event->state = PERF_EVENT_STATE_INACTIVE; | ||
| 5319 | else | ||
| 5320 | child_event->state = PERF_EVENT_STATE_OFF; | ||
| 5321 | |||
| 5322 | if (parent_event->attr.freq) { | ||
| 5323 | u64 sample_period = parent_event->hw.sample_period; | ||
| 5324 | struct hw_perf_event *hwc = &child_event->hw; | ||
| 5325 | |||
| 5326 | hwc->sample_period = sample_period; | ||
| 5327 | hwc->last_period = sample_period; | ||
| 5328 | |||
| 5329 | local64_set(&hwc->period_left, sample_period); | ||
| 5330 | } | ||
| 5331 | |||
| 5332 | child_event->overflow_handler = parent_event->overflow_handler; | ||
| 5333 | |||
| 5334 | /* | ||
| 5335 | * Link it up in the child's context: | ||
| 5336 | */ | ||
| 5337 | add_event_to_ctx(child_event, child_ctx); | ||
| 5338 | |||
| 5339 | /* | ||
| 5340 | * Get a reference to the parent filp - we will fput it | ||
| 5341 | * when the child event exits. This is safe to do because | ||
| 5342 | * we are in the parent and we know that the filp still | ||
| 5343 | * exists and has a nonzero count: | ||
| 5344 | */ | ||
| 5345 | atomic_long_inc(&parent_event->filp->f_count); | ||
| 5346 | |||
| 5347 | /* | ||
| 5348 | * Link this into the parent event's child list | ||
| 5349 | */ | ||
| 5350 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); | ||
| 5351 | mutex_lock(&parent_event->child_mutex); | ||
| 5352 | list_add_tail(&child_event->child_list, &parent_event->child_list); | ||
| 5353 | mutex_unlock(&parent_event->child_mutex); | ||
| 5354 | |||
| 5355 | return child_event; | ||
| 5356 | } | ||
| 5357 | |||
| 5358 | static int inherit_group(struct perf_event *parent_event, | ||
| 5359 | struct task_struct *parent, | ||
| 5360 | struct perf_event_context *parent_ctx, | ||
| 5361 | struct task_struct *child, | ||
| 5362 | struct perf_event_context *child_ctx) | ||
| 5363 | { | ||
| 5364 | struct perf_event *leader; | ||
| 5365 | struct perf_event *sub; | ||
| 5366 | struct perf_event *child_ctr; | ||
| 5367 | |||
| 5368 | leader = inherit_event(parent_event, parent, parent_ctx, | ||
| 5369 | child, NULL, child_ctx); | ||
| 5370 | if (IS_ERR(leader)) | ||
| 5371 | return PTR_ERR(leader); | ||
| 5372 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { | ||
| 5373 | child_ctr = inherit_event(sub, parent, parent_ctx, | ||
| 5374 | child, leader, child_ctx); | ||
| 5375 | if (IS_ERR(child_ctr)) | ||
| 5376 | return PTR_ERR(child_ctr); | ||
| 5377 | } | ||
| 5378 | return 0; | ||
| 5379 | } | ||
| 5380 | |||
| 5381 | static void sync_child_event(struct perf_event *child_event, | 5769 | static void sync_child_event(struct perf_event *child_event, |
| 5382 | struct task_struct *child) | 5770 | struct task_struct *child) |
| 5383 | { | 5771 | { |
| @@ -5434,16 +5822,13 @@ __perf_event_exit_task(struct perf_event *child_event, | |||
| 5434 | } | 5822 | } |
| 5435 | } | 5823 | } |
| 5436 | 5824 | ||
| 5437 | /* | 5825 | static void perf_event_exit_task_context(struct task_struct *child, int ctxn) |
| 5438 | * When a child task exits, feed back event values to parent events. | ||
| 5439 | */ | ||
| 5440 | void perf_event_exit_task(struct task_struct *child) | ||
| 5441 | { | 5826 | { |
| 5442 | struct perf_event *child_event, *tmp; | 5827 | struct perf_event *child_event, *tmp; |
| 5443 | struct perf_event_context *child_ctx; | 5828 | struct perf_event_context *child_ctx; |
| 5444 | unsigned long flags; | 5829 | unsigned long flags; |
| 5445 | 5830 | ||
| 5446 | if (likely(!child->perf_event_ctxp)) { | 5831 | if (likely(!child->perf_event_ctxp[ctxn])) { |
| 5447 | perf_event_task(child, NULL, 0); | 5832 | perf_event_task(child, NULL, 0); |
| 5448 | return; | 5833 | return; |
| 5449 | } | 5834 | } |
| @@ -5455,8 +5840,8 @@ void perf_event_exit_task(struct task_struct *child) | |||
| 5455 | * scheduled, so we are now safe from rescheduling changing | 5840 | * scheduled, so we are now safe from rescheduling changing |
| 5456 | * our context. | 5841 | * our context. |
| 5457 | */ | 5842 | */ |
| 5458 | child_ctx = child->perf_event_ctxp; | 5843 | child_ctx = child->perf_event_ctxp[ctxn]; |
| 5459 | __perf_event_task_sched_out(child_ctx); | 5844 | task_ctx_sched_out(child_ctx, EVENT_ALL); |
| 5460 | 5845 | ||
| 5461 | /* | 5846 | /* |
| 5462 | * Take the context lock here so that if find_get_context is | 5847 | * Take the context lock here so that if find_get_context is |
| @@ -5464,7 +5849,7 @@ void perf_event_exit_task(struct task_struct *child) | |||
| 5464 | * incremented the context's refcount before we do put_ctx below. | 5849 | * incremented the context's refcount before we do put_ctx below. |
| 5465 | */ | 5850 | */ |
| 5466 | raw_spin_lock(&child_ctx->lock); | 5851 | raw_spin_lock(&child_ctx->lock); |
| 5467 | child->perf_event_ctxp = NULL; | 5852 | child->perf_event_ctxp[ctxn] = NULL; |
| 5468 | /* | 5853 | /* |
| 5469 | * If this context is a clone; unclone it so it can't get | 5854 | * If this context is a clone; unclone it so it can't get |
| 5470 | * swapped to another process while we're removing all | 5855 | * swapped to another process while we're removing all |
| @@ -5517,6 +5902,17 @@ again: | |||
| 5517 | put_ctx(child_ctx); | 5902 | put_ctx(child_ctx); |
| 5518 | } | 5903 | } |
| 5519 | 5904 | ||
| 5905 | /* | ||
| 5906 | * When a child task exits, feed back event values to parent events. | ||
| 5907 | */ | ||
| 5908 | void perf_event_exit_task(struct task_struct *child) | ||
| 5909 | { | ||
| 5910 | int ctxn; | ||
| 5911 | |||
| 5912 | for_each_task_context_nr(ctxn) | ||
| 5913 | perf_event_exit_task_context(child, ctxn); | ||
| 5914 | } | ||
| 5915 | |||
| 5520 | static void perf_free_event(struct perf_event *event, | 5916 | static void perf_free_event(struct perf_event *event, |
| 5521 | struct perf_event_context *ctx) | 5917 | struct perf_event_context *ctx) |
| 5522 | { | 5918 | { |
| @@ -5538,48 +5934,166 @@ static void perf_free_event(struct perf_event *event, | |||
| 5538 | 5934 | ||
| 5539 | /* | 5935 | /* |
| 5540 | * free an unexposed, unused context as created by inheritance by | 5936 | * free an unexposed, unused context as created by inheritance by |
| 5541 | * init_task below, used by fork() in case of fail. | 5937 | * perf_event_init_task below, used by fork() in case of fail. |
| 5542 | */ | 5938 | */ |
| 5543 | void perf_event_free_task(struct task_struct *task) | 5939 | void perf_event_free_task(struct task_struct *task) |
| 5544 | { | 5940 | { |
| 5545 | struct perf_event_context *ctx = task->perf_event_ctxp; | 5941 | struct perf_event_context *ctx; |
| 5546 | struct perf_event *event, *tmp; | 5942 | struct perf_event *event, *tmp; |
| 5943 | int ctxn; | ||
| 5547 | 5944 | ||
| 5548 | if (!ctx) | 5945 | for_each_task_context_nr(ctxn) { |
| 5549 | return; | 5946 | ctx = task->perf_event_ctxp[ctxn]; |
| 5947 | if (!ctx) | ||
| 5948 | continue; | ||
| 5550 | 5949 | ||
| 5551 | mutex_lock(&ctx->mutex); | 5950 | mutex_lock(&ctx->mutex); |
| 5552 | again: | 5951 | again: |
| 5553 | list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) | 5952 | list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, |
| 5554 | perf_free_event(event, ctx); | 5953 | group_entry) |
| 5954 | perf_free_event(event, ctx); | ||
| 5555 | 5955 | ||
| 5556 | list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, | 5956 | list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, |
| 5557 | group_entry) | 5957 | group_entry) |
| 5558 | perf_free_event(event, ctx); | 5958 | perf_free_event(event, ctx); |
| 5559 | 5959 | ||
| 5560 | if (!list_empty(&ctx->pinned_groups) || | 5960 | if (!list_empty(&ctx->pinned_groups) || |
| 5561 | !list_empty(&ctx->flexible_groups)) | 5961 | !list_empty(&ctx->flexible_groups)) |
| 5562 | goto again; | 5962 | goto again; |
| 5563 | 5963 | ||
| 5564 | mutex_unlock(&ctx->mutex); | 5964 | mutex_unlock(&ctx->mutex); |
| 5565 | 5965 | ||
| 5566 | put_ctx(ctx); | 5966 | put_ctx(ctx); |
| 5967 | } | ||
| 5968 | } | ||
| 5969 | |||
| 5970 | void perf_event_delayed_put(struct task_struct *task) | ||
| 5971 | { | ||
| 5972 | int ctxn; | ||
| 5973 | |||
| 5974 | for_each_task_context_nr(ctxn) | ||
| 5975 | WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); | ||
| 5976 | } | ||
| 5977 | |||
| 5978 | /* | ||
| 5979 | * inherit an event from parent task to child task: | ||
| 5980 | */ | ||
| 5981 | static struct perf_event * | ||
| 5982 | inherit_event(struct perf_event *parent_event, | ||
| 5983 | struct task_struct *parent, | ||
| 5984 | struct perf_event_context *parent_ctx, | ||
| 5985 | struct task_struct *child, | ||
| 5986 | struct perf_event *group_leader, | ||
| 5987 | struct perf_event_context *child_ctx) | ||
| 5988 | { | ||
| 5989 | struct perf_event *child_event; | ||
| 5990 | unsigned long flags; | ||
| 5991 | |||
| 5992 | /* | ||
| 5993 | * Instead of creating recursive hierarchies of events, | ||
| 5994 | * we link inherited events back to the original parent, | ||
| 5995 | * which has a filp for sure, which we use as the reference | ||
| 5996 | * count: | ||
| 5997 | */ | ||
| 5998 | if (parent_event->parent) | ||
| 5999 | parent_event = parent_event->parent; | ||
| 6000 | |||
| 6001 | child_event = perf_event_alloc(&parent_event->attr, | ||
| 6002 | parent_event->cpu, | ||
| 6003 | child, | ||
| 6004 | group_leader, parent_event, | ||
| 6005 | NULL); | ||
| 6006 | if (IS_ERR(child_event)) | ||
| 6007 | return child_event; | ||
| 6008 | get_ctx(child_ctx); | ||
| 6009 | |||
| 6010 | /* | ||
| 6011 | * Make the child state follow the state of the parent event, | ||
| 6012 | * not its attr.disabled bit. We hold the parent's mutex, | ||
| 6013 | * so we won't race with perf_event_{en, dis}able_family. | ||
| 6014 | */ | ||
| 6015 | if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) | ||
| 6016 | child_event->state = PERF_EVENT_STATE_INACTIVE; | ||
| 6017 | else | ||
| 6018 | child_event->state = PERF_EVENT_STATE_OFF; | ||
| 6019 | |||
| 6020 | if (parent_event->attr.freq) { | ||
| 6021 | u64 sample_period = parent_event->hw.sample_period; | ||
| 6022 | struct hw_perf_event *hwc = &child_event->hw; | ||
| 6023 | |||
| 6024 | hwc->sample_period = sample_period; | ||
| 6025 | hwc->last_period = sample_period; | ||
| 6026 | |||
| 6027 | local64_set(&hwc->period_left, sample_period); | ||
| 6028 | } | ||
| 6029 | |||
| 6030 | child_event->ctx = child_ctx; | ||
| 6031 | child_event->overflow_handler = parent_event->overflow_handler; | ||
| 6032 | |||
| 6033 | /* | ||
| 6034 | * Link it up in the child's context: | ||
| 6035 | */ | ||
| 6036 | raw_spin_lock_irqsave(&child_ctx->lock, flags); | ||
| 6037 | add_event_to_ctx(child_event, child_ctx); | ||
| 6038 | raw_spin_unlock_irqrestore(&child_ctx->lock, flags); | ||
| 6039 | |||
| 6040 | /* | ||
| 6041 | * Get a reference to the parent filp - we will fput it | ||
| 6042 | * when the child event exits. This is safe to do because | ||
| 6043 | * we are in the parent and we know that the filp still | ||
| 6044 | * exists and has a nonzero count: | ||
| 6045 | */ | ||
| 6046 | atomic_long_inc(&parent_event->filp->f_count); | ||
| 6047 | |||
| 6048 | /* | ||
| 6049 | * Link this into the parent event's child list | ||
| 6050 | */ | ||
| 6051 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); | ||
| 6052 | mutex_lock(&parent_event->child_mutex); | ||
| 6053 | list_add_tail(&child_event->child_list, &parent_event->child_list); | ||
| 6054 | mutex_unlock(&parent_event->child_mutex); | ||
| 6055 | |||
| 6056 | return child_event; | ||
| 6057 | } | ||
| 6058 | |||
| 6059 | static int inherit_group(struct perf_event *parent_event, | ||
| 6060 | struct task_struct *parent, | ||
| 6061 | struct perf_event_context *parent_ctx, | ||
| 6062 | struct task_struct *child, | ||
| 6063 | struct perf_event_context *child_ctx) | ||
| 6064 | { | ||
| 6065 | struct perf_event *leader; | ||
| 6066 | struct perf_event *sub; | ||
| 6067 | struct perf_event *child_ctr; | ||
| 6068 | |||
| 6069 | leader = inherit_event(parent_event, parent, parent_ctx, | ||
| 6070 | child, NULL, child_ctx); | ||
| 6071 | if (IS_ERR(leader)) | ||
| 6072 | return PTR_ERR(leader); | ||
| 6073 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { | ||
| 6074 | child_ctr = inherit_event(sub, parent, parent_ctx, | ||
| 6075 | child, leader, child_ctx); | ||
| 6076 | if (IS_ERR(child_ctr)) | ||
| 6077 | return PTR_ERR(child_ctr); | ||
| 6078 | } | ||
| 6079 | return 0; | ||
| 5567 | } | 6080 | } |
| 5568 | 6081 | ||
| 5569 | static int | 6082 | static int |
| 5570 | inherit_task_group(struct perf_event *event, struct task_struct *parent, | 6083 | inherit_task_group(struct perf_event *event, struct task_struct *parent, |
| 5571 | struct perf_event_context *parent_ctx, | 6084 | struct perf_event_context *parent_ctx, |
| 5572 | struct task_struct *child, | 6085 | struct task_struct *child, int ctxn, |
| 5573 | int *inherited_all) | 6086 | int *inherited_all) |
| 5574 | { | 6087 | { |
| 5575 | int ret; | 6088 | int ret; |
| 5576 | struct perf_event_context *child_ctx = child->perf_event_ctxp; | 6089 | struct perf_event_context *child_ctx; |
| 5577 | 6090 | ||
| 5578 | if (!event->attr.inherit) { | 6091 | if (!event->attr.inherit) { |
| 5579 | *inherited_all = 0; | 6092 | *inherited_all = 0; |
| 5580 | return 0; | 6093 | return 0; |
| 5581 | } | 6094 | } |
| 5582 | 6095 | ||
| 6096 | child_ctx = child->perf_event_ctxp[ctxn]; | ||
| 5583 | if (!child_ctx) { | 6097 | if (!child_ctx) { |
| 5584 | /* | 6098 | /* |
| 5585 | * This is executed from the parent task context, so | 6099 | * This is executed from the parent task context, so |
| @@ -5588,14 +6102,11 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, | |||
| 5588 | * child. | 6102 | * child. |
| 5589 | */ | 6103 | */ |
| 5590 | 6104 | ||
| 5591 | child_ctx = kzalloc(sizeof(struct perf_event_context), | 6105 | child_ctx = alloc_perf_context(event->pmu, child); |
| 5592 | GFP_KERNEL); | ||
| 5593 | if (!child_ctx) | 6106 | if (!child_ctx) |
| 5594 | return -ENOMEM; | 6107 | return -ENOMEM; |
| 5595 | 6108 | ||
| 5596 | __perf_event_init_context(child_ctx, child); | 6109 | child->perf_event_ctxp[ctxn] = child_ctx; |
| 5597 | child->perf_event_ctxp = child_ctx; | ||
| 5598 | get_task_struct(child); | ||
| 5599 | } | 6110 | } |
| 5600 | 6111 | ||
| 5601 | ret = inherit_group(event, parent, parent_ctx, | 6112 | ret = inherit_group(event, parent, parent_ctx, |
| @@ -5607,11 +6118,10 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, | |||
| 5607 | return ret; | 6118 | return ret; |
| 5608 | } | 6119 | } |
| 5609 | 6120 | ||
| 5610 | |||
| 5611 | /* | 6121 | /* |
| 5612 | * Initialize the perf_event context in task_struct | 6122 | * Initialize the perf_event context in task_struct |
| 5613 | */ | 6123 | */ |
| 5614 | int perf_event_init_task(struct task_struct *child) | 6124 | int perf_event_init_context(struct task_struct *child, int ctxn) |
| 5615 | { | 6125 | { |
| 5616 | struct perf_event_context *child_ctx, *parent_ctx; | 6126 | struct perf_event_context *child_ctx, *parent_ctx; |
| 5617 | struct perf_event_context *cloned_ctx; | 6127 | struct perf_event_context *cloned_ctx; |
| @@ -5620,19 +6130,19 @@ int perf_event_init_task(struct task_struct *child) | |||
| 5620 | int inherited_all = 1; | 6130 | int inherited_all = 1; |
| 5621 | int ret = 0; | 6131 | int ret = 0; |
| 5622 | 6132 | ||
| 5623 | child->perf_event_ctxp = NULL; | 6133 | child->perf_event_ctxp[ctxn] = NULL; |
| 5624 | 6134 | ||
| 5625 | mutex_init(&child->perf_event_mutex); | 6135 | mutex_init(&child->perf_event_mutex); |
| 5626 | INIT_LIST_HEAD(&child->perf_event_list); | 6136 | INIT_LIST_HEAD(&child->perf_event_list); |
| 5627 | 6137 | ||
| 5628 | if (likely(!parent->perf_event_ctxp)) | 6138 | if (likely(!parent->perf_event_ctxp[ctxn])) |
| 5629 | return 0; | 6139 | return 0; |
| 5630 | 6140 | ||
| 5631 | /* | 6141 | /* |
| 5632 | * If the parent's context is a clone, pin it so it won't get | 6142 | * If the parent's context is a clone, pin it so it won't get |
| 5633 | * swapped under us. | 6143 | * swapped under us. |
| 5634 | */ | 6144 | */ |
| 5635 | parent_ctx = perf_pin_task_context(parent); | 6145 | parent_ctx = perf_pin_task_context(parent, ctxn); |
| 5636 | 6146 | ||
| 5637 | /* | 6147 | /* |
| 5638 | * No need to check if parent_ctx != NULL here; since we saw | 6148 | * No need to check if parent_ctx != NULL here; since we saw |
| @@ -5652,20 +6162,20 @@ int perf_event_init_task(struct task_struct *child) | |||
| 5652 | * the list, not manipulating it: | 6162 | * the list, not manipulating it: |
| 5653 | */ | 6163 | */ |
| 5654 | list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { | 6164 | list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { |
| 5655 | ret = inherit_task_group(event, parent, parent_ctx, child, | 6165 | ret = inherit_task_group(event, parent, parent_ctx, |
| 5656 | &inherited_all); | 6166 | child, ctxn, &inherited_all); |
| 5657 | if (ret) | 6167 | if (ret) |
| 5658 | break; | 6168 | break; |
| 5659 | } | 6169 | } |
| 5660 | 6170 | ||
| 5661 | list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { | 6171 | list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { |
| 5662 | ret = inherit_task_group(event, parent, parent_ctx, child, | 6172 | ret = inherit_task_group(event, parent, parent_ctx, |
| 5663 | &inherited_all); | 6173 | child, ctxn, &inherited_all); |
| 5664 | if (ret) | 6174 | if (ret) |
| 5665 | break; | 6175 | break; |
| 5666 | } | 6176 | } |
| 5667 | 6177 | ||
| 5668 | child_ctx = child->perf_event_ctxp; | 6178 | child_ctx = child->perf_event_ctxp[ctxn]; |
| 5669 | 6179 | ||
| 5670 | if (child_ctx && inherited_all) { | 6180 | if (child_ctx && inherited_all) { |
| 5671 | /* | 6181 | /* |
| @@ -5694,63 +6204,98 @@ int perf_event_init_task(struct task_struct *child) | |||
| 5694 | return ret; | 6204 | return ret; |
| 5695 | } | 6205 | } |
| 5696 | 6206 | ||
| 6207 | /* | ||
| 6208 | * Initialize the perf_event context in task_struct | ||
| 6209 | */ | ||
| 6210 | int perf_event_init_task(struct task_struct *child) | ||
| 6211 | { | ||
| 6212 | int ctxn, ret; | ||
| 6213 | |||
| 6214 | for_each_task_context_nr(ctxn) { | ||
| 6215 | ret = perf_event_init_context(child, ctxn); | ||
| 6216 | if (ret) | ||
| 6217 | return ret; | ||
| 6218 | } | ||
| 6219 | |||
| 6220 | return 0; | ||
| 6221 | } | ||
| 6222 | |||
| 5697 | static void __init perf_event_init_all_cpus(void) | 6223 | static void __init perf_event_init_all_cpus(void) |
| 5698 | { | 6224 | { |
| 6225 | struct swevent_htable *swhash; | ||
| 5699 | int cpu; | 6226 | int cpu; |
| 5700 | struct perf_cpu_context *cpuctx; | ||
| 5701 | 6227 | ||
| 5702 | for_each_possible_cpu(cpu) { | 6228 | for_each_possible_cpu(cpu) { |
| 5703 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 6229 | swhash = &per_cpu(swevent_htable, cpu); |
| 5704 | mutex_init(&cpuctx->hlist_mutex); | 6230 | mutex_init(&swhash->hlist_mutex); |
| 5705 | __perf_event_init_context(&cpuctx->ctx, NULL); | 6231 | INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); |
| 5706 | } | 6232 | } |
| 5707 | } | 6233 | } |
| 5708 | 6234 | ||
| 5709 | static void __cpuinit perf_event_init_cpu(int cpu) | 6235 | static void __cpuinit perf_event_init_cpu(int cpu) |
| 5710 | { | 6236 | { |
| 5711 | struct perf_cpu_context *cpuctx; | 6237 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
| 5712 | |||
| 5713 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
| 5714 | 6238 | ||
| 5715 | spin_lock(&perf_resource_lock); | 6239 | mutex_lock(&swhash->hlist_mutex); |
| 5716 | cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; | 6240 | if (swhash->hlist_refcount > 0) { |
| 5717 | spin_unlock(&perf_resource_lock); | ||
| 5718 | |||
| 5719 | mutex_lock(&cpuctx->hlist_mutex); | ||
| 5720 | if (cpuctx->hlist_refcount > 0) { | ||
| 5721 | struct swevent_hlist *hlist; | 6241 | struct swevent_hlist *hlist; |
| 5722 | 6242 | ||
| 5723 | hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); | 6243 | hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); |
| 5724 | WARN_ON_ONCE(!hlist); | 6244 | WARN_ON(!hlist); |
| 5725 | rcu_assign_pointer(cpuctx->swevent_hlist, hlist); | 6245 | rcu_assign_pointer(swhash->swevent_hlist, hlist); |
| 5726 | } | 6246 | } |
| 5727 | mutex_unlock(&cpuctx->hlist_mutex); | 6247 | mutex_unlock(&swhash->hlist_mutex); |
| 5728 | } | 6248 | } |
| 5729 | 6249 | ||
| 5730 | #ifdef CONFIG_HOTPLUG_CPU | 6250 | #ifdef CONFIG_HOTPLUG_CPU |
| 5731 | static void __perf_event_exit_cpu(void *info) | 6251 | static void perf_pmu_rotate_stop(struct pmu *pmu) |
| 5732 | { | 6252 | { |
| 5733 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 6253 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| 5734 | struct perf_event_context *ctx = &cpuctx->ctx; | 6254 | |
| 6255 | WARN_ON(!irqs_disabled()); | ||
| 6256 | |||
| 6257 | list_del_init(&cpuctx->rotation_list); | ||
| 6258 | } | ||
| 6259 | |||
| 6260 | static void __perf_event_exit_context(void *__info) | ||
| 6261 | { | ||
| 6262 | struct perf_event_context *ctx = __info; | ||
| 5735 | struct perf_event *event, *tmp; | 6263 | struct perf_event *event, *tmp; |
| 5736 | 6264 | ||
| 6265 | perf_pmu_rotate_stop(ctx->pmu); | ||
| 6266 | |||
| 5737 | list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) | 6267 | list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) |
| 5738 | __perf_event_remove_from_context(event); | 6268 | __perf_event_remove_from_context(event); |
| 5739 | list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) | 6269 | list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) |
| 5740 | __perf_event_remove_from_context(event); | 6270 | __perf_event_remove_from_context(event); |
| 5741 | } | 6271 | } |
| 6272 | |||
| 6273 | static void perf_event_exit_cpu_context(int cpu) | ||
| 6274 | { | ||
| 6275 | struct perf_event_context *ctx; | ||
| 6276 | struct pmu *pmu; | ||
| 6277 | int idx; | ||
| 6278 | |||
| 6279 | idx = srcu_read_lock(&pmus_srcu); | ||
| 6280 | list_for_each_entry_rcu(pmu, &pmus, entry) { | ||
| 6281 | ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; | ||
| 6282 | |||
| 6283 | mutex_lock(&ctx->mutex); | ||
| 6284 | smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); | ||
| 6285 | mutex_unlock(&ctx->mutex); | ||
| 6286 | } | ||
| 6287 | srcu_read_unlock(&pmus_srcu, idx); | ||
| 6288 | } | ||
| 6289 | |||
| 5742 | static void perf_event_exit_cpu(int cpu) | 6290 | static void perf_event_exit_cpu(int cpu) |
| 5743 | { | 6291 | { |
| 5744 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | 6292 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
| 5745 | struct perf_event_context *ctx = &cpuctx->ctx; | ||
| 5746 | 6293 | ||
| 5747 | mutex_lock(&cpuctx->hlist_mutex); | 6294 | mutex_lock(&swhash->hlist_mutex); |
| 5748 | swevent_hlist_release(cpuctx); | 6295 | swevent_hlist_release(swhash); |
| 5749 | mutex_unlock(&cpuctx->hlist_mutex); | 6296 | mutex_unlock(&swhash->hlist_mutex); |
| 5750 | 6297 | ||
| 5751 | mutex_lock(&ctx->mutex); | 6298 | perf_event_exit_cpu_context(cpu); |
| 5752 | smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1); | ||
| 5753 | mutex_unlock(&ctx->mutex); | ||
| 5754 | } | 6299 | } |
| 5755 | #else | 6300 | #else |
| 5756 | static inline void perf_event_exit_cpu(int cpu) { } | 6301 | static inline void perf_event_exit_cpu(int cpu) { } |
| @@ -5780,118 +6325,13 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
| 5780 | return NOTIFY_OK; | 6325 | return NOTIFY_OK; |
| 5781 | } | 6326 | } |
| 5782 | 6327 | ||
| 5783 | /* | ||
| 5784 | * This has to have a higher priority than migration_notifier in sched.c. | ||
| 5785 | */ | ||
| 5786 | static struct notifier_block __cpuinitdata perf_cpu_nb = { | ||
| 5787 | .notifier_call = perf_cpu_notify, | ||
| 5788 | .priority = 20, | ||
| 5789 | }; | ||
| 5790 | |||
| 5791 | void __init perf_event_init(void) | 6328 | void __init perf_event_init(void) |
| 5792 | { | 6329 | { |
| 5793 | perf_event_init_all_cpus(); | 6330 | perf_event_init_all_cpus(); |
| 5794 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | 6331 | init_srcu_struct(&pmus_srcu); |
| 5795 | (void *)(long)smp_processor_id()); | 6332 | perf_pmu_register(&perf_swevent); |
| 5796 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, | 6333 | perf_pmu_register(&perf_cpu_clock); |
| 5797 | (void *)(long)smp_processor_id()); | 6334 | perf_pmu_register(&perf_task_clock); |
| 5798 | register_cpu_notifier(&perf_cpu_nb); | 6335 | perf_tp_register(); |
| 5799 | } | 6336 | perf_cpu_notifier(perf_cpu_notify); |
| 5800 | |||
| 5801 | static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, | ||
| 5802 | struct sysdev_class_attribute *attr, | ||
| 5803 | char *buf) | ||
| 5804 | { | ||
| 5805 | return sprintf(buf, "%d\n", perf_reserved_percpu); | ||
| 5806 | } | ||
| 5807 | |||
| 5808 | static ssize_t | ||
| 5809 | perf_set_reserve_percpu(struct sysdev_class *class, | ||
| 5810 | struct sysdev_class_attribute *attr, | ||
| 5811 | const char *buf, | ||
| 5812 | size_t count) | ||
| 5813 | { | ||
| 5814 | struct perf_cpu_context *cpuctx; | ||
| 5815 | unsigned long val; | ||
| 5816 | int err, cpu, mpt; | ||
| 5817 | |||
| 5818 | err = strict_strtoul(buf, 10, &val); | ||
| 5819 | if (err) | ||
| 5820 | return err; | ||
| 5821 | if (val > perf_max_events) | ||
| 5822 | return -EINVAL; | ||
| 5823 | |||
| 5824 | spin_lock(&perf_resource_lock); | ||
| 5825 | perf_reserved_percpu = val; | ||
| 5826 | for_each_online_cpu(cpu) { | ||
| 5827 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
| 5828 | raw_spin_lock_irq(&cpuctx->ctx.lock); | ||
| 5829 | mpt = min(perf_max_events - cpuctx->ctx.nr_events, | ||
| 5830 | perf_max_events - perf_reserved_percpu); | ||
| 5831 | cpuctx->max_pertask = mpt; | ||
| 5832 | raw_spin_unlock_irq(&cpuctx->ctx.lock); | ||
| 5833 | } | ||
| 5834 | spin_unlock(&perf_resource_lock); | ||
| 5835 | |||
| 5836 | return count; | ||
| 5837 | } | ||
| 5838 | |||
| 5839 | static ssize_t perf_show_overcommit(struct sysdev_class *class, | ||
| 5840 | struct sysdev_class_attribute *attr, | ||
| 5841 | char *buf) | ||
| 5842 | { | ||
| 5843 | return sprintf(buf, "%d\n", perf_overcommit); | ||
| 5844 | } | ||
| 5845 | |||
| 5846 | static ssize_t | ||
| 5847 | perf_set_overcommit(struct sysdev_class *class, | ||
| 5848 | struct sysdev_class_attribute *attr, | ||
| 5849 | const char *buf, size_t count) | ||
| 5850 | { | ||
| 5851 | unsigned long val; | ||
| 5852 | int err; | ||
| 5853 | |||
| 5854 | err = strict_strtoul(buf, 10, &val); | ||
| 5855 | if (err) | ||
| 5856 | return err; | ||
| 5857 | if (val > 1) | ||
| 5858 | return -EINVAL; | ||
| 5859 | |||
| 5860 | spin_lock(&perf_resource_lock); | ||
| 5861 | perf_overcommit = val; | ||
| 5862 | spin_unlock(&perf_resource_lock); | ||
| 5863 | |||
| 5864 | return count; | ||
| 5865 | } | ||
| 5866 | |||
| 5867 | static SYSDEV_CLASS_ATTR( | ||
| 5868 | reserve_percpu, | ||
| 5869 | 0644, | ||
| 5870 | perf_show_reserve_percpu, | ||
| 5871 | perf_set_reserve_percpu | ||
| 5872 | ); | ||
| 5873 | |||
| 5874 | static SYSDEV_CLASS_ATTR( | ||
| 5875 | overcommit, | ||
| 5876 | 0644, | ||
| 5877 | perf_show_overcommit, | ||
| 5878 | perf_set_overcommit | ||
| 5879 | ); | ||
| 5880 | |||
| 5881 | static struct attribute *perfclass_attrs[] = { | ||
| 5882 | &attr_reserve_percpu.attr, | ||
| 5883 | &attr_overcommit.attr, | ||
| 5884 | NULL | ||
| 5885 | }; | ||
| 5886 | |||
| 5887 | static struct attribute_group perfclass_attr_group = { | ||
| 5888 | .attrs = perfclass_attrs, | ||
| 5889 | .name = "perf_events", | ||
| 5890 | }; | ||
| 5891 | |||
| 5892 | static int __init perf_event_sysfs_init(void) | ||
| 5893 | { | ||
| 5894 | return sysfs_create_group(&cpu_sysdev_class.kset.kobj, | ||
| 5895 | &perfclass_attr_group); | ||
| 5896 | } | 6337 | } |
| 5897 | device_initcall(perf_event_sysfs_init); | ||
diff --git a/kernel/pid.c b/kernel/pid.c index d55c6fb8d087..39b65b69584f 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
| @@ -401,7 +401,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type) | |||
| 401 | struct task_struct *result = NULL; | 401 | struct task_struct *result = NULL; |
| 402 | if (pid) { | 402 | if (pid) { |
| 403 | struct hlist_node *first; | 403 | struct hlist_node *first; |
| 404 | first = rcu_dereference_check(pid->tasks[type].first, | 404 | first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), |
| 405 | rcu_read_lock_held() || | 405 | rcu_read_lock_held() || |
| 406 | lockdep_tasklist_lock_is_held()); | 406 | lockdep_tasklist_lock_is_held()); |
| 407 | if (first) | 407 | if (first) |
| @@ -416,6 +416,7 @@ EXPORT_SYMBOL(pid_task); | |||
| 416 | */ | 416 | */ |
| 417 | struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) | 417 | struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) |
| 418 | { | 418 | { |
| 419 | rcu_lockdep_assert(rcu_read_lock_held()); | ||
| 419 | return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); | 420 | return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); |
| 420 | } | 421 | } |
| 421 | 422 | ||
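The new rcu_lockdep_assert() documents, and with CONFIG_PROVE_RCU enforces, that callers wrap the lookup in an RCU read-side critical section. A typical caller therefore follows this pattern (a sketch; the helper name is made up):

static struct task_struct *sketch_get_task(pid_t nr, struct pid_namespace *ns)
{
	struct task_struct *task;

	rcu_read_lock();			/* required by the new assertion */
	task = find_task_by_pid_ns(nr, ns);
	if (task)
		get_task_struct(task);		/* pin it before leaving the RCU section */
	rcu_read_unlock();

	return task;
}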
diff --git a/kernel/printk.c b/kernel/printk.c index 8fe465ac008a..2531017795f6 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
| @@ -85,7 +85,7 @@ EXPORT_SYMBOL(oops_in_progress); | |||
| 85 | * provides serialisation for access to the entire console | 85 | * provides serialisation for access to the entire console |
| 86 | * driver system. | 86 | * driver system. |
| 87 | */ | 87 | */ |
| 88 | static DECLARE_MUTEX(console_sem); | 88 | static DEFINE_SEMAPHORE(console_sem); |
| 89 | struct console *console_drivers; | 89 | struct console *console_drivers; |
| 90 | EXPORT_SYMBOL_GPL(console_drivers); | 90 | EXPORT_SYMBOL_GPL(console_drivers); |
| 91 | 91 | ||
| @@ -556,7 +556,7 @@ static void zap_locks(void) | |||
| 556 | /* If a crash is occurring, make sure we can't deadlock */ | 556 | /* If a crash is occurring, make sure we can't deadlock */ |
| 557 | spin_lock_init(&logbuf_lock); | 557 | spin_lock_init(&logbuf_lock); |
| 558 | /* And make sure that we print immediately */ | 558 | /* And make sure that we print immediately */ |
| 559 | init_MUTEX(&console_sem); | 559 | sema_init(&console_sem, 1); |
| 560 | } | 560 | } |
| 561 | 561 | ||
| 562 | #if defined(CONFIG_PRINTK_TIME) | 562 | #if defined(CONFIG_PRINTK_TIME) |
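DECLARE_MUTEX()/init_MUTEX() were semaphore constructors with misleading names and are being removed tree-wide; console_sem moves to the replacement API while down()/up() callers stay as they are. The pattern, shown generically with placeholder names:

#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(sketch_sem);		/* was: static DECLARE_MUTEX(sketch_sem); */

static void sketch_reset(void)
{
	sema_init(&sketch_sem, 1);		/* was: init_MUTEX(&sketch_sem); */
}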
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 4d169835fb36..a23a57a976d1 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -73,12 +73,14 @@ int debug_lockdep_rcu_enabled(void) | |||
| 73 | EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); | 73 | EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); |
| 74 | 74 | ||
| 75 | /** | 75 | /** |
| 76 | * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? | 76 | * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? |
| 77 | * | 77 | * |
| 78 | * Check for bottom half being disabled, which covers both the | 78 | * Check for bottom half being disabled, which covers both the |
| 79 | * CONFIG_PROVE_RCU and not cases. Note that if someone uses | 79 | * CONFIG_PROVE_RCU and not cases. Note that if someone uses |
| 80 | * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) | 80 | * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) |
| 81 | * will show the situation. | 81 | * will show the situation. This is useful for debug checks in functions |
| 82 | * that require that they be called within an RCU read-side critical | ||
| 83 | * section. | ||
| 82 | * | 84 | * |
| 83 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. | 85 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. |
| 84 | */ | 86 | */ |
| @@ -86,7 +88,7 @@ int rcu_read_lock_bh_held(void) | |||
| 86 | { | 88 | { |
| 87 | if (!debug_lockdep_rcu_enabled()) | 89 | if (!debug_lockdep_rcu_enabled()) |
| 88 | return 1; | 90 | return 1; |
| 89 | return in_softirq(); | 91 | return in_softirq() || irqs_disabled(); |
| 90 | } | 92 | } |
| 91 | EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); | 93 | EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); |
| 92 | 94 | ||
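Adding irqs_disabled() to the condition avoids false positives in callers that rely on disabled interrupts, which also keep softirqs off this CPU, rather than on an explicit rcu_read_lock_bh(). The helper is aimed at debug checks of this shape (a sketch; the data type is a placeholder):

struct sketch_data {
	int value;
};

static int sketch_read(struct sketch_data __rcu **slot)
{
	struct sketch_data *p;

	WARN_ON_ONCE(!rcu_read_lock_bh_held());
	p = rcu_dereference_bh(*slot);
	return p ? p->value : 0;
}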
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 196ec02f8be0..d806735342ac 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c | |||
| @@ -59,6 +59,14 @@ int rcu_scheduler_active __read_mostly; | |||
| 59 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | 59 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
| 60 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 60 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 61 | 61 | ||
| 62 | /* Forward declarations for rcutiny_plugin.h. */ | ||
| 63 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); | ||
| 64 | static void __call_rcu(struct rcu_head *head, | ||
| 65 | void (*func)(struct rcu_head *rcu), | ||
| 66 | struct rcu_ctrlblk *rcp); | ||
| 67 | |||
| 68 | #include "rcutiny_plugin.h" | ||
| 69 | |||
| 62 | #ifdef CONFIG_NO_HZ | 70 | #ifdef CONFIG_NO_HZ |
| 63 | 71 | ||
| 64 | static long rcu_dynticks_nesting = 1; | 72 | static long rcu_dynticks_nesting = 1; |
| @@ -140,6 +148,7 @@ void rcu_check_callbacks(int cpu, int user) | |||
| 140 | rcu_sched_qs(cpu); | 148 | rcu_sched_qs(cpu); |
| 141 | else if (!in_softirq()) | 149 | else if (!in_softirq()) |
| 142 | rcu_bh_qs(cpu); | 150 | rcu_bh_qs(cpu); |
| 151 | rcu_preempt_check_callbacks(); | ||
| 143 | } | 152 | } |
| 144 | 153 | ||
| 145 | /* | 154 | /* |
| @@ -162,6 +171,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | |||
| 162 | *rcp->donetail = NULL; | 171 | *rcp->donetail = NULL; |
| 163 | if (rcp->curtail == rcp->donetail) | 172 | if (rcp->curtail == rcp->donetail) |
| 164 | rcp->curtail = &rcp->rcucblist; | 173 | rcp->curtail = &rcp->rcucblist; |
| 174 | rcu_preempt_remove_callbacks(rcp); | ||
| 165 | rcp->donetail = &rcp->rcucblist; | 175 | rcp->donetail = &rcp->rcucblist; |
| 166 | local_irq_restore(flags); | 176 | local_irq_restore(flags); |
| 167 | 177 | ||
| @@ -182,6 +192,7 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
| 182 | { | 192 | { |
| 183 | __rcu_process_callbacks(&rcu_sched_ctrlblk); | 193 | __rcu_process_callbacks(&rcu_sched_ctrlblk); |
| 184 | __rcu_process_callbacks(&rcu_bh_ctrlblk); | 194 | __rcu_process_callbacks(&rcu_bh_ctrlblk); |
| 195 | rcu_preempt_process_callbacks(); | ||
| 185 | } | 196 | } |
| 186 | 197 | ||
| 187 | /* | 198 | /* |
| @@ -223,15 +234,15 @@ static void __call_rcu(struct rcu_head *head, | |||
| 223 | } | 234 | } |
| 224 | 235 | ||
| 225 | /* | 236 | /* |
| 226 | * Post an RCU callback to be invoked after the end of an RCU grace | 237 | * Post an RCU callback to be invoked after the end of an RCU-sched grace |
| 227 | * period. But since we have but one CPU, that would be after any | 238 | * period. But since we have but one CPU, that would be after any |
| 228 | * quiescent state. | 239 | * quiescent state. |
| 229 | */ | 240 | */ |
| 230 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | 241 | void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
| 231 | { | 242 | { |
| 232 | __call_rcu(head, func, &rcu_sched_ctrlblk); | 243 | __call_rcu(head, func, &rcu_sched_ctrlblk); |
| 233 | } | 244 | } |
| 234 | EXPORT_SYMBOL_GPL(call_rcu); | 245 | EXPORT_SYMBOL_GPL(call_rcu_sched); |
| 235 | 246 | ||
| 236 | /* | 247 | /* |
| 237 | * Post an RCU bottom-half callback to be invoked after any subsequent | 248 | * Post an RCU bottom-half callback to be invoked after any subsequent |
| @@ -243,20 +254,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
| 243 | } | 254 | } |
| 244 | EXPORT_SYMBOL_GPL(call_rcu_bh); | 255 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
| 245 | 256 | ||
| 246 | void rcu_barrier(void) | ||
| 247 | { | ||
| 248 | struct rcu_synchronize rcu; | ||
| 249 | |||
| 250 | init_rcu_head_on_stack(&rcu.head); | ||
| 251 | init_completion(&rcu.completion); | ||
| 252 | /* Will wake me after RCU finished. */ | ||
| 253 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
| 254 | /* Wait for it. */ | ||
| 255 | wait_for_completion(&rcu.completion); | ||
| 256 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 257 | } | ||
| 258 | EXPORT_SYMBOL_GPL(rcu_barrier); | ||
| 259 | |||
| 260 | void rcu_barrier_bh(void) | 257 | void rcu_barrier_bh(void) |
| 261 | { | 258 | { |
| 262 | struct rcu_synchronize rcu; | 259 | struct rcu_synchronize rcu; |
| @@ -289,5 +286,3 @@ void __init rcu_init(void) | |||
| 289 | { | 286 | { |
| 290 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 287 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
| 291 | } | 288 | } |
| 292 | |||
| 293 | #include "rcutiny_plugin.h" | ||
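The rcutiny.c hunks above hinge on the two tail pointers (->donetail and ->curtail) of a singly linked callback list: __call_rcu() appends at *curtail, and __rcu_process_callbacks() splices off everything up to *donetail once a grace period has ended. The standalone sketch below is a userspace model of just that list manipulation, with invented names and no locking or softirq machinery; it is not the kernel code.

#include <stdio.h>

/*
 * Minimal userspace model of the tail-pointer callback list manipulated by
 * __call_rcu() and __rcu_process_callbacks() above.  Names are illustrative
 * stand-ins; locking and irq masking are omitted.
 */
struct cb {
        struct cb *next;
        void (*func)(struct cb *cb);
};

struct cblist {
        struct cb *head;        /* first queued callback, or NULL */
        struct cb **donetail;   /* ->next of last cb whose grace period ended */
        struct cb **curtail;    /* ->next of last cb queued so far */
};

/* Self-referential initializer, in the style of rcu_preempt_ctrlblk above. */
static struct cblist cl = {
        .head           = NULL,
        .donetail       = &cl.head,
        .curtail        = &cl.head,
};

/* Queue a callback, as in __call_rcu(): append at *curtail, advance curtail. */
static void cblist_enqueue(struct cb *cb, void (*func)(struct cb *))
{
        cb->func = func;
        cb->next = NULL;
        *cl.curtail = cb;
        cl.curtail = &cb->next;
}

/* A grace period ended: everything queued so far becomes ready to invoke. */
static void cblist_end_gp(void)
{
        cl.donetail = cl.curtail;
}

/* Invoke ready callbacks, mirroring the splice in __rcu_process_callbacks(). */
static void cblist_invoke_done(void)
{
        struct cb *list, *next;

        if (cl.donetail == &cl.head)
                return;                 /* nothing ready to invoke */
        list = cl.head;
        cl.head = *cl.donetail;         /* keep not-yet-ready callbacks */
        *cl.donetail = NULL;            /* terminate the ready sublist */
        if (cl.curtail == cl.donetail)
                cl.curtail = &cl.head;
        cl.donetail = &cl.head;

        for (; list != NULL; list = next) {
                next = list->next;
                list->func(list);
        }
}

static void print_cb(struct cb *cb)
{
        printf("invoked %p\n", (void *)cb);
}

int main(void)
{
        struct cb a, b;

        cblist_enqueue(&a, print_cb);
        cblist_end_gp();                /* a is now ready */
        cblist_enqueue(&b, print_cb);   /* b waits for the next grace period */
        cblist_invoke_done();           /* invokes a only */
        cblist_end_gp();
        cblist_invoke_done();           /* invokes b */
        return 0;
}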
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index d223a92bc742..6ceca4f745ff 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | 2 | * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition |
| 3 | * Internal non-public definitions that provide either classic | 3 | * Internal non-public definitions that provide either classic |
| 4 | * or preemptable semantics. | 4 | * or preemptible semantics. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| @@ -17,11 +17,587 @@ | |||
| 17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
| 18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 19 | * | 19 | * |
| 20 | * Copyright IBM Corporation, 2009 | 20 | * Copyright (c) 2010 Linaro |
| 21 | * | 21 | * |
| 22 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 22 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #ifdef CONFIG_TINY_PREEMPT_RCU | ||
| 26 | |||
| 27 | #include <linux/delay.h> | ||
| 28 | |||
| 29 | /* Global control variables for preemptible RCU. */ | ||
| 30 | struct rcu_preempt_ctrlblk { | ||
| 31 | struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */ | ||
| 32 | struct rcu_head **nexttail; | ||
| 33 | /* Tasks blocked in a preemptible RCU */ | ||
| 34 | /* read-side critical section while a */ | ||
| 35 | /* preemptible-RCU grace period is in */ | ||
| 36 | /* progress must wait for a later grace */ | ||
| 37 | /* period. This pointer points to the */ | ||
| 38 | /* ->next pointer of the last task that */ | ||
| 39 | /* must wait for a later grace period, or */ | ||
| 40 | /* to &->rcb.rcucblist if there is no */ | ||
| 41 | /* such task. */ | ||
| 42 | struct list_head blkd_tasks; | ||
| 43 | /* Tasks blocked in RCU read-side critical */ | ||
| 44 | /* section. Tasks are placed at the head */ | ||
| 45 | /* of this list and age towards the tail. */ | ||
| 46 | struct list_head *gp_tasks; | ||
| 47 | /* Pointer to the first task blocking the */ | ||
| 48 | /* current grace period, or NULL if there */ | ||
| 49 | /* is no such task. */ | ||
| 50 | struct list_head *exp_tasks; | ||
| 51 | /* Pointer to first task blocking the */ | ||
| 52 | /* current expedited grace period, or NULL */ | ||
| 53 | /* if there is no such task. If there */ | ||
| 54 | /* is no current expedited grace period, */ | ||
| 55 | /* then there cannot be any such task. */ | ||
| 56 | u8 gpnum; /* Current grace period. */ | ||
| 57 | u8 gpcpu; /* Last grace period blocked by the CPU. */ | ||
| 58 | u8 completed; /* Last grace period completed. */ | ||
| 59 | /* If all three are equal, RCU is idle. */ | ||
| 60 | }; | ||
| 61 | |||
| 62 | static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = { | ||
| 63 | .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist, | ||
| 64 | .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist, | ||
| 65 | .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist, | ||
| 66 | .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks), | ||
| 67 | }; | ||
| 68 | |||
| 69 | static int rcu_preempted_readers_exp(void); | ||
| 70 | static void rcu_report_exp_done(void); | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Return true if the CPU has not yet responded to the current grace period. | ||
| 74 | */ | ||
| 75 | static int rcu_cpu_blocking_cur_gp(void) | ||
| 76 | { | ||
| 77 | return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum; | ||
| 78 | } | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Check for a running RCU reader. Because there is only one CPU, | ||
| 82 | * there can be but one running RCU reader at a time. ;-) | ||
| 83 | */ | ||
| 84 | static int rcu_preempt_running_reader(void) | ||
| 85 | { | ||
| 86 | return current->rcu_read_lock_nesting; | ||
| 87 | } | ||
| 88 | |||
| 89 | /* | ||
| 90 | * Check for preempted RCU readers blocking any grace period. | ||
| 91 | * If the caller needs a reliable answer, it must disable hard irqs. | ||
| 92 | */ | ||
| 93 | static int rcu_preempt_blocked_readers_any(void) | ||
| 94 | { | ||
| 95 | return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks); | ||
| 96 | } | ||
| 97 | |||
| 98 | /* | ||
| 99 | * Check for preempted RCU readers blocking the current grace period. | ||
| 100 | * If the caller needs a reliable answer, it must disable hard irqs. | ||
| 101 | */ | ||
| 102 | static int rcu_preempt_blocked_readers_cgp(void) | ||
| 103 | { | ||
| 104 | return rcu_preempt_ctrlblk.gp_tasks != NULL; | ||
| 105 | } | ||
| 106 | |||
| 107 | /* | ||
| 108 | * Return true if another preemptible-RCU grace period is needed. | ||
| 109 | */ | ||
| 110 | static int rcu_preempt_needs_another_gp(void) | ||
| 111 | { | ||
| 112 | return *rcu_preempt_ctrlblk.rcb.curtail != NULL; | ||
| 113 | } | ||
| 114 | |||
| 115 | /* | ||
| 116 | * Return true if a preemptible-RCU grace period is in progress. | ||
| 117 | * The caller must disable hardirqs. | ||
| 118 | */ | ||
| 119 | static int rcu_preempt_gp_in_progress(void) | ||
| 120 | { | ||
| 121 | return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum; | ||
| 122 | } | ||
| 123 | |||
| 124 | /* | ||
| 125 | * Record a preemptible-RCU quiescent state for the specified CPU. Note | ||
| 126 | * that this just means that the task currently running on the CPU is | ||
| 127 | * in a quiescent state. There might be any number of tasks blocked | ||
| 128 | * while in an RCU read-side critical section. | ||
| 129 | * | ||
| 130 | * Unlike the other rcu_*_qs() functions, callers to this function | ||
| 131 | * must disable irqs in order to protect the assignment to | ||
| 132 | * ->rcu_read_unlock_special. | ||
| 133 | * | ||
| 134 | * Because this is a single-CPU implementation, the only way a grace | ||
| 135 | * period can end is if the CPU is in a quiescent state. The reason is | ||
| 136 | * that a blocked preemptible-RCU reader can exit its critical section | ||
| 137 | * only if the CPU is running it at the time. Therefore, when the | ||
| 138 | * last task blocking the current grace period exits its RCU read-side | ||
| 139 | * critical section, neither the CPU nor blocked tasks will be stopping | ||
| 140 | * the current grace period. (In contrast, SMP implementations | ||
| 141 | * might have CPUs running in RCU read-side critical sections that | ||
| 142 | * block later grace periods -- but this is not possible given only | ||
| 143 | * one CPU.) | ||
| 144 | */ | ||
| 145 | static void rcu_preempt_cpu_qs(void) | ||
| 146 | { | ||
| 147 | /* Record both CPU and task as having responded to current GP. */ | ||
| 148 | rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum; | ||
| 149 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | ||
| 150 | |||
| 151 | /* | ||
| 152 | * If there is no GP, or if blocked readers are still blocking GP, | ||
| 153 | * then there is nothing more to do. | ||
| 154 | */ | ||
| 155 | if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp()) | ||
| 156 | return; | ||
| 157 | |||
| 158 | /* Advance callbacks. */ | ||
| 159 | rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum; | ||
| 160 | rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail; | ||
| 161 | rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail; | ||
| 162 | |||
| 163 | /* If there are no blocked readers, next GP is done instantly. */ | ||
| 164 | if (!rcu_preempt_blocked_readers_any()) | ||
| 165 | rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail; | ||
| 166 | |||
| 167 | /* If there are done callbacks, make RCU_SOFTIRQ process them. */ | ||
| 168 | if (*rcu_preempt_ctrlblk.rcb.donetail != NULL) | ||
| 169 | raise_softirq(RCU_SOFTIRQ); | ||
| 170 | } | ||
| 171 | |||
| 172 | /* | ||
| 173 | * Start a new RCU grace period if warranted. Hard irqs must be disabled. | ||
| 174 | */ | ||
| 175 | static void rcu_preempt_start_gp(void) | ||
| 176 | { | ||
| 177 | if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) { | ||
| 178 | |||
| 179 | /* Official start of GP. */ | ||
| 180 | rcu_preempt_ctrlblk.gpnum++; | ||
| 181 | |||
| 182 | /* Any blocked RCU readers block new GP. */ | ||
| 183 | if (rcu_preempt_blocked_readers_any()) | ||
| 184 | rcu_preempt_ctrlblk.gp_tasks = | ||
| 185 | rcu_preempt_ctrlblk.blkd_tasks.next; | ||
| 186 | |||
| 187 | /* If there is no running reader, CPU is done with GP. */ | ||
| 188 | if (!rcu_preempt_running_reader()) | ||
| 189 | rcu_preempt_cpu_qs(); | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 193 | /* | ||
| 194 | * We have entered the scheduler, and the current task might soon be | ||
| 195 | * context-switched away from. If this task is in an RCU read-side | ||
| 196 | * critical section, we will no longer be able to rely on the CPU to | ||
| 197 | * record that fact, so we enqueue the task on the blkd_tasks list. | ||
| 198 | * If the task started after the current grace period began, as recorded | ||
| 199 | * by ->gpcpu, we enqueue at the beginning of the list. Otherwise we | ||
| 200 | * enqueue before the element referenced by ->gp_tasks (or at the tail if | ||
| 201 | * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element. | ||
| 202 | * The task will dequeue itself when it exits the outermost enclosing | ||
| 203 | * RCU read-side critical section. Therefore, the current grace period | ||
| 204 | * cannot be permitted to complete until the ->gp_tasks pointer becomes | ||
| 205 | * NULL. | ||
| 206 | * | ||
| 207 | * Caller must disable preemption. | ||
| 208 | */ | ||
| 209 | void rcu_preempt_note_context_switch(void) | ||
| 210 | { | ||
| 211 | struct task_struct *t = current; | ||
| 212 | unsigned long flags; | ||
| 213 | |||
| 214 | local_irq_save(flags); /* must exclude scheduler_tick(). */ | ||
| 215 | if (rcu_preempt_running_reader() && | ||
| 216 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { | ||
| 217 | |||
| 218 | /* Possibly blocking in an RCU read-side critical section. */ | ||
| 219 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; | ||
| 220 | |||
| 221 | /* | ||
| 222 | * If this CPU has already checked in, then this task | ||
| 223 | * will hold up the next grace period rather than the | ||
| 224 | * current grace period. Queue the task accordingly. | ||
| 225 | * If the task is queued for the current grace period | ||
| 226 | * (i.e., this CPU has not yet passed through a quiescent | ||
| 227 | * state for the current grace period), then as long | ||
| 228 | * as that task remains queued, the current grace period | ||
| 229 | * cannot end. | ||
| 230 | */ | ||
| 231 | list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks); | ||
| 232 | if (rcu_cpu_blocking_cur_gp()) | ||
| 233 | rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry; | ||
| 234 | } | ||
| 235 | |||
| 236 | /* | ||
| 237 | * Either we were not in an RCU read-side critical section to | ||
| 238 | * begin with, or we have now recorded that critical section | ||
| 239 | * globally. Either way, we can now note a quiescent state | ||
| 240 | * for this CPU. Again, if we were in an RCU read-side critical | ||
| 241 | * section, and if that critical section was blocking the current | ||
| 242 | * grace period, then the fact that the task has been enqueued | ||
| 243 | * means that current grace period continues to be blocked. | ||
| 244 | */ | ||
| 245 | rcu_preempt_cpu_qs(); | ||
| 246 | local_irq_restore(flags); | ||
| 247 | } | ||
| 248 | |||
| 249 | /* | ||
| 250 | * Tiny-preemptible RCU implementation for rcu_read_lock(). | ||
| 251 | * Just increment ->rcu_read_lock_nesting, shared state will be updated | ||
| 252 | * if we block. | ||
| 253 | */ | ||
| 254 | void __rcu_read_lock(void) | ||
| 255 | { | ||
| 256 | current->rcu_read_lock_nesting++; | ||
| 257 | barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */ | ||
| 258 | } | ||
| 259 | EXPORT_SYMBOL_GPL(__rcu_read_lock); | ||
| 260 | |||
| 261 | /* | ||
| 262 | * Handle special cases during rcu_read_unlock(), such as needing to | ||
| 263 | * notify RCU core processing or task having blocked during the RCU | ||
| 264 | * read-side critical section. | ||
| 265 | */ | ||
| 266 | static void rcu_read_unlock_special(struct task_struct *t) | ||
| 267 | { | ||
| 268 | int empty; | ||
| 269 | int empty_exp; | ||
| 270 | unsigned long flags; | ||
| 271 | struct list_head *np; | ||
| 272 | int special; | ||
| 273 | |||
| 274 | /* | ||
| 275 | * NMI handlers cannot block and cannot safely manipulate state. | ||
| 276 | * They therefore cannot possibly be special, so just leave. | ||
| 277 | */ | ||
| 278 | if (in_nmi()) | ||
| 279 | return; | ||
| 280 | |||
| 281 | local_irq_save(flags); | ||
| 282 | |||
| 283 | /* | ||
| 284 | * If RCU core is waiting for this CPU to exit critical section, | ||
| 285 | * let it know that we have done so. | ||
| 286 | */ | ||
| 287 | special = t->rcu_read_unlock_special; | ||
| 288 | if (special & RCU_READ_UNLOCK_NEED_QS) | ||
| 289 | rcu_preempt_cpu_qs(); | ||
| 290 | |||
| 291 | /* Hardware IRQ handlers cannot block. */ | ||
| 292 | if (in_irq()) { | ||
| 293 | local_irq_restore(flags); | ||
| 294 | return; | ||
| 295 | } | ||
| 296 | |||
| 297 | /* Clean up if blocked during RCU read-side critical section. */ | ||
| 298 | if (special & RCU_READ_UNLOCK_BLOCKED) { | ||
| 299 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED; | ||
| 300 | |||
| 301 | /* | ||
| 302 | * Remove this task from the ->blkd_tasks list and adjust | ||
| 303 | * any pointers that might have been referencing it. | ||
| 304 | */ | ||
| 305 | empty = !rcu_preempt_blocked_readers_cgp(); | ||
| 306 | empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL; | ||
| 307 | np = t->rcu_node_entry.next; | ||
| 308 | if (np == &rcu_preempt_ctrlblk.blkd_tasks) | ||
| 309 | np = NULL; | ||
| 310 | list_del(&t->rcu_node_entry); | ||
| 311 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks) | ||
| 312 | rcu_preempt_ctrlblk.gp_tasks = np; | ||
| 313 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks) | ||
| 314 | rcu_preempt_ctrlblk.exp_tasks = np; | ||
| 315 | INIT_LIST_HEAD(&t->rcu_node_entry); | ||
| 316 | |||
| 317 | /* | ||
| 318 | * If this was the last task on the current list, and if | ||
| 319 | * we aren't waiting on the CPU, report the quiescent state | ||
| 320 | * and start a new grace period if needed. | ||
| 321 | */ | ||
| 322 | if (!empty && !rcu_preempt_blocked_readers_cgp()) { | ||
| 323 | rcu_preempt_cpu_qs(); | ||
| 324 | rcu_preempt_start_gp(); | ||
| 325 | } | ||
| 326 | |||
| 327 | /* | ||
| 328 | * If this was the last task on the expedited lists, | ||
| 329 | * then we need to wake up the waiting task. | ||
| 330 | */ | ||
| 331 | if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL) | ||
| 332 | rcu_report_exp_done(); | ||
| 333 | } | ||
| 334 | local_irq_restore(flags); | ||
| 335 | } | ||
| 336 | |||
| 337 | /* | ||
| 338 | * Tiny-preemptible RCU implementation for rcu_read_unlock(). | ||
| 339 | * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost | ||
| 340 | * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then | ||
| 341 | * invoke rcu_read_unlock_special() to clean up after a context switch | ||
| 342 | * in an RCU read-side critical section and other special cases. | ||
| 343 | */ | ||
| 344 | void __rcu_read_unlock(void) | ||
| 345 | { | ||
| 346 | struct task_struct *t = current; | ||
| 347 | |||
| 348 | barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */ | ||
| 349 | --t->rcu_read_lock_nesting; | ||
| 350 | barrier(); /* decrement before load of ->rcu_read_unlock_special */ | ||
| 351 | if (t->rcu_read_lock_nesting == 0 && | ||
| 352 | unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) | ||
| 353 | rcu_read_unlock_special(t); | ||
| 354 | #ifdef CONFIG_PROVE_LOCKING | ||
| 355 | WARN_ON_ONCE(t->rcu_read_lock_nesting < 0); | ||
| 356 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | ||
| 357 | } | ||
| 358 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); | ||
| 359 | |||
| 360 | /* | ||
| 361 | * Check for a quiescent state from the current CPU. When a task blocks, | ||
| 362 | * the task is recorded in the rcu_preempt_ctrlblk structure, which is | ||
| 363 | * checked elsewhere. This is called from the scheduling-clock interrupt. | ||
| 364 | * | ||
| 365 | * Caller must disable hard irqs. | ||
| 366 | */ | ||
| 367 | static void rcu_preempt_check_callbacks(void) | ||
| 368 | { | ||
| 369 | struct task_struct *t = current; | ||
| 370 | |||
| 371 | if (rcu_preempt_gp_in_progress() && | ||
| 372 | (!rcu_preempt_running_reader() || | ||
| 373 | !rcu_cpu_blocking_cur_gp())) | ||
| 374 | rcu_preempt_cpu_qs(); | ||
| 375 | if (&rcu_preempt_ctrlblk.rcb.rcucblist != | ||
| 376 | rcu_preempt_ctrlblk.rcb.donetail) | ||
| 377 | raise_softirq(RCU_SOFTIRQ); | ||
| 378 | if (rcu_preempt_gp_in_progress() && | ||
| 379 | rcu_cpu_blocking_cur_gp() && | ||
| 380 | rcu_preempt_running_reader()) | ||
| 381 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; | ||
| 382 | } | ||
| 383 | |||
| 384 | /* | ||
| 385 | * TINY_PREEMPT_RCU has an extra callback-list tail pointer to | ||
| 386 | * update, so this is invoked from __rcu_process_callbacks() to | ||
| 387 | * handle that case. Of course, it is invoked for all flavors of | ||
| 388 | * RCU, but RCU callbacks can appear only on one of the lists, and | ||
| 389 | * neither ->nexttail nor ->donetail can possibly be NULL, so there | ||
| 390 | * is no need for an explicit check. | ||
| 391 | */ | ||
| 392 | static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp) | ||
| 393 | { | ||
| 394 | if (rcu_preempt_ctrlblk.nexttail == rcp->donetail) | ||
| 395 | rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist; | ||
| 396 | } | ||
| 397 | |||
| 398 | /* | ||
| 399 | * Process callbacks for preemptible RCU. | ||
| 400 | */ | ||
| 401 | static void rcu_preempt_process_callbacks(void) | ||
| 402 | { | ||
| 403 | __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb); | ||
| 404 | } | ||
| 405 | |||
| 406 | /* | ||
| 407 | * Queue a preemptible-RCU callback for invocation after a grace period. | ||
| 408 | */ | ||
| 409 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
| 410 | { | ||
| 411 | unsigned long flags; | ||
| 412 | |||
| 413 | debug_rcu_head_queue(head); | ||
| 414 | head->func = func; | ||
| 415 | head->next = NULL; | ||
| 416 | |||
| 417 | local_irq_save(flags); | ||
| 418 | *rcu_preempt_ctrlblk.nexttail = head; | ||
| 419 | rcu_preempt_ctrlblk.nexttail = &head->next; | ||
| 420 | rcu_preempt_start_gp(); /* checks to see if GP needed. */ | ||
| 421 | local_irq_restore(flags); | ||
| 422 | } | ||
| 423 | EXPORT_SYMBOL_GPL(call_rcu); | ||
| 424 | |||
| 425 | void rcu_barrier(void) | ||
| 426 | { | ||
| 427 | struct rcu_synchronize rcu; | ||
| 428 | |||
| 429 | init_rcu_head_on_stack(&rcu.head); | ||
| 430 | init_completion(&rcu.completion); | ||
| 431 | /* Will wake me after RCU finished. */ | ||
| 432 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
| 433 | /* Wait for it. */ | ||
| 434 | wait_for_completion(&rcu.completion); | ||
| 435 | destroy_rcu_head_on_stack(&rcu.head); | ||
| 436 | } | ||
| 437 | EXPORT_SYMBOL_GPL(rcu_barrier); | ||
| 438 | |||
| 439 | /* | ||
| 440 | * synchronize_rcu - wait until a grace period has elapsed. | ||
| 441 | * | ||
| 442 | * Control will return to the caller some time after a full grace | ||
| 443 | * period has elapsed, in other words after all currently executing RCU | ||
| 444 | * read-side critical sections have completed. RCU read-side critical | ||
| 445 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
| 446 | * and may be nested. | ||
| 447 | */ | ||
| 448 | void synchronize_rcu(void) | ||
| 449 | { | ||
| 450 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 451 | if (!rcu_scheduler_active) | ||
| 452 | return; | ||
| 453 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
| 454 | |||
| 455 | WARN_ON_ONCE(rcu_preempt_running_reader()); | ||
| 456 | if (!rcu_preempt_blocked_readers_any()) | ||
| 457 | return; | ||
| 458 | |||
| 459 | /* Once we get past the fastpath checks, same code as rcu_barrier(). */ | ||
| 460 | rcu_barrier(); | ||
| 461 | } | ||
| 462 | EXPORT_SYMBOL_GPL(synchronize_rcu); | ||
| 463 | |||
| 464 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); | ||
| 465 | static unsigned long sync_rcu_preempt_exp_count; | ||
| 466 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); | ||
| 467 | |||
| 468 | /* | ||
| 469 | * Return non-zero if there are any tasks in RCU read-side critical | ||
| 470 | * sections blocking the current preemptible-RCU expedited grace period. | ||
| 471 | * If there is no preemptible-RCU expedited grace period currently in | ||
| 472 | * progress, returns zero unconditionally. | ||
| 473 | */ | ||
| 474 | static int rcu_preempted_readers_exp(void) | ||
| 475 | { | ||
| 476 | return rcu_preempt_ctrlblk.exp_tasks != NULL; | ||
| 477 | } | ||
| 478 | |||
| 479 | /* | ||
| 480 | * Report the exit from RCU read-side critical section for the last task | ||
| 481 | * that queued itself during or before the current expedited preemptible-RCU | ||
| 482 | * grace period. | ||
| 483 | */ | ||
| 484 | static void rcu_report_exp_done(void) | ||
| 485 | { | ||
| 486 | wake_up(&sync_rcu_preempt_exp_wq); | ||
| 487 | } | ||
| 488 | |||
| 489 | /* | ||
| 490 | * Wait for an rcu-preempt grace period, but expedite it. The basic idea | ||
| 491 | * is to rely on the fact that there is but one CPU, and that it is | ||
| 492 | * illegal for a task to invoke synchronize_rcu_expedited() while in a | ||
| 493 | * preemptible-RCU read-side critical section. Therefore, any such | ||
| 494 | * critical sections must correspond to blocked tasks, which must therefore | ||
| 495 | * be on the ->blkd_tasks list. So just record the current head of the | ||
| 496 | * list in the ->exp_tasks pointer, and wait for all tasks including and | ||
| 497 | * after the task pointed to by ->exp_tasks to drain. | ||
| 498 | */ | ||
| 499 | void synchronize_rcu_expedited(void) | ||
| 500 | { | ||
| 501 | unsigned long flags; | ||
| 502 | struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk; | ||
| 503 | unsigned long snap; | ||
| 504 | |||
| 505 | barrier(); /* ensure prior action seen before grace period. */ | ||
| 506 | |||
| 507 | WARN_ON_ONCE(rcu_preempt_running_reader()); | ||
| 508 | |||
| 509 | /* | ||
| 510 | * Acquire lock so that there is only one preemptible RCU grace | ||
| 511 | * period in flight. Of course, if someone does the expedited | ||
| 512 | * grace period for us while we are acquiring the lock, just leave. | ||
| 513 | */ | ||
| 514 | snap = sync_rcu_preempt_exp_count + 1; | ||
| 515 | mutex_lock(&sync_rcu_preempt_exp_mutex); | ||
| 516 | if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count)) | ||
| 517 | goto unlock_mb_ret; /* Others did our work for us. */ | ||
| 518 | |||
| 519 | local_irq_save(flags); | ||
| 520 | |||
| 521 | /* | ||
| 522 | * All RCU readers have to already be on blkd_tasks because | ||
| 523 | * we cannot legally be executing in an RCU read-side critical | ||
| 524 | * section. | ||
| 525 | */ | ||
| 526 | |||
| 527 | /* Snapshot current head of ->blkd_tasks list. */ | ||
| 528 | rpcp->exp_tasks = rpcp->blkd_tasks.next; | ||
| 529 | if (rpcp->exp_tasks == &rpcp->blkd_tasks) | ||
| 530 | rpcp->exp_tasks = NULL; | ||
| 531 | local_irq_restore(flags); | ||
| 532 | |||
| 533 | /* Wait for tail of ->blkd_tasks list to drain. */ | ||
| 534 | if (rcu_preempted_readers_exp()) | ||
| 535 | wait_event(sync_rcu_preempt_exp_wq, | ||
| 536 | !rcu_preempted_readers_exp()); | ||
| 537 | |||
| 538 | /* Clean up and exit. */ | ||
| 539 | barrier(); /* ensure expedited GP seen before counter increment. */ | ||
| 540 | sync_rcu_preempt_exp_count++; | ||
| 541 | unlock_mb_ret: | ||
| 542 | mutex_unlock(&sync_rcu_preempt_exp_mutex); | ||
| 543 | barrier(); /* ensure subsequent action seen after grace period. */ | ||
| 544 | } | ||
| 545 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
| 546 | |||
| 547 | /* | ||
| 548 | * Does preemptible RCU need the CPU to stay out of dynticks mode? | ||
| 549 | */ | ||
| 550 | int rcu_preempt_needs_cpu(void) | ||
| 551 | { | ||
| 552 | if (!rcu_preempt_running_reader()) | ||
| 553 | rcu_preempt_cpu_qs(); | ||
| 554 | return rcu_preempt_ctrlblk.rcb.rcucblist != NULL; | ||
| 555 | } | ||
| 556 | |||
| 557 | /* | ||
| 558 | * Check for a task exiting while in a preemptible-RCU read-side | ||
| 559 | * critical section, clean up if so. No need to issue warnings, | ||
| 560 | * as debug_check_no_locks_held() already does this if lockdep | ||
| 561 | * is enabled. | ||
| 562 | */ | ||
| 563 | void exit_rcu(void) | ||
| 564 | { | ||
| 565 | struct task_struct *t = current; | ||
| 566 | |||
| 567 | if (t->rcu_read_lock_nesting == 0) | ||
| 568 | return; | ||
| 569 | t->rcu_read_lock_nesting = 1; | ||
| 570 | rcu_read_unlock(); | ||
| 571 | } | ||
| 572 | |||
| 573 | #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */ | ||
| 574 | |||
| 575 | /* | ||
| 576 | * Because preemptible RCU does not exist, it never has any callbacks | ||
| 577 | * to check. | ||
| 578 | */ | ||
| 579 | static void rcu_preempt_check_callbacks(void) | ||
| 580 | { | ||
| 581 | } | ||
| 582 | |||
| 583 | /* | ||
| 584 | * Because preemptible RCU does not exist, it never has any callbacks | ||
| 585 | * to remove. | ||
| 586 | */ | ||
| 587 | static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp) | ||
| 588 | { | ||
| 589 | } | ||
| 590 | |||
| 591 | /* | ||
| 592 | * Because preemptible RCU does not exist, it never has any callbacks | ||
| 593 | * to process. | ||
| 594 | */ | ||
| 595 | static void rcu_preempt_process_callbacks(void) | ||
| 596 | { | ||
| 597 | } | ||
| 598 | |||
| 599 | #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */ | ||
| 600 | |||
| 25 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 601 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 26 | 602 | ||
| 27 | #include <linux/kernel_stat.h> | 603 | #include <linux/kernel_stat.h> |
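The new __rcu_read_lock()/__rcu_read_unlock() above defer all bookkeeping to the outermost unlock: a per-task nesting counter is bumped on lock, and only when it drops back to zero is the ->rcu_read_unlock_special flag consulted. The single-threaded sketch below shows only that control flow; the variable names are stand-ins, and preemption, irq state, and the blkd_tasks handling are all omitted.

#include <stdio.h>

/*
 * Single-threaded sketch of the nesting-counter idiom used by
 * __rcu_read_lock()/__rcu_read_unlock() above.  The two variables stand in
 * for ->rcu_read_lock_nesting and ->rcu_read_unlock_special; this is not
 * the kernel implementation.
 */
static int read_lock_nesting;
static int unlock_special;

static void reader_lock(void)
{
        read_lock_nesting++;
}

static void reader_unlock(void)
{
        --read_lock_nesting;
        /* Only the outermost unlock looks at the deferred-work flag. */
        if (read_lock_nesting == 0 && unlock_special) {
                unlock_special = 0;
                printf("outermost unlock: running deferred cleanup\n");
        }
}

int main(void)
{
        reader_lock();
        reader_lock();          /* nested read-side critical section */
        unlock_special = 1;     /* e.g. the task blocked while reading */
        reader_unlock();        /* inner unlock: nothing special happens */
        reader_unlock();        /* outermost unlock: cleanup runs here */
        return 0;
}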
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 2e2726d790b9..9d8e8fb2515f 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
| @@ -120,7 +120,7 @@ struct rcu_torture { | |||
| 120 | }; | 120 | }; |
| 121 | 121 | ||
| 122 | static LIST_HEAD(rcu_torture_freelist); | 122 | static LIST_HEAD(rcu_torture_freelist); |
| 123 | static struct rcu_torture *rcu_torture_current; | 123 | static struct rcu_torture __rcu *rcu_torture_current; |
| 124 | static long rcu_torture_current_version; | 124 | static long rcu_torture_current_version; |
| 125 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; | 125 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; |
| 126 | static DEFINE_SPINLOCK(rcu_torture_lock); | 126 | static DEFINE_SPINLOCK(rcu_torture_lock); |
| @@ -153,8 +153,10 @@ int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; | |||
| 153 | #define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */ | 153 | #define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */ |
| 154 | #define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */ | 154 | #define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */ |
| 155 | static int fullstop = FULLSTOP_RMMOD; | 155 | static int fullstop = FULLSTOP_RMMOD; |
| 156 | DEFINE_MUTEX(fullstop_mutex); /* Protect fullstop transitions and spawning */ | 156 | /* |
| 157 | /* of kthreads. */ | 157 | * Protect fullstop transitions and spawning of kthreads. |
| 158 | */ | ||
| 159 | static DEFINE_MUTEX(fullstop_mutex); | ||
| 158 | 160 | ||
| 159 | /* | 161 | /* |
| 160 | * Detect and respond to a system shutdown. | 162 | * Detect and respond to a system shutdown. |
| @@ -303,6 +305,10 @@ static void rcu_read_delay(struct rcu_random_state *rrsp) | |||
| 303 | mdelay(longdelay_ms); | 305 | mdelay(longdelay_ms); |
| 304 | if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) | 306 | if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) |
| 305 | udelay(shortdelay_us); | 307 | udelay(shortdelay_us); |
| 308 | #ifdef CONFIG_PREEMPT | ||
| 309 | if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000))) | ||
| 310 | preempt_schedule(); /* No QS if preempt_disable() in effect */ | ||
| 311 | #endif | ||
| 306 | } | 312 | } |
| 307 | 313 | ||
| 308 | static void rcu_torture_read_unlock(int idx) __releases(RCU) | 314 | static void rcu_torture_read_unlock(int idx) __releases(RCU) |
| @@ -536,6 +542,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp) | |||
| 536 | delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); | 542 | delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); |
| 537 | if (!delay) | 543 | if (!delay) |
| 538 | schedule_timeout_interruptible(longdelay); | 544 | schedule_timeout_interruptible(longdelay); |
| 545 | else | ||
| 546 | rcu_read_delay(rrsp); | ||
| 539 | } | 547 | } |
| 540 | 548 | ||
| 541 | static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) | 549 | static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) |
| @@ -731,7 +739,8 @@ rcu_torture_writer(void *arg) | |||
| 731 | continue; | 739 | continue; |
| 732 | rp->rtort_pipe_count = 0; | 740 | rp->rtort_pipe_count = 0; |
| 733 | udelay(rcu_random(&rand) & 0x3ff); | 741 | udelay(rcu_random(&rand) & 0x3ff); |
| 734 | old_rp = rcu_torture_current; | 742 | old_rp = rcu_dereference_check(rcu_torture_current, |
| 743 | current == writer_task); | ||
| 735 | rp->rtort_mbtest = 1; | 744 | rp->rtort_mbtest = 1; |
| 736 | rcu_assign_pointer(rcu_torture_current, rp); | 745 | rcu_assign_pointer(rcu_torture_current, rp); |
| 737 | smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ | 746 | smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ |
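The rcutorture.c hunk above marks rcu_torture_current as __rcu and reads it through rcu_dereference_check() while updates go through rcu_assign_pointer(). As a rough analogy only (an assumption for illustration, not the kernel implementation), the update side behaves like a release store and the read side like an acquire load; the userspace sketch below uses C11 atomics and made-up names to show that pairing.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Userspace analogy for the __rcu publish/subscribe pattern above:
 * rcu_assign_pointer() is modeled as a release store and the
 * rcu_dereference*() read side as an acquire load.  Sketch of the
 * memory-ordering idea only; names are invented.
 */
struct torture_item {
        int value;
};

static _Atomic(struct torture_item *) current_item;

/* Writer: fully initialize the object, then publish it. */
static void publish(struct torture_item *item)
{
        atomic_store_explicit(&current_item, item, memory_order_release);
}

/* Reader: the acquire load pairs with the writer's release store. */
static struct torture_item *subscribe(void)
{
        return atomic_load_explicit(&current_item, memory_order_acquire);
}

int main(void)
{
        static struct torture_item item = { .value = 42 };
        struct torture_item *p;

        publish(&item);
        p = subscribe();
        if (p)
                printf("value=%d\n", p->value);
        return 0;
}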
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index d5bc43976c5a..ccdc04c47981 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
| @@ -143,6 +143,11 @@ module_param(blimit, int, 0); | |||
| 143 | module_param(qhimark, int, 0); | 143 | module_param(qhimark, int, 0); |
| 144 | module_param(qlowmark, int, 0); | 144 | module_param(qlowmark, int, 0); |
| 145 | 145 | ||
| 146 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
| 147 | int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT; | ||
| 148 | module_param(rcu_cpu_stall_suppress, int, 0644); | ||
| 149 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
| 150 | |||
| 146 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); | 151 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); |
| 147 | static int rcu_pending(int cpu); | 152 | static int rcu_pending(int cpu); |
| 148 | 153 | ||
| @@ -450,7 +455,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
| 450 | 455 | ||
| 451 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 456 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
| 452 | 457 | ||
| 453 | int rcu_cpu_stall_panicking __read_mostly; | 458 | int rcu_cpu_stall_suppress __read_mostly; |
| 454 | 459 | ||
| 455 | static void record_gp_stall_check_time(struct rcu_state *rsp) | 460 | static void record_gp_stall_check_time(struct rcu_state *rsp) |
| 456 | { | 461 | { |
| @@ -482,8 +487,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
| 482 | rcu_print_task_stall(rnp); | 487 | rcu_print_task_stall(rnp); |
| 483 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 488 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
| 484 | 489 | ||
| 485 | /* OK, time to rat on our buddy... */ | 490 | /* |
| 486 | 491 | * OK, time to rat on our buddy... | |
| 492 | * See Documentation/RCU/stallwarn.txt for info on how to debug | ||
| 493 | * RCU CPU stall warnings. | ||
| 494 | */ | ||
| 487 | printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", | 495 | printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", |
| 488 | rsp->name); | 496 | rsp->name); |
| 489 | rcu_for_each_leaf_node(rsp, rnp) { | 497 | rcu_for_each_leaf_node(rsp, rnp) { |
| @@ -512,6 +520,11 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
| 512 | unsigned long flags; | 520 | unsigned long flags; |
| 513 | struct rcu_node *rnp = rcu_get_root(rsp); | 521 | struct rcu_node *rnp = rcu_get_root(rsp); |
| 514 | 522 | ||
| 523 | /* | ||
| 524 | * OK, time to rat on ourselves... | ||
| 525 | * See Documentation/RCU/stallwarn.txt for info on how to debug | ||
| 526 | * RCU CPU stall warnings. | ||
| 527 | */ | ||
| 515 | printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", | 528 | printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", |
| 516 | rsp->name, smp_processor_id(), jiffies - rsp->gp_start); | 529 | rsp->name, smp_processor_id(), jiffies - rsp->gp_start); |
| 517 | trigger_all_cpu_backtrace(); | 530 | trigger_all_cpu_backtrace(); |
| @@ -530,11 +543,11 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 530 | long delta; | 543 | long delta; |
| 531 | struct rcu_node *rnp; | 544 | struct rcu_node *rnp; |
| 532 | 545 | ||
| 533 | if (rcu_cpu_stall_panicking) | 546 | if (rcu_cpu_stall_suppress) |
| 534 | return; | 547 | return; |
| 535 | delta = jiffies - rsp->jiffies_stall; | 548 | delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall); |
| 536 | rnp = rdp->mynode; | 549 | rnp = rdp->mynode; |
| 537 | if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { | 550 | if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) { |
| 538 | 551 | ||
| 539 | /* We haven't checked in, so go dump stack. */ | 552 | /* We haven't checked in, so go dump stack. */ |
| 540 | print_cpu_stall(rsp); | 553 | print_cpu_stall(rsp); |
| @@ -548,10 +561,26 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 548 | 561 | ||
| 549 | static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) | 562 | static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) |
| 550 | { | 563 | { |
| 551 | rcu_cpu_stall_panicking = 1; | 564 | rcu_cpu_stall_suppress = 1; |
| 552 | return NOTIFY_DONE; | 565 | return NOTIFY_DONE; |
| 553 | } | 566 | } |
| 554 | 567 | ||
| 568 | /** | ||
| 569 | * rcu_cpu_stall_reset - prevent further stall warnings in current grace period | ||
| 570 | * | ||
| 571 | * Set the stall-warning timeout way off into the future, thus preventing | ||
| 572 | * any RCU CPU stall-warning messages from appearing in the current set of | ||
| 573 | * RCU grace periods. | ||
| 574 | * | ||
| 575 | * The caller must disable hard irqs. | ||
| 576 | */ | ||
| 577 | void rcu_cpu_stall_reset(void) | ||
| 578 | { | ||
| 579 | rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2; | ||
| 580 | rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2; | ||
| 581 | rcu_preempt_stall_reset(); | ||
| 582 | } | ||
| 583 | |||
| 555 | static struct notifier_block rcu_panic_block = { | 584 | static struct notifier_block rcu_panic_block = { |
| 556 | .notifier_call = rcu_panic, | 585 | .notifier_call = rcu_panic, |
| 557 | }; | 586 | }; |
| @@ -571,6 +600,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 571 | { | 600 | { |
| 572 | } | 601 | } |
| 573 | 602 | ||
| 603 | void rcu_cpu_stall_reset(void) | ||
| 604 | { | ||
| 605 | } | ||
| 606 | |||
| 574 | static void __init check_cpu_stall_init(void) | 607 | static void __init check_cpu_stall_init(void) |
| 575 | { | 608 | { |
| 576 | } | 609 | } |
| @@ -712,7 +745,7 @@ static void | |||
| 712 | rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | 745 | rcu_start_gp(struct rcu_state *rsp, unsigned long flags) |
| 713 | __releases(rcu_get_root(rsp)->lock) | 746 | __releases(rcu_get_root(rsp)->lock) |
| 714 | { | 747 | { |
| 715 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | 748 | struct rcu_data *rdp = this_cpu_ptr(rsp->rda); |
| 716 | struct rcu_node *rnp = rcu_get_root(rsp); | 749 | struct rcu_node *rnp = rcu_get_root(rsp); |
| 717 | 750 | ||
| 718 | if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) { | 751 | if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) { |
| @@ -960,7 +993,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 960 | static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) | 993 | static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) |
| 961 | { | 994 | { |
| 962 | int i; | 995 | int i; |
| 963 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | 996 | struct rcu_data *rdp = this_cpu_ptr(rsp->rda); |
| 964 | 997 | ||
| 965 | if (rdp->nxtlist == NULL) | 998 | if (rdp->nxtlist == NULL) |
| 966 | return; /* irqs disabled, so comparison is stable. */ | 999 | return; /* irqs disabled, so comparison is stable. */ |
| @@ -971,6 +1004,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) | |||
| 971 | for (i = 0; i < RCU_NEXT_SIZE; i++) | 1004 | for (i = 0; i < RCU_NEXT_SIZE; i++) |
| 972 | rdp->nxttail[i] = &rdp->nxtlist; | 1005 | rdp->nxttail[i] = &rdp->nxtlist; |
| 973 | rsp->orphan_qlen += rdp->qlen; | 1006 | rsp->orphan_qlen += rdp->qlen; |
| 1007 | rdp->n_cbs_orphaned += rdp->qlen; | ||
| 974 | rdp->qlen = 0; | 1008 | rdp->qlen = 0; |
| 975 | raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | 1009 | raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ |
| 976 | } | 1010 | } |
| @@ -984,7 +1018,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | |||
| 984 | struct rcu_data *rdp; | 1018 | struct rcu_data *rdp; |
| 985 | 1019 | ||
| 986 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | 1020 | raw_spin_lock_irqsave(&rsp->onofflock, flags); |
| 987 | rdp = rsp->rda[smp_processor_id()]; | 1021 | rdp = this_cpu_ptr(rsp->rda); |
| 988 | if (rsp->orphan_cbs_list == NULL) { | 1022 | if (rsp->orphan_cbs_list == NULL) { |
| 989 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); | 1023 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); |
| 990 | return; | 1024 | return; |
| @@ -992,6 +1026,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | |||
| 992 | *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; | 1026 | *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; |
| 993 | rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail; | 1027 | rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail; |
| 994 | rdp->qlen += rsp->orphan_qlen; | 1028 | rdp->qlen += rsp->orphan_qlen; |
| 1029 | rdp->n_cbs_adopted += rsp->orphan_qlen; | ||
| 995 | rsp->orphan_cbs_list = NULL; | 1030 | rsp->orphan_cbs_list = NULL; |
| 996 | rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; | 1031 | rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; |
| 997 | rsp->orphan_qlen = 0; | 1032 | rsp->orphan_qlen = 0; |
| @@ -1007,7 +1042,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
| 1007 | unsigned long flags; | 1042 | unsigned long flags; |
| 1008 | unsigned long mask; | 1043 | unsigned long mask; |
| 1009 | int need_report = 0; | 1044 | int need_report = 0; |
| 1010 | struct rcu_data *rdp = rsp->rda[cpu]; | 1045 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
| 1011 | struct rcu_node *rnp; | 1046 | struct rcu_node *rnp; |
| 1012 | 1047 | ||
| 1013 | /* Exclude any attempts to start a new grace period. */ | 1048 | /* Exclude any attempts to start a new grace period. */ |
| @@ -1123,6 +1158,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
| 1123 | 1158 | ||
| 1124 | /* Update count, and requeue any remaining callbacks. */ | 1159 | /* Update count, and requeue any remaining callbacks. */ |
| 1125 | rdp->qlen -= count; | 1160 | rdp->qlen -= count; |
| 1161 | rdp->n_cbs_invoked += count; | ||
| 1126 | if (list != NULL) { | 1162 | if (list != NULL) { |
| 1127 | *tail = rdp->nxtlist; | 1163 | *tail = rdp->nxtlist; |
| 1128 | rdp->nxtlist = list; | 1164 | rdp->nxtlist = list; |
| @@ -1226,7 +1262,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | |||
| 1226 | cpu = rnp->grplo; | 1262 | cpu = rnp->grplo; |
| 1227 | bit = 1; | 1263 | bit = 1; |
| 1228 | for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { | 1264 | for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { |
| 1229 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) | 1265 | if ((rnp->qsmask & bit) != 0 && |
| 1266 | f(per_cpu_ptr(rsp->rda, cpu))) | ||
| 1230 | mask |= bit; | 1267 | mask |= bit; |
| 1231 | } | 1268 | } |
| 1232 | if (mask != 0) { | 1269 | if (mask != 0) { |
| @@ -1402,7 +1439,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
| 1402 | * a quiescent state betweentimes. | 1439 | * a quiescent state betweentimes. |
| 1403 | */ | 1440 | */ |
| 1404 | local_irq_save(flags); | 1441 | local_irq_save(flags); |
| 1405 | rdp = rsp->rda[smp_processor_id()]; | 1442 | rdp = this_cpu_ptr(rsp->rda); |
| 1406 | rcu_process_gp_end(rsp, rdp); | 1443 | rcu_process_gp_end(rsp, rdp); |
| 1407 | check_for_new_grace_period(rsp, rdp); | 1444 | check_for_new_grace_period(rsp, rdp); |
| 1408 | 1445 | ||
| @@ -1701,7 +1738,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
| 1701 | { | 1738 | { |
| 1702 | unsigned long flags; | 1739 | unsigned long flags; |
| 1703 | int i; | 1740 | int i; |
| 1704 | struct rcu_data *rdp = rsp->rda[cpu]; | 1741 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
| 1705 | struct rcu_node *rnp = rcu_get_root(rsp); | 1742 | struct rcu_node *rnp = rcu_get_root(rsp); |
| 1706 | 1743 | ||
| 1707 | /* Set up local state, ensuring consistent view of global state. */ | 1744 | /* Set up local state, ensuring consistent view of global state. */ |
| @@ -1729,7 +1766,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | |||
| 1729 | { | 1766 | { |
| 1730 | unsigned long flags; | 1767 | unsigned long flags; |
| 1731 | unsigned long mask; | 1768 | unsigned long mask; |
| 1732 | struct rcu_data *rdp = rsp->rda[cpu]; | 1769 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
| 1733 | struct rcu_node *rnp = rcu_get_root(rsp); | 1770 | struct rcu_node *rnp = rcu_get_root(rsp); |
| 1734 | 1771 | ||
| 1735 | /* Set up local state, ensuring consistent view of global state. */ | 1772 | /* Set up local state, ensuring consistent view of global state. */ |
| @@ -1865,7 +1902,8 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) | |||
| 1865 | /* | 1902 | /* |
| 1866 | * Helper function for rcu_init() that initializes one rcu_state structure. | 1903 | * Helper function for rcu_init() that initializes one rcu_state structure. |
| 1867 | */ | 1904 | */ |
| 1868 | static void __init rcu_init_one(struct rcu_state *rsp) | 1905 | static void __init rcu_init_one(struct rcu_state *rsp, |
| 1906 | struct rcu_data __percpu *rda) | ||
| 1869 | { | 1907 | { |
| 1870 | static char *buf[] = { "rcu_node_level_0", | 1908 | static char *buf[] = { "rcu_node_level_0", |
| 1871 | "rcu_node_level_1", | 1909 | "rcu_node_level_1", |
| @@ -1918,37 +1956,23 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
| 1918 | } | 1956 | } |
| 1919 | } | 1957 | } |
| 1920 | 1958 | ||
| 1959 | rsp->rda = rda; | ||
| 1921 | rnp = rsp->level[NUM_RCU_LVLS - 1]; | 1960 | rnp = rsp->level[NUM_RCU_LVLS - 1]; |
| 1922 | for_each_possible_cpu(i) { | 1961 | for_each_possible_cpu(i) { |
| 1923 | while (i > rnp->grphi) | 1962 | while (i > rnp->grphi) |
| 1924 | rnp++; | 1963 | rnp++; |
| 1925 | rsp->rda[i]->mynode = rnp; | 1964 | per_cpu_ptr(rsp->rda, i)->mynode = rnp; |
| 1926 | rcu_boot_init_percpu_data(i, rsp); | 1965 | rcu_boot_init_percpu_data(i, rsp); |
| 1927 | } | 1966 | } |
| 1928 | } | 1967 | } |
| 1929 | 1968 | ||
| 1930 | /* | ||
| 1931 | * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used | ||
| 1932 | * nowhere else! Assigns leaf node pointers into each CPU's rcu_data | ||
| 1933 | * structure. | ||
| 1934 | */ | ||
| 1935 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ | ||
| 1936 | do { \ | ||
| 1937 | int i; \ | ||
| 1938 | \ | ||
| 1939 | for_each_possible_cpu(i) { \ | ||
| 1940 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ | ||
| 1941 | } \ | ||
| 1942 | rcu_init_one(rsp); \ | ||
| 1943 | } while (0) | ||
| 1944 | |||
| 1945 | void __init rcu_init(void) | 1969 | void __init rcu_init(void) |
| 1946 | { | 1970 | { |
| 1947 | int cpu; | 1971 | int cpu; |
| 1948 | 1972 | ||
| 1949 | rcu_bootup_announce(); | 1973 | rcu_bootup_announce(); |
| 1950 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); | 1974 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); |
| 1951 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | 1975 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); |
| 1952 | __rcu_init_preempt(); | 1976 | __rcu_init_preempt(); |
| 1953 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 1977 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
| 1954 | 1978 | ||
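Several rcutree.c hunks above replace the per-CPU array of rcu_data pointers (rsp->rda[cpu]) with a true __percpu pointer accessed through per_cpu_ptr()/this_cpu_ptr(). The sketch below models per-CPU data as nothing more than an array indexed by an explicit CPU id; the helper names are invented for illustration and are not the kernel API.

#include <stdio.h>

/*
 * Userspace model of the per-CPU conversion in the rcutree.c hunks above.
 * The helpers are illustrative stand-ins for per_cpu_ptr()/this_cpu_ptr(),
 * and "the current CPU" is just a parameter.
 */
#define NR_CPUS_MODEL 4

struct rcu_data_model {
        long qlen;                      /* callbacks queued on this CPU */
};

static struct rcu_data_model rda_storage[NR_CPUS_MODEL];

/* Roughly per_cpu_ptr(rda, cpu): address of CPU cpu's instance. */
static struct rcu_data_model *cpu_ptr(int cpu)
{
        return &rda_storage[cpu];
}

/* Roughly this_cpu_ptr(rda): the instance of the CPU we are running on. */
static struct rcu_data_model *my_cpu_ptr(int current_cpu)
{
        return cpu_ptr(current_cpu);
}

int main(void)
{
        int cpu;

        my_cpu_ptr(0)->qlen = 3;        /* "current" CPU queues callbacks */
        for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
                printf("cpu %d: qlen=%ld\n", cpu, cpu_ptr(cpu)->qlen);
        return 0;
}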
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 14c040b18ed0..91d4170c5c13 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
| @@ -202,6 +202,9 @@ struct rcu_data { | |||
| 202 | long qlen; /* # of queued callbacks */ | 202 | long qlen; /* # of queued callbacks */ |
| 203 | long qlen_last_fqs_check; | 203 | long qlen_last_fqs_check; |
| 204 | /* qlen at last check for QS forcing */ | 204 | /* qlen at last check for QS forcing */ |
| 205 | unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */ | ||
| 206 | unsigned long n_cbs_orphaned; /* RCU cbs sent to orphanage. */ | ||
| 207 | unsigned long n_cbs_adopted; /* RCU cbs adopted from orphanage. */ | ||
| 205 | unsigned long n_force_qs_snap; | 208 | unsigned long n_force_qs_snap; |
| 206 | /* did other CPU force QS recently? */ | 209 | /* did other CPU force QS recently? */ |
| 207 | long blimit; /* Upper limit on a processed batch */ | 210 | long blimit; /* Upper limit on a processed batch */ |
| @@ -254,19 +257,23 @@ struct rcu_data { | |||
| 254 | #define RCU_STALL_DELAY_DELTA 0 | 257 | #define RCU_STALL_DELAY_DELTA 0 |
| 255 | #endif | 258 | #endif |
| 256 | 259 | ||
| 257 | #define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ + RCU_STALL_DELAY_DELTA) | 260 | #define RCU_SECONDS_TILL_STALL_CHECK (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \ |
| 261 | RCU_STALL_DELAY_DELTA) | ||
| 258 | /* for rsp->jiffies_stall */ | 262 | /* for rsp->jiffies_stall */ |
| 259 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA) | 263 | #define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30) |
| 260 | /* for rsp->jiffies_stall */ | 264 | /* for rsp->jiffies_stall */ |
| 261 | #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ | 265 | #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ |
| 262 | /* to take at least one */ | 266 | /* to take at least one */ |
| 263 | /* scheduling clock irq */ | 267 | /* scheduling clock irq */ |
| 264 | /* before ratting on them. */ | 268 | /* before ratting on them. */ |
| 265 | 269 | ||
| 266 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 270 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE |
| 271 | #define RCU_CPU_STALL_SUPPRESS_INIT 0 | ||
| 272 | #else | ||
| 273 | #define RCU_CPU_STALL_SUPPRESS_INIT 1 | ||
| 274 | #endif | ||
| 267 | 275 | ||
| 268 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) | 276 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 269 | #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) | ||
| 270 | 277 | ||
| 271 | /* | 278 | /* |
| 272 | * RCU global state, including node hierarchy. This hierarchy is | 279 | * RCU global state, including node hierarchy. This hierarchy is |
| @@ -283,7 +290,7 @@ struct rcu_state { | |||
| 283 | struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ | 290 | struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ |
| 284 | u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ | 291 | u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ |
| 285 | u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ | 292 | u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ |
| 286 | struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ | 293 | struct rcu_data __percpu *rda; /* pointer to percpu rcu_data. */ |
| 287 | 294 | ||
| 288 | /* The following fields are guarded by the root rcu_node's lock. */ | 295 | /* The following fields are guarded by the root rcu_node's lock. */ |
| 289 | 296 | ||
| @@ -365,6 +372,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, | |||
| 365 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 372 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
| 366 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); | 373 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); |
| 367 | static void rcu_print_task_stall(struct rcu_node *rnp); | 374 | static void rcu_print_task_stall(struct rcu_node *rnp); |
| 375 | static void rcu_preempt_stall_reset(void); | ||
| 368 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 376 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 369 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); | 377 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); |
| 370 | #ifdef CONFIG_HOTPLUG_CPU | 378 | #ifdef CONFIG_HOTPLUG_CPU |
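The rcutree.h hunk above drops the ULONG_CMP_GE()/ULONG_CMP_LT() helpers from this header; the same wraparound-safe comparison appears in synchronize_rcu_expedited() in rcutiny_plugin.h and in the jiffies_stall resets in rcutree.c earlier in this diff. The small demo below is a standalone illustration (not from the patch) of why a plain "<" comparison misbehaves once an unsigned counter such as jiffies wraps.

#include <limits.h>
#include <stdio.h>

/*
 * Wraparound-safe comparison of free-running unsigned counters, as in the
 * ULONG_CMP_GE()/ULONG_CMP_LT() macros shown above.  Standalone demo only.
 */
#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
        unsigned long before_wrap = ULONG_MAX - 5;      /* e.g. old jiffies */
        unsigned long after_wrap = 10;                  /* jiffies after wrap */

        /* Plain "<" claims the older value is not behind the newer one... */
        printf("plain:   %d\n", before_wrap < after_wrap);              /* 0 */
        /* ...while the modular comparison gets both directions right. */
        printf("modular: %d\n", ULONG_CMP_LT(before_wrap, after_wrap)); /* 1 */
        printf("reverse: %d\n", ULONG_CMP_LT(after_wrap, before_wrap)); /* 0 */
        return 0;
}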
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 0e4f420245d9..71a4147473f9 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
| @@ -57,7 +57,7 @@ static void __init rcu_bootup_announce_oddness(void) | |||
| 57 | printk(KERN_INFO | 57 | printk(KERN_INFO |
| 58 | "\tRCU-based detection of stalled CPUs is disabled.\n"); | 58 | "\tRCU-based detection of stalled CPUs is disabled.\n"); |
| 59 | #endif | 59 | #endif |
| 60 | #ifndef CONFIG_RCU_CPU_STALL_VERBOSE | 60 | #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE) |
| 61 | printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); | 61 | printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); |
| 62 | #endif | 62 | #endif |
| 63 | #if NUM_RCU_LVL_4 != 0 | 63 | #if NUM_RCU_LVL_4 != 0 |
| @@ -154,7 +154,7 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
| 154 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { | 154 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { |
| 155 | 155 | ||
| 156 | /* Possibly blocking in an RCU read-side critical section. */ | 156 | /* Possibly blocking in an RCU read-side critical section. */ |
| 157 | rdp = rcu_preempt_state.rda[cpu]; | 157 | rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu); |
| 158 | rnp = rdp->mynode; | 158 | rnp = rdp->mynode; |
| 159 | raw_spin_lock_irqsave(&rnp->lock, flags); | 159 | raw_spin_lock_irqsave(&rnp->lock, flags); |
| 160 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; | 160 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; |
| @@ -201,7 +201,7 @@ static void rcu_preempt_note_context_switch(int cpu) | |||
| 201 | */ | 201 | */ |
| 202 | void __rcu_read_lock(void) | 202 | void __rcu_read_lock(void) |
| 203 | { | 203 | { |
| 204 | ACCESS_ONCE(current->rcu_read_lock_nesting)++; | 204 | current->rcu_read_lock_nesting++; |
| 205 | barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ | 205 | barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ |
| 206 | } | 206 | } |
| 207 | EXPORT_SYMBOL_GPL(__rcu_read_lock); | 207 | EXPORT_SYMBOL_GPL(__rcu_read_lock); |
| @@ -344,7 +344,9 @@ void __rcu_read_unlock(void) | |||
| 344 | struct task_struct *t = current; | 344 | struct task_struct *t = current; |
| 345 | 345 | ||
| 346 | barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */ | 346 | barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */ |
| 347 | if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 && | 347 | --t->rcu_read_lock_nesting; |
| 348 | barrier(); /* decrement before load of ->rcu_read_unlock_special */ | ||
| 349 | if (t->rcu_read_lock_nesting == 0 && | ||
| 348 | unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) | 350 | unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) |
| 349 | rcu_read_unlock_special(t); | 351 | rcu_read_unlock_special(t); |
| 350 | #ifdef CONFIG_PROVE_LOCKING | 352 | #ifdef CONFIG_PROVE_LOCKING |
| @@ -417,6 +419,16 @@ static void rcu_print_task_stall(struct rcu_node *rnp) | |||
| 417 | } | 419 | } |
| 418 | } | 420 | } |
| 419 | 421 | ||
| 422 | /* | ||
| 423 | * Suppress preemptible RCU's CPU stall warnings by pushing the | ||
| 424 | * time of the next stall-warning message comfortably far into the | ||
| 425 | * future. | ||
| 426 | */ | ||
| 427 | static void rcu_preempt_stall_reset(void) | ||
| 428 | { | ||
| 429 | rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2; | ||
| 430 | } | ||
| 431 | |||
| 420 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 432 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 421 | 433 | ||
| 422 | /* | 434 | /* |
| @@ -546,9 +558,11 @@ EXPORT_SYMBOL_GPL(call_rcu); | |||
| 546 | * | 558 | * |
| 547 | * Control will return to the caller some time after a full grace | 559 | * Control will return to the caller some time after a full grace |
| 548 | * period has elapsed, in other words after all currently executing RCU | 560 | * period has elapsed, in other words after all currently executing RCU |
| 549 | * read-side critical sections have completed. RCU read-side critical | 561 | * read-side critical sections have completed. Note, however, that |
| 550 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | 562 | * upon return from synchronize_rcu(), the caller might well be executing |
| 551 | * and may be nested. | 563 | * concurrently with new RCU read-side critical sections that began while |
| 564 | * synchronize_rcu() was waiting. RCU read-side critical sections are | ||
| 565 | * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. | ||
| 552 | */ | 566 | */ |
| 553 | void synchronize_rcu(void) | 567 | void synchronize_rcu(void) |
| 554 | { | 568 | { |
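The reworded kernel-doc above captures the key subtlety of synchronize_rcu(): it waits only for readers that were already inside an RCU read-side critical section when the grace period began; readers that start later may overlap the rest of the updater. A hedged sketch of the usual publish-then-reclaim pattern this guarantee supports (struct foo, gp, and both functions are illustrative, not part of this patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int a;
};

static struct foo __rcu *gp;		/* RCU-protected pointer      */
static DEFINE_SPINLOCK(gp_lock);	/* serializes updaters only   */

/* Reader: may run at any time, including concurrently with update_a(). */
static int read_a(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		val = p->a;
	rcu_read_unlock();
	return val;
}

/* Updater: publish the new version, wait out pre-existing readers, free. */
static int update_a(int new_a)
{
	struct foo *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (!newp)
		return -ENOMEM;
	newp->a = new_a;

	spin_lock(&gp_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);
	spin_unlock(&gp_lock);

	synchronize_rcu();	/* waits only for readers that could see oldp    */
	kfree(oldp);		/* readers that start later already see newp    */
	return 0;
}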
| @@ -771,7 +785,7 @@ static void rcu_preempt_send_cbs_to_orphanage(void) | |||
| 771 | */ | 785 | */ |
| 772 | static void __init __rcu_init_preempt(void) | 786 | static void __init __rcu_init_preempt(void) |
| 773 | { | 787 | { |
| 774 | RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); | 788 | rcu_init_one(&rcu_preempt_state, &rcu_preempt_data); |
| 775 | } | 789 | } |
| 776 | 790 | ||
| 777 | /* | 791 | /* |
| @@ -865,6 +879,14 @@ static void rcu_print_task_stall(struct rcu_node *rnp) | |||
| 865 | { | 879 | { |
| 866 | } | 880 | } |
| 867 | 881 | ||
| 882 | /* | ||
| 883 | * Because preemptible RCU does not exist, there is no need to suppress | ||
| 884 | * its CPU stall warnings. | ||
| 885 | */ | ||
| 886 | static void rcu_preempt_stall_reset(void) | ||
| 887 | { | ||
| 888 | } | ||
| 889 | |||
| 868 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 890 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
| 869 | 891 | ||
| 870 | /* | 892 | /* |
| @@ -919,15 +941,6 @@ static void rcu_preempt_process_callbacks(void) | |||
| 919 | } | 941 | } |
| 920 | 942 | ||
| 921 | /* | 943 | /* |
| 922 | * In classic RCU, call_rcu() is just call_rcu_sched(). | ||
| 923 | */ | ||
| 924 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
| 925 | { | ||
| 926 | call_rcu_sched(head, func); | ||
| 927 | } | ||
| 928 | EXPORT_SYMBOL_GPL(call_rcu); | ||
| 929 | |||
| 930 | /* | ||
| 931 | * Wait for an rcu-preempt grace period, but make it happen quickly. | 944 | * Wait for an rcu-preempt grace period, but make it happen quickly. |
| 932 | * But because preemptible RCU does not exist, map to rcu-sched. | 945 | * But because preemptible RCU does not exist, map to rcu-sched. |
| 933 | */ | 946 | */ |
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 36c95b45738e..d15430b9d122 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
| @@ -64,7 +64,9 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
| 64 | rdp->dynticks_fqs); | 64 | rdp->dynticks_fqs); |
| 65 | #endif /* #ifdef CONFIG_NO_HZ */ | 65 | #endif /* #ifdef CONFIG_NO_HZ */ |
| 66 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); | 66 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); |
| 67 | seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit); | 67 | seq_printf(m, " ql=%ld b=%ld", rdp->qlen, rdp->blimit); |
| 68 | seq_printf(m, " ci=%lu co=%lu ca=%lu\n", | ||
| 69 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); | ||
| 68 | } | 70 | } |
| 69 | 71 | ||
| 70 | #define PRINT_RCU_DATA(name, func, m) \ | 72 | #define PRINT_RCU_DATA(name, func, m) \ |
| @@ -119,7 +121,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) | |||
| 119 | rdp->dynticks_fqs); | 121 | rdp->dynticks_fqs); |
| 120 | #endif /* #ifdef CONFIG_NO_HZ */ | 122 | #endif /* #ifdef CONFIG_NO_HZ */ |
| 121 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); | 123 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); |
| 122 | seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit); | 124 | seq_printf(m, ",%ld,%ld", rdp->qlen, rdp->blimit); |
| 125 | seq_printf(m, ",%lu,%lu,%lu\n", | ||
| 126 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); | ||
| 123 | } | 127 | } |
| 124 | 128 | ||
| 125 | static int show_rcudata_csv(struct seq_file *m, void *unused) | 129 | static int show_rcudata_csv(struct seq_file *m, void *unused) |
| @@ -128,7 +132,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) | |||
| 128 | #ifdef CONFIG_NO_HZ | 132 | #ifdef CONFIG_NO_HZ |
| 129 | seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); | 133 | seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); |
| 130 | #endif /* #ifdef CONFIG_NO_HZ */ | 134 | #endif /* #ifdef CONFIG_NO_HZ */ |
| 131 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n"); | 135 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); |
| 132 | #ifdef CONFIG_TREE_PREEMPT_RCU | 136 | #ifdef CONFIG_TREE_PREEMPT_RCU |
| 133 | seq_puts(m, "\"rcu_preempt:\"\n"); | 137 | seq_puts(m, "\"rcu_preempt:\"\n"); |
| 134 | PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); | 138 | PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); |
| @@ -262,7 +266,7 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp) | |||
| 262 | struct rcu_data *rdp; | 266 | struct rcu_data *rdp; |
| 263 | 267 | ||
| 264 | for_each_possible_cpu(cpu) { | 268 | for_each_possible_cpu(cpu) { |
| 265 | rdp = rsp->rda[cpu]; | 269 | rdp = per_cpu_ptr(rsp->rda, cpu); |
| 266 | if (rdp->beenonline) | 270 | if (rdp->beenonline) |
| 267 | print_one_rcu_pending(m, rdp); | 271 | print_one_rcu_pending(m, rdp); |
| 268 | } | 272 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index ed09d4f2a69c..d42992bccdfa 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -426,9 +426,7 @@ struct root_domain { | |||
| 426 | */ | 426 | */ |
| 427 | cpumask_var_t rto_mask; | 427 | cpumask_var_t rto_mask; |
| 428 | atomic_t rto_count; | 428 | atomic_t rto_count; |
| 429 | #ifdef CONFIG_SMP | ||
| 430 | struct cpupri cpupri; | 429 | struct cpupri cpupri; |
| 431 | #endif | ||
| 432 | }; | 430 | }; |
| 433 | 431 | ||
| 434 | /* | 432 | /* |
| @@ -437,7 +435,7 @@ struct root_domain { | |||
| 437 | */ | 435 | */ |
| 438 | static struct root_domain def_root_domain; | 436 | static struct root_domain def_root_domain; |
| 439 | 437 | ||
| 440 | #endif | 438 | #endif /* CONFIG_SMP */ |
| 441 | 439 | ||
| 442 | /* | 440 | /* |
| 443 | * This is the main, per-CPU runqueue data structure. | 441 | * This is the main, per-CPU runqueue data structure. |
| @@ -488,11 +486,12 @@ struct rq { | |||
| 488 | */ | 486 | */ |
| 489 | unsigned long nr_uninterruptible; | 487 | unsigned long nr_uninterruptible; |
| 490 | 488 | ||
| 491 | struct task_struct *curr, *idle; | 489 | struct task_struct *curr, *idle, *stop; |
| 492 | unsigned long next_balance; | 490 | unsigned long next_balance; |
| 493 | struct mm_struct *prev_mm; | 491 | struct mm_struct *prev_mm; |
| 494 | 492 | ||
| 495 | u64 clock; | 493 | u64 clock; |
| 494 | u64 clock_task; | ||
| 496 | 495 | ||
| 497 | atomic_t nr_iowait; | 496 | atomic_t nr_iowait; |
| 498 | 497 | ||
| @@ -520,6 +519,10 @@ struct rq { | |||
| 520 | u64 avg_idle; | 519 | u64 avg_idle; |
| 521 | #endif | 520 | #endif |
| 522 | 521 | ||
| 522 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
| 523 | u64 prev_irq_time; | ||
| 524 | #endif | ||
| 525 | |||
| 523 | /* calc_load related fields */ | 526 | /* calc_load related fields */ |
| 524 | unsigned long calc_load_update; | 527 | unsigned long calc_load_update; |
| 525 | long calc_load_active; | 528 | long calc_load_active; |
| @@ -643,10 +646,22 @@ static inline struct task_group *task_group(struct task_struct *p) | |||
| 643 | 646 | ||
| 644 | #endif /* CONFIG_CGROUP_SCHED */ | 647 | #endif /* CONFIG_CGROUP_SCHED */ |
| 645 | 648 | ||
| 649 | static u64 irq_time_cpu(int cpu); | ||
| 650 | static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); | ||
| 651 | |||
| 646 | inline void update_rq_clock(struct rq *rq) | 652 | inline void update_rq_clock(struct rq *rq) |
| 647 | { | 653 | { |
| 648 | if (!rq->skip_clock_update) | 654 | if (!rq->skip_clock_update) { |
| 649 | rq->clock = sched_clock_cpu(cpu_of(rq)); | 655 | int cpu = cpu_of(rq); |
| 656 | u64 irq_time; | ||
| 657 | |||
| 658 | rq->clock = sched_clock_cpu(cpu); | ||
| 659 | irq_time = irq_time_cpu(cpu); | ||
| 660 | if (rq->clock - irq_time > rq->clock_task) | ||
| 661 | rq->clock_task = rq->clock - irq_time; | ||
| 662 | |||
| 663 | sched_irq_time_avg_update(rq, irq_time); | ||
| 664 | } | ||
| 650 | } | 665 | } |
| 651 | 666 | ||
| 652 | /* | 667 | /* |
| @@ -723,7 +738,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, | |||
| 723 | size_t cnt, loff_t *ppos) | 738 | size_t cnt, loff_t *ppos) |
| 724 | { | 739 | { |
| 725 | char buf[64]; | 740 | char buf[64]; |
| 726 | char *cmp = buf; | 741 | char *cmp; |
| 727 | int neg = 0; | 742 | int neg = 0; |
| 728 | int i; | 743 | int i; |
| 729 | 744 | ||
| @@ -734,6 +749,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, | |||
| 734 | return -EFAULT; | 749 | return -EFAULT; |
| 735 | 750 | ||
| 736 | buf[cnt] = 0; | 751 | buf[cnt] = 0; |
| 752 | cmp = strstrip(buf); | ||
| 737 | 753 | ||
| 738 | if (strncmp(buf, "NO_", 3) == 0) { | 754 | if (strncmp(buf, "NO_", 3) == 0) { |
| 739 | neg = 1; | 755 | neg = 1; |
| @@ -741,9 +757,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, | |||
| 741 | } | 757 | } |
| 742 | 758 | ||
| 743 | for (i = 0; sched_feat_names[i]; i++) { | 759 | for (i = 0; sched_feat_names[i]; i++) { |
| 744 | int len = strlen(sched_feat_names[i]); | 760 | if (strcmp(cmp, sched_feat_names[i]) == 0) { |
| 745 | |||
| 746 | if (strncmp(cmp, sched_feat_names[i], len) == 0) { | ||
| 747 | if (neg) | 761 | if (neg) |
| 748 | sysctl_sched_features &= ~(1UL << i); | 762 | sysctl_sched_features &= ~(1UL << i); |
| 749 | else | 763 | else |
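The parsing change above replaces a prefix comparison with strstrip() plus an exact strcmp(): with the old strncmp(cmp, name, strlen(name)) test, any written string that merely starts with a feature name (or carries a trailing newline) counted as a match, so features whose names are prefixes of one another could not be told apart. A small user-space sketch of the two behaviours, using a made-up feature table (the names and helpers are illustrative):

#include <stdio.h>
#include <string.h>

static const char *feat_names[] = { "GENTLE_FAIR_SLEEPERS", "START_DEBIT", NULL };

/* Old behaviour: prefix match, so "START_DEBIT_BOGUS\n" still "matches". */
static int match_prefix(const char *cmp)
{
	int i;

	for (i = 0; feat_names[i]; i++)
		if (strncmp(cmp, feat_names[i], strlen(feat_names[i])) == 0)
			return i;
	return -1;
}

/* New behaviour: trim at the first whitespace (crude strstrip() stand-in),
 * then require an exact match. */
static int match_exact(char *buf)
{
	int i;

	buf[strcspn(buf, " \t\n")] = '\0';
	for (i = 0; feat_names[i]; i++)
		if (strcmp(buf, feat_names[i]) == 0)
			return i;
	return -1;
}

int main(void)
{
	char good[]  = "START_DEBIT\n";
	char bogus[] = "START_DEBIT_BOGUS\n";

	printf("prefix good : %d\n", match_prefix(good));	/* 1               */
	printf("prefix bogus: %d\n", match_prefix(bogus));	/* 1: false match  */
	printf("exact  good : %d\n", match_exact(good));	/* 1               */
	printf("exact  bogus: %d\n", match_exact(bogus));	/* -1: rejected    */
	return 0;
}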
| @@ -1840,7 +1854,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | |||
| 1840 | 1854 | ||
| 1841 | static const struct sched_class rt_sched_class; | 1855 | static const struct sched_class rt_sched_class; |
| 1842 | 1856 | ||
| 1843 | #define sched_class_highest (&rt_sched_class) | 1857 | #define sched_class_highest (&stop_sched_class) |
| 1844 | #define for_each_class(class) \ | 1858 | #define for_each_class(class) \ |
| 1845 | for (class = sched_class_highest; class; class = class->next) | 1859 | for (class = sched_class_highest; class; class = class->next) |
| 1846 | 1860 | ||
| @@ -1858,12 +1872,6 @@ static void dec_nr_running(struct rq *rq) | |||
| 1858 | 1872 | ||
| 1859 | static void set_load_weight(struct task_struct *p) | 1873 | static void set_load_weight(struct task_struct *p) |
| 1860 | { | 1874 | { |
| 1861 | if (task_has_rt_policy(p)) { | ||
| 1862 | p->se.load.weight = 0; | ||
| 1863 | p->se.load.inv_weight = WMULT_CONST; | ||
| 1864 | return; | ||
| 1865 | } | ||
| 1866 | |||
| 1867 | /* | 1875 | /* |
| 1868 | * SCHED_IDLE tasks get minimal weight: | 1876 | * SCHED_IDLE tasks get minimal weight: |
| 1869 | */ | 1877 | */ |
| @@ -1917,13 +1925,132 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) | |||
| 1917 | dec_nr_running(rq); | 1925 | dec_nr_running(rq); |
| 1918 | } | 1926 | } |
| 1919 | 1927 | ||
| 1928 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
| 1929 | |||
| 1930 | /* | ||
| 1931 | * There are no locks covering percpu hardirq/softirq time. | ||
| 1932 | * They are only modified in account_system_vtime, on the corresponding CPU | ||
| 1933 | * with interrupts disabled. So, writes are safe. | ||
| 1934 | * They are read and saved off onto struct rq in update_rq_clock(). | ||
| 1935 | * This may result in another CPU reading this CPU's irq time while it is | ||
| 1936 | * being updated by irq/account_system_vtime on this CPU. The reader would | ||
| 1937 | * see either the old or the new value (or a partially updated value on | ||
| 1938 | * 32 bit), with the side effect of a slice of irq time being accounted to | ||
| 1939 | * the wrong task when an irq is in progress while we read rq->clock. That | ||
| 1940 | * is a worthy compromise in place of taking locks on each irq in account_system_time. | ||
| 1941 | */ | ||
| 1942 | static DEFINE_PER_CPU(u64, cpu_hardirq_time); | ||
| 1943 | static DEFINE_PER_CPU(u64, cpu_softirq_time); | ||
| 1944 | |||
| 1945 | static DEFINE_PER_CPU(u64, irq_start_time); | ||
| 1946 | static int sched_clock_irqtime; | ||
| 1947 | |||
| 1948 | void enable_sched_clock_irqtime(void) | ||
| 1949 | { | ||
| 1950 | sched_clock_irqtime = 1; | ||
| 1951 | } | ||
| 1952 | |||
| 1953 | void disable_sched_clock_irqtime(void) | ||
| 1954 | { | ||
| 1955 | sched_clock_irqtime = 0; | ||
| 1956 | } | ||
| 1957 | |||
| 1958 | static u64 irq_time_cpu(int cpu) | ||
| 1959 | { | ||
| 1960 | if (!sched_clock_irqtime) | ||
| 1961 | return 0; | ||
| 1962 | |||
| 1963 | return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); | ||
| 1964 | } | ||
| 1965 | |||
| 1966 | void account_system_vtime(struct task_struct *curr) | ||
| 1967 | { | ||
| 1968 | unsigned long flags; | ||
| 1969 | int cpu; | ||
| 1970 | u64 now, delta; | ||
| 1971 | |||
| 1972 | if (!sched_clock_irqtime) | ||
| 1973 | return; | ||
| 1974 | |||
| 1975 | local_irq_save(flags); | ||
| 1976 | |||
| 1977 | cpu = smp_processor_id(); | ||
| 1978 | now = sched_clock_cpu(cpu); | ||
| 1979 | delta = now - per_cpu(irq_start_time, cpu); | ||
| 1980 | per_cpu(irq_start_time, cpu) = now; | ||
| 1981 | /* | ||
| 1982 | * We do not account for softirq time from ksoftirqd here. | ||
| 1983 | * We want to continue accounting softirq time to ksoftirqd thread | ||
| 1984 | * in that case, so as not to confuse the scheduler with a special task | ||
| 1985 | * that does not consume any time but still wants to run. | ||
| 1986 | */ | ||
| 1987 | if (hardirq_count()) | ||
| 1988 | per_cpu(cpu_hardirq_time, cpu) += delta; | ||
| 1989 | else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) | ||
| 1990 | per_cpu(cpu_softirq_time, cpu) += delta; | ||
| 1991 | |||
| 1992 | local_irq_restore(flags); | ||
| 1993 | } | ||
| 1994 | EXPORT_SYMBOL_GPL(account_system_vtime); | ||
| 1995 | |||
| 1996 | static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) | ||
| 1997 | { | ||
| 1998 | if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) { | ||
| 1999 | u64 delta_irq = curr_irq_time - rq->prev_irq_time; | ||
| 2000 | rq->prev_irq_time = curr_irq_time; | ||
| 2001 | sched_rt_avg_update(rq, delta_irq); | ||
| 2002 | } | ||
| 2003 | } | ||
| 2004 | |||
| 2005 | #else | ||
| 2006 | |||
| 2007 | static u64 irq_time_cpu(int cpu) | ||
| 2008 | { | ||
| 2009 | return 0; | ||
| 2010 | } | ||
| 2011 | |||
| 2012 | static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { } | ||
| 2013 | |||
| 2014 | #endif | ||
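Putting the pieces together: account_system_vtime() accumulates per-CPU hardirq/softirq time, and update_rq_clock() subtracts that sum so rq->clock_task only advances by non-interrupt time (which the fair and RT classes then use for exec_start and delta_exec). A toy, single-CPU, user-space simulation of that flow; the fake clock and all names below are illustrative, not kernel code:

#include <stdio.h>

static unsigned long long fake_clock;	/* stands in for sched_clock_cpu()      */
static unsigned long long irq_time;	/* cpu_hardirq_time + cpu_softirq_time  */
static unsigned long long irq_start;
static unsigned long long rq_clock, rq_clock_task;

static void irq_enter_toy(void) { irq_start = fake_clock; }
static void irq_exit_toy(void)  { irq_time += fake_clock - irq_start; }

/* Mirrors the update_rq_clock() change: task time excludes irq time. */
static void update_rq_clock_toy(void)
{
	rq_clock = fake_clock;
	if (rq_clock - irq_time > rq_clock_task)
		rq_clock_task = rq_clock - irq_time;
}

int main(void)
{
	fake_clock = 100; update_rq_clock_toy();

	irq_enter_toy();
	fake_clock = 130;		/* 30 units spent in hardirq context */
	irq_exit_toy();

	fake_clock = 150; update_rq_clock_toy();

	/* 50 units of wall clock passed, but only 20 are charged to the task. */
	printf("clock=%llu clock_task=%llu irq_time=%llu\n",
	       rq_clock, rq_clock_task, irq_time);	/* 150 120 30 */
	return 0;
}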
| 2015 | |||
| 1920 | #include "sched_idletask.c" | 2016 | #include "sched_idletask.c" |
| 1921 | #include "sched_fair.c" | 2017 | #include "sched_fair.c" |
| 1922 | #include "sched_rt.c" | 2018 | #include "sched_rt.c" |
| 2019 | #include "sched_stoptask.c" | ||
| 1923 | #ifdef CONFIG_SCHED_DEBUG | 2020 | #ifdef CONFIG_SCHED_DEBUG |
| 1924 | # include "sched_debug.c" | 2021 | # include "sched_debug.c" |
| 1925 | #endif | 2022 | #endif |
| 1926 | 2023 | ||
| 2024 | void sched_set_stop_task(int cpu, struct task_struct *stop) | ||
| 2025 | { | ||
| 2026 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; | ||
| 2027 | struct task_struct *old_stop = cpu_rq(cpu)->stop; | ||
| 2028 | |||
| 2029 | if (stop) { | ||
| 2030 | /* | ||
| 2031 | * Make it appear like a SCHED_FIFO task; it's something | ||
| 2032 | * userspace knows about and won't get confused about. | ||
| 2033 | * | ||
| 2034 | * Also, it will make PI more or less work without too | ||
| 2035 | * much confusion -- but then, stop work should not | ||
| 2036 | * rely on PI working anyway. | ||
| 2037 | */ | ||
| 2038 | sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); | ||
| 2039 | |||
| 2040 | stop->sched_class = &stop_sched_class; | ||
| 2041 | } | ||
| 2042 | |||
| 2043 | cpu_rq(cpu)->stop = stop; | ||
| 2044 | |||
| 2045 | if (old_stop) { | ||
| 2046 | /* | ||
| 2047 | * Reset it back to a normal scheduling class so that | ||
| 2048 | * it can die in pieces. | ||
| 2049 | */ | ||
| 2050 | old_stop->sched_class = &rt_sched_class; | ||
| 2051 | } | ||
| 2052 | } | ||
| 2053 | |||
| 1927 | /* | 2054 | /* |
| 1928 | * __normal_prio - return the priority that is based on the static prio | 2055 | * __normal_prio - return the priority that is based on the static prio |
| 1929 | */ | 2056 | */ |
| @@ -2003,6 +2130,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
| 2003 | if (p->sched_class != &fair_sched_class) | 2130 | if (p->sched_class != &fair_sched_class) |
| 2004 | return 0; | 2131 | return 0; |
| 2005 | 2132 | ||
| 2133 | if (unlikely(p->policy == SCHED_IDLE)) | ||
| 2134 | return 0; | ||
| 2135 | |||
| 2006 | /* | 2136 | /* |
| 2007 | * Buddy candidates are cache hot: | 2137 | * Buddy candidates are cache hot: |
| 2008 | */ | 2138 | */ |
| @@ -2852,14 +2982,14 @@ context_switch(struct rq *rq, struct task_struct *prev, | |||
| 2852 | */ | 2982 | */ |
| 2853 | arch_start_context_switch(prev); | 2983 | arch_start_context_switch(prev); |
| 2854 | 2984 | ||
| 2855 | if (likely(!mm)) { | 2985 | if (!mm) { |
| 2856 | next->active_mm = oldmm; | 2986 | next->active_mm = oldmm; |
| 2857 | atomic_inc(&oldmm->mm_count); | 2987 | atomic_inc(&oldmm->mm_count); |
| 2858 | enter_lazy_tlb(oldmm, next); | 2988 | enter_lazy_tlb(oldmm, next); |
| 2859 | } else | 2989 | } else |
| 2860 | switch_mm(oldmm, mm, next); | 2990 | switch_mm(oldmm, mm, next); |
| 2861 | 2991 | ||
| 2862 | if (likely(!prev->mm)) { | 2992 | if (!prev->mm) { |
| 2863 | prev->active_mm = NULL; | 2993 | prev->active_mm = NULL; |
| 2864 | rq->prev_mm = oldmm; | 2994 | rq->prev_mm = oldmm; |
| 2865 | } | 2995 | } |
| @@ -3248,7 +3378,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) | |||
| 3248 | 3378 | ||
| 3249 | if (task_current(rq, p)) { | 3379 | if (task_current(rq, p)) { |
| 3250 | update_rq_clock(rq); | 3380 | update_rq_clock(rq); |
| 3251 | ns = rq->clock - p->se.exec_start; | 3381 | ns = rq->clock_task - p->se.exec_start; |
| 3252 | if ((s64)ns < 0) | 3382 | if ((s64)ns < 0) |
| 3253 | ns = 0; | 3383 | ns = 0; |
| 3254 | } | 3384 | } |
| @@ -3397,7 +3527,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
| 3397 | tmp = cputime_to_cputime64(cputime); | 3527 | tmp = cputime_to_cputime64(cputime); |
| 3398 | if (hardirq_count() - hardirq_offset) | 3528 | if (hardirq_count() - hardirq_offset) |
| 3399 | cpustat->irq = cputime64_add(cpustat->irq, tmp); | 3529 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
| 3400 | else if (softirq_count()) | 3530 | else if (in_serving_softirq()) |
| 3401 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); | 3531 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
| 3402 | else | 3532 | else |
| 3403 | cpustat->system = cputime64_add(cpustat->system, tmp); | 3533 | cpustat->system = cputime64_add(cpustat->system, tmp); |
| @@ -3513,9 +3643,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
| 3513 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); | 3643 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
| 3514 | 3644 | ||
| 3515 | if (total) { | 3645 | if (total) { |
| 3516 | u64 temp; | 3646 | u64 temp = rtime; |
| 3517 | 3647 | ||
| 3518 | temp = (u64)(rtime * utime); | 3648 | temp *= utime; |
| 3519 | do_div(temp, total); | 3649 | do_div(temp, total); |
| 3520 | utime = (cputime_t)temp; | 3650 | utime = (cputime_t)temp; |
| 3521 | } else | 3651 | } else |
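The `u64 temp = rtime; temp *= utime;` form above matters because the old `(u64)(rtime * utime)` multiplies in the width of cputime_t first and only widens a product that may already have wrapped; widening one operand before the multiply keeps the full 64-bit result for do_div(). A small sketch of the difference, assuming a 32-bit cputime_t purely for illustration:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t cputime_t;	/* pretend cputime_t is 32 bits wide */

int main(void)
{
	cputime_t rtime = 100000;	/* ~100k ticks of runtime  */
	cputime_t utime = 90000;	/* ~90k ticks of user time */

	/* Old form: the product 9e9 wraps modulo 2^32 before the cast. */
	uint64_t wrong = (uint64_t)(rtime * utime);

	/* New form: widen first, then multiply in 64 bits. */
	uint64_t right = rtime;
	right *= utime;

	printf("wrong = %llu\n", (unsigned long long)wrong);	/* 410065408  */
	printf("right = %llu\n", (unsigned long long)right);	/* 9000000000 */
	return 0;
}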
| @@ -3546,9 +3676,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
| 3546 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); | 3676 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
| 3547 | 3677 | ||
| 3548 | if (total) { | 3678 | if (total) { |
| 3549 | u64 temp; | 3679 | u64 temp = rtime; |
| 3550 | 3680 | ||
| 3551 | temp = (u64)(rtime * cputime.utime); | 3681 | temp *= cputime.utime; |
| 3552 | do_div(temp, total); | 3682 | do_div(temp, total); |
| 3553 | utime = (cputime_t)temp; | 3683 | utime = (cputime_t)temp; |
| 3554 | } else | 3684 | } else |
| @@ -3584,7 +3714,7 @@ void scheduler_tick(void) | |||
| 3584 | curr->sched_class->task_tick(rq, curr, 0); | 3714 | curr->sched_class->task_tick(rq, curr, 0); |
| 3585 | raw_spin_unlock(&rq->lock); | 3715 | raw_spin_unlock(&rq->lock); |
| 3586 | 3716 | ||
| 3587 | perf_event_task_tick(curr); | 3717 | perf_event_task_tick(); |
| 3588 | 3718 | ||
| 3589 | #ifdef CONFIG_SMP | 3719 | #ifdef CONFIG_SMP |
| 3590 | rq->idle_at_tick = idle_cpu(cpu); | 3720 | rq->idle_at_tick = idle_cpu(cpu); |
| @@ -3723,17 +3853,13 @@ pick_next_task(struct rq *rq) | |||
| 3723 | return p; | 3853 | return p; |
| 3724 | } | 3854 | } |
| 3725 | 3855 | ||
| 3726 | class = sched_class_highest; | 3856 | for_each_class(class) { |
| 3727 | for ( ; ; ) { | ||
| 3728 | p = class->pick_next_task(rq); | 3857 | p = class->pick_next_task(rq); |
| 3729 | if (p) | 3858 | if (p) |
| 3730 | return p; | 3859 | return p; |
| 3731 | /* | ||
| 3732 | * Will never be NULL as the idle class always | ||
| 3733 | * returns a non-NULL p: | ||
| 3734 | */ | ||
| 3735 | class = class->next; | ||
| 3736 | } | 3860 | } |
| 3861 | |||
| 3862 | BUG(); /* the idle class will always have a runnable task */ | ||
| 3737 | } | 3863 | } |
| 3738 | 3864 | ||
| 3739 | /* | 3865 | /* |
| @@ -4358,6 +4484,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
| 4358 | 4484 | ||
| 4359 | rq = task_rq_lock(p, &flags); | 4485 | rq = task_rq_lock(p, &flags); |
| 4360 | 4486 | ||
| 4487 | trace_sched_pi_setprio(p, prio); | ||
| 4361 | oldprio = p->prio; | 4488 | oldprio = p->prio; |
| 4362 | prev_class = p->sched_class; | 4489 | prev_class = p->sched_class; |
| 4363 | on_rq = p->se.on_rq; | 4490 | on_rq = p->se.on_rq; |
| @@ -4645,7 +4772,7 @@ recheck: | |||
| 4645 | } | 4772 | } |
| 4646 | 4773 | ||
| 4647 | if (user) { | 4774 | if (user) { |
| 4648 | retval = security_task_setscheduler(p, policy, param); | 4775 | retval = security_task_setscheduler(p); |
| 4649 | if (retval) | 4776 | if (retval) |
| 4650 | return retval; | 4777 | return retval; |
| 4651 | } | 4778 | } |
| @@ -4661,6 +4788,15 @@ recheck: | |||
| 4661 | */ | 4788 | */ |
| 4662 | rq = __task_rq_lock(p); | 4789 | rq = __task_rq_lock(p); |
| 4663 | 4790 | ||
| 4791 | /* | ||
| 4792 | * Changing the policy of the stop threads is a very bad idea | ||
| 4793 | */ | ||
| 4794 | if (p == rq->stop) { | ||
| 4795 | __task_rq_unlock(rq); | ||
| 4796 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | ||
| 4797 | return -EINVAL; | ||
| 4798 | } | ||
| 4799 | |||
| 4664 | #ifdef CONFIG_RT_GROUP_SCHED | 4800 | #ifdef CONFIG_RT_GROUP_SCHED |
| 4665 | if (user) { | 4801 | if (user) { |
| 4666 | /* | 4802 | /* |
| @@ -4887,13 +5023,13 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) | |||
| 4887 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) | 5023 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
| 4888 | goto out_unlock; | 5024 | goto out_unlock; |
| 4889 | 5025 | ||
| 4890 | retval = security_task_setscheduler(p, 0, NULL); | 5026 | retval = security_task_setscheduler(p); |
| 4891 | if (retval) | 5027 | if (retval) |
| 4892 | goto out_unlock; | 5028 | goto out_unlock; |
| 4893 | 5029 | ||
| 4894 | cpuset_cpus_allowed(p, cpus_allowed); | 5030 | cpuset_cpus_allowed(p, cpus_allowed); |
| 4895 | cpumask_and(new_mask, in_mask, cpus_allowed); | 5031 | cpumask_and(new_mask, in_mask, cpus_allowed); |
| 4896 | again: | 5032 | again: |
| 4897 | retval = set_cpus_allowed_ptr(p, new_mask); | 5033 | retval = set_cpus_allowed_ptr(p, new_mask); |
| 4898 | 5034 | ||
| 4899 | if (!retval) { | 5035 | if (!retval) { |
| @@ -5337,7 +5473,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
| 5337 | idle->se.exec_start = sched_clock(); | 5473 | idle->se.exec_start = sched_clock(); |
| 5338 | 5474 | ||
| 5339 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); | 5475 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
| 5476 | /* | ||
| 5477 | * We have a chicken-and-egg problem here: even though we are | ||
| 5478 | * holding rq->lock, the cpu isn't yet set to this cpu, so the | ||
| 5479 | * lockdep check in task_group() will fail. | ||
| 5480 | * | ||
| 5481 | * Similar case to sched_fork(). / Alternatively we could | ||
| 5482 | * use task_rq_lock() here and obtain the other rq->lock. | ||
| 5483 | * | ||
| 5484 | * Silence PROVE_RCU | ||
| 5485 | */ | ||
| 5486 | rcu_read_lock(); | ||
| 5340 | __set_task_cpu(idle, cpu); | 5487 | __set_task_cpu(idle, cpu); |
| 5488 | rcu_read_unlock(); | ||
| 5341 | 5489 | ||
| 5342 | rq->curr = rq->idle = idle; | 5490 | rq->curr = rq->idle = idle; |
| 5343 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 5491 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
| @@ -6514,6 +6662,7 @@ struct s_data { | |||
| 6514 | cpumask_var_t nodemask; | 6662 | cpumask_var_t nodemask; |
| 6515 | cpumask_var_t this_sibling_map; | 6663 | cpumask_var_t this_sibling_map; |
| 6516 | cpumask_var_t this_core_map; | 6664 | cpumask_var_t this_core_map; |
| 6665 | cpumask_var_t this_book_map; | ||
| 6517 | cpumask_var_t send_covered; | 6666 | cpumask_var_t send_covered; |
| 6518 | cpumask_var_t tmpmask; | 6667 | cpumask_var_t tmpmask; |
| 6519 | struct sched_group **sched_group_nodes; | 6668 | struct sched_group **sched_group_nodes; |
| @@ -6525,6 +6674,7 @@ enum s_alloc { | |||
| 6525 | sa_rootdomain, | 6674 | sa_rootdomain, |
| 6526 | sa_tmpmask, | 6675 | sa_tmpmask, |
| 6527 | sa_send_covered, | 6676 | sa_send_covered, |
| 6677 | sa_this_book_map, | ||
| 6528 | sa_this_core_map, | 6678 | sa_this_core_map, |
| 6529 | sa_this_sibling_map, | 6679 | sa_this_sibling_map, |
| 6530 | sa_nodemask, | 6680 | sa_nodemask, |
| @@ -6560,31 +6710,48 @@ cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, | |||
| 6560 | #ifdef CONFIG_SCHED_MC | 6710 | #ifdef CONFIG_SCHED_MC |
| 6561 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); | 6711 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
| 6562 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); | 6712 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
| 6563 | #endif /* CONFIG_SCHED_MC */ | ||
| 6564 | 6713 | ||
| 6565 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | ||
| 6566 | static int | 6714 | static int |
| 6567 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, | 6715 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
| 6568 | struct sched_group **sg, struct cpumask *mask) | 6716 | struct sched_group **sg, struct cpumask *mask) |
| 6569 | { | 6717 | { |
| 6570 | int group; | 6718 | int group; |
| 6571 | 6719 | #ifdef CONFIG_SCHED_SMT | |
| 6572 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); | 6720 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); |
| 6573 | group = cpumask_first(mask); | 6721 | group = cpumask_first(mask); |
| 6722 | #else | ||
| 6723 | group = cpu; | ||
| 6724 | #endif | ||
| 6574 | if (sg) | 6725 | if (sg) |
| 6575 | *sg = &per_cpu(sched_group_core, group).sg; | 6726 | *sg = &per_cpu(sched_group_core, group).sg; |
| 6576 | return group; | 6727 | return group; |
| 6577 | } | 6728 | } |
| 6578 | #elif defined(CONFIG_SCHED_MC) | 6729 | #endif /* CONFIG_SCHED_MC */ |
| 6730 | |||
| 6731 | /* | ||
| 6732 | * book sched-domains: | ||
| 6733 | */ | ||
| 6734 | #ifdef CONFIG_SCHED_BOOK | ||
| 6735 | static DEFINE_PER_CPU(struct static_sched_domain, book_domains); | ||
| 6736 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_book); | ||
| 6737 | |||
| 6579 | static int | 6738 | static int |
| 6580 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, | 6739 | cpu_to_book_group(int cpu, const struct cpumask *cpu_map, |
| 6581 | struct sched_group **sg, struct cpumask *unused) | 6740 | struct sched_group **sg, struct cpumask *mask) |
| 6582 | { | 6741 | { |
| 6742 | int group = cpu; | ||
| 6743 | #ifdef CONFIG_SCHED_MC | ||
| 6744 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); | ||
| 6745 | group = cpumask_first(mask); | ||
| 6746 | #elif defined(CONFIG_SCHED_SMT) | ||
| 6747 | cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); | ||
| 6748 | group = cpumask_first(mask); | ||
| 6749 | #endif | ||
| 6583 | if (sg) | 6750 | if (sg) |
| 6584 | *sg = &per_cpu(sched_group_core, cpu).sg; | 6751 | *sg = &per_cpu(sched_group_book, group).sg; |
| 6585 | return cpu; | 6752 | return group; |
| 6586 | } | 6753 | } |
| 6587 | #endif | 6754 | #endif /* CONFIG_SCHED_BOOK */ |
| 6588 | 6755 | ||
| 6589 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); | 6756 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
| 6590 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); | 6757 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
| @@ -6594,7 +6761,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, | |||
| 6594 | struct sched_group **sg, struct cpumask *mask) | 6761 | struct sched_group **sg, struct cpumask *mask) |
| 6595 | { | 6762 | { |
| 6596 | int group; | 6763 | int group; |
| 6597 | #ifdef CONFIG_SCHED_MC | 6764 | #ifdef CONFIG_SCHED_BOOK |
| 6765 | cpumask_and(mask, cpu_book_mask(cpu), cpu_map); | ||
| 6766 | group = cpumask_first(mask); | ||
| 6767 | #elif defined(CONFIG_SCHED_MC) | ||
| 6598 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); | 6768 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
| 6599 | group = cpumask_first(mask); | 6769 | group = cpumask_first(mask); |
| 6600 | #elif defined(CONFIG_SCHED_SMT) | 6770 | #elif defined(CONFIG_SCHED_SMT) |
| @@ -6855,6 +7025,9 @@ SD_INIT_FUNC(CPU) | |||
| 6855 | #ifdef CONFIG_SCHED_MC | 7025 | #ifdef CONFIG_SCHED_MC |
| 6856 | SD_INIT_FUNC(MC) | 7026 | SD_INIT_FUNC(MC) |
| 6857 | #endif | 7027 | #endif |
| 7028 | #ifdef CONFIG_SCHED_BOOK | ||
| 7029 | SD_INIT_FUNC(BOOK) | ||
| 7030 | #endif | ||
| 6858 | 7031 | ||
| 6859 | static int default_relax_domain_level = -1; | 7032 | static int default_relax_domain_level = -1; |
| 6860 | 7033 | ||
| @@ -6904,6 +7077,8 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, | |||
| 6904 | free_cpumask_var(d->tmpmask); /* fall through */ | 7077 | free_cpumask_var(d->tmpmask); /* fall through */ |
| 6905 | case sa_send_covered: | 7078 | case sa_send_covered: |
| 6906 | free_cpumask_var(d->send_covered); /* fall through */ | 7079 | free_cpumask_var(d->send_covered); /* fall through */ |
| 7080 | case sa_this_book_map: | ||
| 7081 | free_cpumask_var(d->this_book_map); /* fall through */ | ||
| 6907 | case sa_this_core_map: | 7082 | case sa_this_core_map: |
| 6908 | free_cpumask_var(d->this_core_map); /* fall through */ | 7083 | free_cpumask_var(d->this_core_map); /* fall through */ |
| 6909 | case sa_this_sibling_map: | 7084 | case sa_this_sibling_map: |
| @@ -6950,8 +7125,10 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, | |||
| 6950 | return sa_nodemask; | 7125 | return sa_nodemask; |
| 6951 | if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) | 7126 | if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) |
| 6952 | return sa_this_sibling_map; | 7127 | return sa_this_sibling_map; |
| 6953 | if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) | 7128 | if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL)) |
| 6954 | return sa_this_core_map; | 7129 | return sa_this_core_map; |
| 7130 | if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) | ||
| 7131 | return sa_this_book_map; | ||
| 6955 | if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) | 7132 | if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) |
| 6956 | return sa_send_covered; | 7133 | return sa_send_covered; |
| 6957 | d->rd = alloc_rootdomain(); | 7134 | d->rd = alloc_rootdomain(); |
| @@ -7009,6 +7186,23 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d, | |||
| 7009 | return sd; | 7186 | return sd; |
| 7010 | } | 7187 | } |
| 7011 | 7188 | ||
| 7189 | static struct sched_domain *__build_book_sched_domain(struct s_data *d, | ||
| 7190 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, | ||
| 7191 | struct sched_domain *parent, int i) | ||
| 7192 | { | ||
| 7193 | struct sched_domain *sd = parent; | ||
| 7194 | #ifdef CONFIG_SCHED_BOOK | ||
| 7195 | sd = &per_cpu(book_domains, i).sd; | ||
| 7196 | SD_INIT(sd, BOOK); | ||
| 7197 | set_domain_attribute(sd, attr); | ||
| 7198 | cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i)); | ||
| 7199 | sd->parent = parent; | ||
| 7200 | parent->child = sd; | ||
| 7201 | cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask); | ||
| 7202 | #endif | ||
| 7203 | return sd; | ||
| 7204 | } | ||
| 7205 | |||
| 7012 | static struct sched_domain *__build_mc_sched_domain(struct s_data *d, | 7206 | static struct sched_domain *__build_mc_sched_domain(struct s_data *d, |
| 7013 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, | 7207 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 7014 | struct sched_domain *parent, int i) | 7208 | struct sched_domain *parent, int i) |
| @@ -7066,6 +7260,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l, | |||
| 7066 | d->send_covered, d->tmpmask); | 7260 | d->send_covered, d->tmpmask); |
| 7067 | break; | 7261 | break; |
| 7068 | #endif | 7262 | #endif |
| 7263 | #ifdef CONFIG_SCHED_BOOK | ||
| 7264 | case SD_LV_BOOK: /* set up book groups */ | ||
| 7265 | cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu)); | ||
| 7266 | if (cpu == cpumask_first(d->this_book_map)) | ||
| 7267 | init_sched_build_groups(d->this_book_map, cpu_map, | ||
| 7268 | &cpu_to_book_group, | ||
| 7269 | d->send_covered, d->tmpmask); | ||
| 7270 | break; | ||
| 7271 | #endif | ||
| 7069 | case SD_LV_CPU: /* set up physical groups */ | 7272 | case SD_LV_CPU: /* set up physical groups */ |
| 7070 | cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); | 7273 | cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); |
| 7071 | if (!cpumask_empty(d->nodemask)) | 7274 | if (!cpumask_empty(d->nodemask)) |
| @@ -7113,12 +7316,14 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7113 | 7316 | ||
| 7114 | sd = __build_numa_sched_domains(&d, cpu_map, attr, i); | 7317 | sd = __build_numa_sched_domains(&d, cpu_map, attr, i); |
| 7115 | sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); | 7318 | sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); |
| 7319 | sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); | ||
| 7116 | sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); | 7320 | sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); |
| 7117 | sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); | 7321 | sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); |
| 7118 | } | 7322 | } |
| 7119 | 7323 | ||
| 7120 | for_each_cpu(i, cpu_map) { | 7324 | for_each_cpu(i, cpu_map) { |
| 7121 | build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); | 7325 | build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); |
| 7326 | build_sched_groups(&d, SD_LV_BOOK, cpu_map, i); | ||
| 7122 | build_sched_groups(&d, SD_LV_MC, cpu_map, i); | 7327 | build_sched_groups(&d, SD_LV_MC, cpu_map, i); |
| 7123 | } | 7328 | } |
| 7124 | 7329 | ||
| @@ -7149,6 +7354,12 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7149 | init_sched_groups_power(i, sd); | 7354 | init_sched_groups_power(i, sd); |
| 7150 | } | 7355 | } |
| 7151 | #endif | 7356 | #endif |
| 7357 | #ifdef CONFIG_SCHED_BOOK | ||
| 7358 | for_each_cpu(i, cpu_map) { | ||
| 7359 | sd = &per_cpu(book_domains, i).sd; | ||
| 7360 | init_sched_groups_power(i, sd); | ||
| 7361 | } | ||
| 7362 | #endif | ||
| 7152 | 7363 | ||
| 7153 | for_each_cpu(i, cpu_map) { | 7364 | for_each_cpu(i, cpu_map) { |
| 7154 | sd = &per_cpu(phys_domains, i).sd; | 7365 | sd = &per_cpu(phys_domains, i).sd; |
| @@ -7174,6 +7385,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7174 | sd = &per_cpu(cpu_domains, i).sd; | 7385 | sd = &per_cpu(cpu_domains, i).sd; |
| 7175 | #elif defined(CONFIG_SCHED_MC) | 7386 | #elif defined(CONFIG_SCHED_MC) |
| 7176 | sd = &per_cpu(core_domains, i).sd; | 7387 | sd = &per_cpu(core_domains, i).sd; |
| 7388 | #elif defined(CONFIG_SCHED_BOOK) | ||
| 7389 | sd = &per_cpu(book_domains, i).sd; | ||
| 7177 | #else | 7390 | #else |
| 7178 | sd = &per_cpu(phys_domains, i).sd; | 7391 | sd = &per_cpu(phys_domains, i).sd; |
| 7179 | #endif | 7392 | #endif |
| @@ -8078,9 +8291,9 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
| 8078 | 8291 | ||
| 8079 | return 1; | 8292 | return 1; |
| 8080 | 8293 | ||
| 8081 | err_free_rq: | 8294 | err_free_rq: |
| 8082 | kfree(cfs_rq); | 8295 | kfree(cfs_rq); |
| 8083 | err: | 8296 | err: |
| 8084 | return 0; | 8297 | return 0; |
| 8085 | } | 8298 | } |
| 8086 | 8299 | ||
| @@ -8168,9 +8381,9 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
| 8168 | 8381 | ||
| 8169 | return 1; | 8382 | return 1; |
| 8170 | 8383 | ||
| 8171 | err_free_rq: | 8384 | err_free_rq: |
| 8172 | kfree(rt_rq); | 8385 | kfree(rt_rq); |
| 8173 | err: | 8386 | err: |
| 8174 | return 0; | 8387 | return 0; |
| 8175 | } | 8388 | } |
| 8176 | 8389 | ||
| @@ -8528,7 +8741,7 @@ static int tg_set_bandwidth(struct task_group *tg, | |||
| 8528 | raw_spin_unlock(&rt_rq->rt_runtime_lock); | 8741 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
| 8529 | } | 8742 | } |
| 8530 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 8743 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
| 8531 | unlock: | 8744 | unlock: |
| 8532 | read_unlock(&tasklist_lock); | 8745 | read_unlock(&tasklist_lock); |
| 8533 | mutex_unlock(&rt_constraints_mutex); | 8746 | mutex_unlock(&rt_constraints_mutex); |
| 8534 | 8747 | ||
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 9b5b4f86b742..933f3d1b62ea 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | /* | 26 | /* |
| 27 | * Targeted preemption latency for CPU-bound tasks: | 27 | * Targeted preemption latency for CPU-bound tasks: |
| 28 | * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds) | 28 | * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) |
| 29 | * | 29 | * |
| 30 | * NOTE: this latency value is not the same as the concept of | 30 | * NOTE: this latency value is not the same as the concept of |
| 31 | * 'timeslice length' - timeslices in CFS are of variable length | 31 | * 'timeslice length' - timeslices in CFS are of variable length |
| @@ -52,15 +52,15 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling | |||
| 52 | 52 | ||
| 53 | /* | 53 | /* |
| 54 | * Minimal preemption granularity for CPU-bound tasks: | 54 | * Minimal preemption granularity for CPU-bound tasks: |
| 55 | * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) | 55 | * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) |
| 56 | */ | 56 | */ |
| 57 | unsigned int sysctl_sched_min_granularity = 2000000ULL; | 57 | unsigned int sysctl_sched_min_granularity = 750000ULL; |
| 58 | unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL; | 58 | unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; |
| 59 | 59 | ||
| 60 | /* | 60 | /* |
| 61 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity | 61 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity |
| 62 | */ | 62 | */ |
| 63 | static unsigned int sched_nr_latency = 3; | 63 | static unsigned int sched_nr_latency = 8; |
| 64 | 64 | ||
| 65 | /* | 65 | /* |
| 66 | * After fork, child runs first. If set to 0 (default) then | 66 | * After fork, child runs first. If set to 0 (default) then |
| @@ -519,7 +519,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, | |||
| 519 | static void update_curr(struct cfs_rq *cfs_rq) | 519 | static void update_curr(struct cfs_rq *cfs_rq) |
| 520 | { | 520 | { |
| 521 | struct sched_entity *curr = cfs_rq->curr; | 521 | struct sched_entity *curr = cfs_rq->curr; |
| 522 | u64 now = rq_of(cfs_rq)->clock; | 522 | u64 now = rq_of(cfs_rq)->clock_task; |
| 523 | unsigned long delta_exec; | 523 | unsigned long delta_exec; |
| 524 | 524 | ||
| 525 | if (unlikely(!curr)) | 525 | if (unlikely(!curr)) |
| @@ -602,7 +602,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 602 | /* | 602 | /* |
| 603 | * We are starting a new run period: | 603 | * We are starting a new run period: |
| 604 | */ | 604 | */ |
| 605 | se->exec_start = rq_of(cfs_rq)->clock; | 605 | se->exec_start = rq_of(cfs_rq)->clock_task; |
| 606 | } | 606 | } |
| 607 | 607 | ||
| 608 | /************************************************** | 608 | /************************************************** |
| @@ -1764,6 +1764,10 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, | |||
| 1764 | set_task_cpu(p, this_cpu); | 1764 | set_task_cpu(p, this_cpu); |
| 1765 | activate_task(this_rq, p, 0); | 1765 | activate_task(this_rq, p, 0); |
| 1766 | check_preempt_curr(this_rq, p, 0); | 1766 | check_preempt_curr(this_rq, p, 0); |
| 1767 | |||
| 1768 | /* re-arm NEWIDLE balancing when moving tasks */ | ||
| 1769 | src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost; | ||
| 1770 | this_rq->idle_stamp = 0; | ||
| 1767 | } | 1771 | } |
| 1768 | 1772 | ||
| 1769 | /* | 1773 | /* |
| @@ -1798,7 +1802,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | |||
| 1798 | * 2) too many balance attempts have failed. | 1802 | * 2) too many balance attempts have failed. |
| 1799 | */ | 1803 | */ |
| 1800 | 1804 | ||
| 1801 | tsk_cache_hot = task_hot(p, rq->clock, sd); | 1805 | tsk_cache_hot = task_hot(p, rq->clock_task, sd); |
| 1802 | if (!tsk_cache_hot || | 1806 | if (!tsk_cache_hot || |
| 1803 | sd->nr_balance_failed > sd->cache_nice_tries) { | 1807 | sd->nr_balance_failed > sd->cache_nice_tries) { |
| 1804 | #ifdef CONFIG_SCHEDSTATS | 1808 | #ifdef CONFIG_SCHEDSTATS |
| @@ -2030,12 +2034,14 @@ struct sd_lb_stats { | |||
| 2030 | unsigned long this_load; | 2034 | unsigned long this_load; |
| 2031 | unsigned long this_load_per_task; | 2035 | unsigned long this_load_per_task; |
| 2032 | unsigned long this_nr_running; | 2036 | unsigned long this_nr_running; |
| 2037 | unsigned long this_has_capacity; | ||
| 2033 | 2038 | ||
| 2034 | /* Statistics of the busiest group */ | 2039 | /* Statistics of the busiest group */ |
| 2035 | unsigned long max_load; | 2040 | unsigned long max_load; |
| 2036 | unsigned long busiest_load_per_task; | 2041 | unsigned long busiest_load_per_task; |
| 2037 | unsigned long busiest_nr_running; | 2042 | unsigned long busiest_nr_running; |
| 2038 | unsigned long busiest_group_capacity; | 2043 | unsigned long busiest_group_capacity; |
| 2044 | unsigned long busiest_has_capacity; | ||
| 2039 | 2045 | ||
| 2040 | int group_imb; /* Is there imbalance in this sd */ | 2046 | int group_imb; /* Is there imbalance in this sd */ |
| 2041 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 2047 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
| @@ -2058,6 +2064,7 @@ struct sg_lb_stats { | |||
| 2058 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ | 2064 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ |
| 2059 | unsigned long group_capacity; | 2065 | unsigned long group_capacity; |
| 2060 | int group_imb; /* Is there an imbalance in the group ? */ | 2066 | int group_imb; /* Is there an imbalance in the group ? */ |
| 2067 | int group_has_capacity; /* Is there extra capacity in the group? */ | ||
| 2061 | }; | 2068 | }; |
| 2062 | 2069 | ||
| 2063 | /** | 2070 | /** |
| @@ -2268,7 +2275,13 @@ unsigned long scale_rt_power(int cpu) | |||
| 2268 | u64 total, available; | 2275 | u64 total, available; |
| 2269 | 2276 | ||
| 2270 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | 2277 | total = sched_avg_period() + (rq->clock - rq->age_stamp); |
| 2271 | available = total - rq->rt_avg; | 2278 | |
| 2279 | if (unlikely(total < rq->rt_avg)) { | ||
| 2280 | /* Ensures that power won't end up being negative */ | ||
| 2281 | available = 0; | ||
| 2282 | } else { | ||
| 2283 | available = total - rq->rt_avg; | ||
| 2284 | } | ||
| 2272 | 2285 | ||
| 2273 | if (unlikely((s64)total < SCHED_LOAD_SCALE)) | 2286 | if (unlikely((s64)total < SCHED_LOAD_SCALE)) |
| 2274 | total = SCHED_LOAD_SCALE; | 2287 | total = SCHED_LOAD_SCALE; |
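The new branch in scale_rt_power() guards against unsigned wraparound: total and rq->rt_avg are u64, so if rt_avg ever exceeds the freshly sampled total, the plain subtraction would not go negative but wrap to an enormous value, inflating the computed power instead of clamping it at zero. A minimal illustration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t total  = 1000;
	uint64_t rt_avg = 1500;		/* more rt/irq time than the sample window */
	uint64_t available;

	/* Unchecked subtraction wraps to ~1.8e19 instead of "going negative". */
	available = total - rt_avg;
	printf("unchecked: %llu\n", (unsigned long long)available);

	/* Guarded form, equivalent to the patched scale_rt_power(). */
	available = (total < rt_avg) ? 0 : total - rt_avg;
	printf("guarded:   %llu\n", (unsigned long long)available);
	return 0;
}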
| @@ -2378,7 +2391,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, | |||
| 2378 | int local_group, const struct cpumask *cpus, | 2391 | int local_group, const struct cpumask *cpus, |
| 2379 | int *balance, struct sg_lb_stats *sgs) | 2392 | int *balance, struct sg_lb_stats *sgs) |
| 2380 | { | 2393 | { |
| 2381 | unsigned long load, max_cpu_load, min_cpu_load; | 2394 | unsigned long load, max_cpu_load, min_cpu_load, max_nr_running; |
| 2382 | int i; | 2395 | int i; |
| 2383 | unsigned int balance_cpu = -1, first_idle_cpu = 0; | 2396 | unsigned int balance_cpu = -1, first_idle_cpu = 0; |
| 2384 | unsigned long avg_load_per_task = 0; | 2397 | unsigned long avg_load_per_task = 0; |
| @@ -2389,6 +2402,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, | |||
| 2389 | /* Tally up the load of all CPUs in the group */ | 2402 | /* Tally up the load of all CPUs in the group */ |
| 2390 | max_cpu_load = 0; | 2403 | max_cpu_load = 0; |
| 2391 | min_cpu_load = ~0UL; | 2404 | min_cpu_load = ~0UL; |
| 2405 | max_nr_running = 0; | ||
| 2392 | 2406 | ||
| 2393 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { | 2407 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { |
| 2394 | struct rq *rq = cpu_rq(i); | 2408 | struct rq *rq = cpu_rq(i); |
| @@ -2406,8 +2420,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, | |||
| 2406 | load = target_load(i, load_idx); | 2420 | load = target_load(i, load_idx); |
| 2407 | } else { | 2421 | } else { |
| 2408 | load = source_load(i, load_idx); | 2422 | load = source_load(i, load_idx); |
| 2409 | if (load > max_cpu_load) | 2423 | if (load > max_cpu_load) { |
| 2410 | max_cpu_load = load; | 2424 | max_cpu_load = load; |
| 2425 | max_nr_running = rq->nr_running; | ||
| 2426 | } | ||
| 2411 | if (min_cpu_load > load) | 2427 | if (min_cpu_load > load) |
| 2412 | min_cpu_load = load; | 2428 | min_cpu_load = load; |
| 2413 | } | 2429 | } |
| @@ -2447,13 +2463,15 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, | |||
| 2447 | if (sgs->sum_nr_running) | 2463 | if (sgs->sum_nr_running) |
| 2448 | avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; | 2464 | avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; |
| 2449 | 2465 | ||
| 2450 | if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) | 2466 | if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1) |
| 2451 | sgs->group_imb = 1; | 2467 | sgs->group_imb = 1; |
| 2452 | 2468 | ||
| 2453 | sgs->group_capacity = | 2469 | sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); |
| 2454 | DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); | ||
| 2455 | if (!sgs->group_capacity) | 2470 | if (!sgs->group_capacity) |
| 2456 | sgs->group_capacity = fix_small_capacity(sd, group); | 2471 | sgs->group_capacity = fix_small_capacity(sd, group); |
| 2472 | |||
| 2473 | if (sgs->group_capacity > sgs->sum_nr_running) | ||
| 2474 | sgs->group_has_capacity = 1; | ||
| 2457 | } | 2475 | } |
| 2458 | 2476 | ||
| 2459 | /** | 2477 | /** |
| @@ -2542,9 +2560,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, | |||
| 2542 | /* | 2560 | /* |
| 2543 | * In case the child domain prefers tasks go to siblings | 2561 | * In case the child domain prefers tasks go to siblings |
| 2544 | * first, lower the sg capacity to one so that we'll try | 2562 | * first, lower the sg capacity to one so that we'll try |
| 2545 | * and move all the excess tasks away. | 2563 | * and move all the excess tasks away. We lower the capacity |
| 2564 | * of a group only if the local group has the capacity to fit | ||
| 2565 | * these excess tasks, i.e. nr_running < group_capacity. The | ||
| 2566 | * extra check prevents the case where you always pull from the | ||
| 2567 | * heaviest group when it is already under-utilized (possible | ||
| 2568 | * when a single large-weight task outweighs the tasks on the system). | ||
| 2546 | */ | 2569 | */ |
| 2547 | if (prefer_sibling) | 2570 | if (prefer_sibling && !local_group && sds->this_has_capacity) |
| 2548 | sgs.group_capacity = min(sgs.group_capacity, 1UL); | 2571 | sgs.group_capacity = min(sgs.group_capacity, 1UL); |
| 2549 | 2572 | ||
| 2550 | if (local_group) { | 2573 | if (local_group) { |
| @@ -2552,12 +2575,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, | |||
| 2552 | sds->this = sg; | 2575 | sds->this = sg; |
| 2553 | sds->this_nr_running = sgs.sum_nr_running; | 2576 | sds->this_nr_running = sgs.sum_nr_running; |
| 2554 | sds->this_load_per_task = sgs.sum_weighted_load; | 2577 | sds->this_load_per_task = sgs.sum_weighted_load; |
| 2578 | sds->this_has_capacity = sgs.group_has_capacity; | ||
| 2555 | } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { | 2579 | } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { |
| 2556 | sds->max_load = sgs.avg_load; | 2580 | sds->max_load = sgs.avg_load; |
| 2557 | sds->busiest = sg; | 2581 | sds->busiest = sg; |
| 2558 | sds->busiest_nr_running = sgs.sum_nr_running; | 2582 | sds->busiest_nr_running = sgs.sum_nr_running; |
| 2559 | sds->busiest_group_capacity = sgs.group_capacity; | 2583 | sds->busiest_group_capacity = sgs.group_capacity; |
| 2560 | sds->busiest_load_per_task = sgs.sum_weighted_load; | 2584 | sds->busiest_load_per_task = sgs.sum_weighted_load; |
| 2585 | sds->busiest_has_capacity = sgs.group_has_capacity; | ||
| 2561 | sds->group_imb = sgs.group_imb; | 2586 | sds->group_imb = sgs.group_imb; |
| 2562 | } | 2587 | } |
| 2563 | 2588 | ||
| @@ -2754,6 +2779,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, | |||
| 2754 | return fix_small_imbalance(sds, this_cpu, imbalance); | 2779 | return fix_small_imbalance(sds, this_cpu, imbalance); |
| 2755 | 2780 | ||
| 2756 | } | 2781 | } |
| 2782 | |||
| 2757 | /******* find_busiest_group() helpers end here *********************/ | 2783 | /******* find_busiest_group() helpers end here *********************/ |
| 2758 | 2784 | ||
| 2759 | /** | 2785 | /** |
| @@ -2805,6 +2831,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 2805 | * 4) This group is more busy than the avg busyness at this | 2831 | * 4) This group is more busy than the avg busyness at this |
| 2806 | * sched_domain. | 2832 | * sched_domain. |
| 2807 | * 5) The imbalance is within the specified limit. | 2833 | * 5) The imbalance is within the specified limit. |
| 2834 | * | ||
| 2835 | * Note: when doing newidle balance, if the local group has excess | ||
| 2836 | * capacity (i.e. nr_running < group_capacity) and the busiest group | ||
| 2837 | * does not have any capacity, we force a load balance to pull tasks | ||
| 2838 | * to the local group. In this case, we skip past checks 3, 4 and 5. | ||
| 2808 | */ | 2839 | */ |
| 2809 | if (!(*balance)) | 2840 | if (!(*balance)) |
| 2810 | goto ret; | 2841 | goto ret; |
| @@ -2816,6 +2847,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 2816 | if (!sds.busiest || sds.busiest_nr_running == 0) | 2847 | if (!sds.busiest || sds.busiest_nr_running == 0) |
| 2817 | goto out_balanced; | 2848 | goto out_balanced; |
| 2818 | 2849 | ||
| 2850 | /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ | ||
| 2851 | if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity && | ||
| 2852 | !sds.busiest_has_capacity) | ||
| 2853 | goto force_balance; | ||
| 2854 | |||
| 2819 | if (sds.this_load >= sds.max_load) | 2855 | if (sds.this_load >= sds.max_load) |
| 2820 | goto out_balanced; | 2856 | goto out_balanced; |
| 2821 | 2857 | ||
| @@ -2827,6 +2863,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 2827 | if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) | 2863 | if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) |
| 2828 | goto out_balanced; | 2864 | goto out_balanced; |
| 2829 | 2865 | ||
| 2866 | force_balance: | ||
| 2830 | /* Looks like there is an imbalance. Compute it */ | 2867 | /* Looks like there is an imbalance. Compute it */ |
| 2831 | calculate_imbalance(&sds, this_cpu, imbalance); | 2868 | calculate_imbalance(&sds, this_cpu, imbalance); |
| 2832 | return sds.busiest; | 2869 | return sds.busiest; |
| @@ -3031,7 +3068,14 @@ redo: | |||
| 3031 | 3068 | ||
| 3032 | if (!ld_moved) { | 3069 | if (!ld_moved) { |
| 3033 | schedstat_inc(sd, lb_failed[idle]); | 3070 | schedstat_inc(sd, lb_failed[idle]); |
| 3034 | sd->nr_balance_failed++; | 3071 | /* |
| 3072 | * Increment the failure counter only on periodic balance. | ||
| 3073 | * We do not want newidle balance, which can be very | ||
| 3074 | * frequent, to pollute the failure counter, causing | ||
| 3075 | * excessive cache_hot migrations and active balances. | ||
| 3076 | */ | ||
| 3077 | if (idle != CPU_NEWLY_IDLE) | ||
| 3078 | sd->nr_balance_failed++; | ||
| 3035 | 3079 | ||
| 3036 | if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest), | 3080 | if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest), |
| 3037 | this_cpu)) { | 3081 | this_cpu)) { |
| @@ -3153,10 +3197,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
| 3153 | interval = msecs_to_jiffies(sd->balance_interval); | 3197 | interval = msecs_to_jiffies(sd->balance_interval); |
| 3154 | if (time_after(next_balance, sd->last_balance + interval)) | 3198 | if (time_after(next_balance, sd->last_balance + interval)) |
| 3155 | next_balance = sd->last_balance + interval; | 3199 | next_balance = sd->last_balance + interval; |
| 3156 | if (pulled_task) { | 3200 | if (pulled_task) |
| 3157 | this_rq->idle_stamp = 0; | ||
| 3158 | break; | 3201 | break; |
| 3159 | } | ||
| 3160 | } | 3202 | } |
| 3161 | 3203 | ||
| 3162 | raw_spin_lock(&this_rq->lock); | 3204 | raw_spin_lock(&this_rq->lock); |
| @@ -3630,7 +3672,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu) | |||
| 3630 | if (time_before(now, nohz.next_balance)) | 3672 | if (time_before(now, nohz.next_balance)) |
| 3631 | return 0; | 3673 | return 0; |
| 3632 | 3674 | ||
| 3633 | if (!rq->nr_running) | 3675 | if (rq->idle_at_tick) |
| 3634 | return 0; | 3676 | return 0; |
| 3635 | 3677 | ||
| 3636 | first_pick_cpu = atomic_read(&nohz.first_pick_cpu); | 3678 | first_pick_cpu = atomic_read(&nohz.first_pick_cpu); |
| @@ -3751,8 +3793,11 @@ static void task_fork_fair(struct task_struct *p) | |||
| 3751 | 3793 | ||
| 3752 | update_rq_clock(rq); | 3794 | update_rq_clock(rq); |
| 3753 | 3795 | ||
| 3754 | if (unlikely(task_cpu(p) != this_cpu)) | 3796 | if (unlikely(task_cpu(p) != this_cpu)) { |
| 3797 | rcu_read_lock(); | ||
| 3755 | __set_task_cpu(p, this_cpu); | 3798 | __set_task_cpu(p, this_cpu); |
| 3799 | rcu_read_unlock(); | ||
| 3800 | } | ||
| 3756 | 3801 | ||
| 3757 | update_curr(cfs_rq); | 3802 | update_curr(cfs_rq); |
| 3758 | 3803 | ||
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 83c66e8ad3ee..185f920ec1a2 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
| @@ -61,3 +61,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1) | |||
| 61 | * release the lock. Decreases scheduling overhead. | 61 | * release the lock. Decreases scheduling overhead. |
| 62 | */ | 62 | */ |
| 63 | SCHED_FEAT(OWNER_SPIN, 1) | 63 | SCHED_FEAT(OWNER_SPIN, 1) |
| 64 | |||
| 65 | /* | ||
| 66 | * Decrement CPU power based on irq activity | ||
| 67 | */ | ||
| 68 | SCHED_FEAT(NONIRQ_POWER, 1) | ||
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index d10c80ebb67a..bea7d79f7e9c 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
| @@ -609,7 +609,7 @@ static void update_curr_rt(struct rq *rq) | |||
| 609 | if (!task_has_rt_policy(curr)) | 609 | if (!task_has_rt_policy(curr)) |
| 610 | return; | 610 | return; |
| 611 | 611 | ||
| 612 | delta_exec = rq->clock - curr->se.exec_start; | 612 | delta_exec = rq->clock_task - curr->se.exec_start; |
| 613 | if (unlikely((s64)delta_exec < 0)) | 613 | if (unlikely((s64)delta_exec < 0)) |
| 614 | delta_exec = 0; | 614 | delta_exec = 0; |
| 615 | 615 | ||
| @@ -618,7 +618,7 @@ static void update_curr_rt(struct rq *rq) | |||
| 618 | curr->se.sum_exec_runtime += delta_exec; | 618 | curr->se.sum_exec_runtime += delta_exec; |
| 619 | account_group_exec_runtime(curr, delta_exec); | 619 | account_group_exec_runtime(curr, delta_exec); |
| 620 | 620 | ||
| 621 | curr->se.exec_start = rq->clock; | 621 | curr->se.exec_start = rq->clock_task; |
| 622 | cpuacct_charge(curr, delta_exec); | 622 | cpuacct_charge(curr, delta_exec); |
| 623 | 623 | ||
| 624 | sched_rt_avg_update(rq, delta_exec); | 624 | sched_rt_avg_update(rq, delta_exec); |
| @@ -960,18 +960,19 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) | |||
| 960 | * runqueue. Otherwise simply start this RT task | 960 | * runqueue. Otherwise simply start this RT task |
| 961 | * on its current runqueue. | 961 | * on its current runqueue. |
| 962 | * | 962 | * |
| 963 | * We want to avoid overloading runqueues. Even if | 963 | * We want to avoid overloading runqueues. If the woken |
| 964 | * the RT task is of higher priority than the current RT task. | 964 | task is of higher priority, then it will stay on this CPU |
| 965 | * RT tasks behave differently than other tasks. If | 965 | * and the lower prio task should be moved to another CPU. |
| 966 | * one gets preempted, we try to push it off to another queue. | 966 | * Even though this will probably make the lower prio task |
| 967 | * So trying to keep a preempting RT task on the same | 967 | lose its cache, we do not want to bounce a higher-priority task |
| 968 | * cache hot CPU will force the running RT task to | 968 | * around just because it gave up its CPU, perhaps for a |
| 969 | * a cold CPU. So we waste all the cache for the lower | 969 | * lock? |
| 970 | * RT task in hopes of saving some of a RT task | 970 | * |
| 971 | * that is just being woken and probably will have | 971 | * For equal prio tasks, we just let the scheduler sort it out. |
| 972 | * cold cache anyway. | ||
| 973 | */ | 972 | */ |
| 974 | if (unlikely(rt_task(rq->curr)) && | 973 | if (unlikely(rt_task(rq->curr)) && |
| 974 | (rq->curr->rt.nr_cpus_allowed < 2 || | ||
| 975 | rq->curr->prio < p->prio) && | ||
| 975 | (p->rt.nr_cpus_allowed > 1)) { | 976 | (p->rt.nr_cpus_allowed > 1)) { |
| 976 | int cpu = find_lowest_rq(p); | 977 | int cpu = find_lowest_rq(p); |
| 977 | 978 | ||
| @@ -1074,7 +1075,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) | |||
| 1074 | } while (rt_rq); | 1075 | } while (rt_rq); |
| 1075 | 1076 | ||
| 1076 | p = rt_task_of(rt_se); | 1077 | p = rt_task_of(rt_se); |
| 1077 | p->se.exec_start = rq->clock; | 1078 | p->se.exec_start = rq->clock_task; |
| 1078 | 1079 | ||
| 1079 | return p; | 1080 | return p; |
| 1080 | } | 1081 | } |
| @@ -1139,7 +1140,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) | |||
| 1139 | for_each_leaf_rt_rq(rt_rq, rq) { | 1140 | for_each_leaf_rt_rq(rt_rq, rq) { |
| 1140 | array = &rt_rq->active; | 1141 | array = &rt_rq->active; |
| 1141 | idx = sched_find_first_bit(array->bitmap); | 1142 | idx = sched_find_first_bit(array->bitmap); |
| 1142 | next_idx: | 1143 | next_idx: |
| 1143 | if (idx >= MAX_RT_PRIO) | 1144 | if (idx >= MAX_RT_PRIO) |
| 1144 | continue; | 1145 | continue; |
| 1145 | if (next && next->prio < idx) | 1146 | if (next && next->prio < idx) |
| @@ -1315,7 +1316,7 @@ static int push_rt_task(struct rq *rq) | |||
| 1315 | if (!next_task) | 1316 | if (!next_task) |
| 1316 | return 0; | 1317 | return 0; |
| 1317 | 1318 | ||
| 1318 | retry: | 1319 | retry: |
| 1319 | if (unlikely(next_task == rq->curr)) { | 1320 | if (unlikely(next_task == rq->curr)) { |
| 1320 | WARN_ON(1); | 1321 | WARN_ON(1); |
| 1321 | return 0; | 1322 | return 0; |
| @@ -1463,7 +1464,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
| 1463 | * but possible) | 1464 | * but possible) |
| 1464 | */ | 1465 | */ |
| 1465 | } | 1466 | } |
| 1466 | skip: | 1467 | skip: |
| 1467 | double_unlock_balance(this_rq, src_rq); | 1468 | double_unlock_balance(this_rq, src_rq); |
| 1468 | } | 1469 | } |
| 1469 | 1470 | ||
| @@ -1491,7 +1492,10 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) | |||
| 1491 | if (!task_running(rq, p) && | 1492 | if (!task_running(rq, p) && |
| 1492 | !test_tsk_need_resched(rq->curr) && | 1493 | !test_tsk_need_resched(rq->curr) && |
| 1493 | has_pushable_tasks(rq) && | 1494 | has_pushable_tasks(rq) && |
| 1494 | p->rt.nr_cpus_allowed > 1) | 1495 | p->rt.nr_cpus_allowed > 1 && |
| 1496 | rt_task(rq->curr) && | ||
| 1497 | (rq->curr->rt.nr_cpus_allowed < 2 || | ||
| 1498 | rq->curr->prio < p->prio)) | ||
| 1495 | push_rt_tasks(rq); | 1499 | push_rt_tasks(rq); |
| 1496 | } | 1500 | } |
| 1497 | 1501 | ||
| @@ -1709,7 +1713,7 @@ static void set_curr_task_rt(struct rq *rq) | |||
| 1709 | { | 1713 | { |
| 1710 | struct task_struct *p = rq->curr; | 1714 | struct task_struct *p = rq->curr; |
| 1711 | 1715 | ||
| 1712 | p->se.exec_start = rq->clock; | 1716 | p->se.exec_start = rq->clock_task; |
| 1713 | 1717 | ||
| 1714 | /* The running task is never eligible for pushing */ | 1718 | /* The running task is never eligible for pushing */ |
| 1715 | dequeue_pushable_task(rq, p); | 1719 | dequeue_pushable_task(rq, p); |
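The select_task_rq_rt() and task_woken_rt() hunks above encode one wakeup rule: steer a freshly woken RT task away from this CPU only when the currently running task is itself RT and is either pinned (nr_cpus_allowed < 2) or of higher priority, and the woken task is allowed to run elsewhere. The stand-alone C model below restates that predicate; the struct and its fields are illustrative stand-ins rather than kernel definitions, though they keep the kernel convention that a lower prio value means higher priority.

/* Stand-alone model of the RT wakeup/push rule above; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

struct rt_task {                /* illustrative stand-in for task_struct fields */
        int prio;               /* lower value = higher priority */
        int nr_cpus_allowed;
        bool is_rt;
};

/* Push the woken task @p to another CPU when the running task @curr is RT
 * and is either pinned or higher priority, and @p itself can run elsewhere. */
static bool should_push_elsewhere(const struct rt_task *curr,
                                  const struct rt_task *p)
{
        return curr->is_rt &&
               (curr->nr_cpus_allowed < 2 || curr->prio < p->prio) &&
               p->nr_cpus_allowed > 1;
}

int main(void)
{
        struct rt_task curr  = { .prio = 10, .nr_cpus_allowed = 1, .is_rt = true };
        struct rt_task woken = { .prio = 5,  .nr_cpus_allowed = 4, .is_rt = true };

        /* curr is pinned to one CPU, so even a higher-priority wakeup is pushed */
        printf("push woken task elsewhere: %s\n",
               should_push_elsewhere(&curr, &woken) ? "yes" : "no");
        return 0;
}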
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c new file mode 100644 index 000000000000..45bddc0c1048 --- /dev/null +++ b/kernel/sched_stoptask.c | |||
| @@ -0,0 +1,108 @@ | |||
| 1 | /* | ||
| 2 | * stop-task scheduling class. | ||
| 3 | * | ||
| 4 | * The stop task is the highest-priority task in the system; it preempts | ||
| 5 | * everything and will be preempted by nothing. | ||
| 6 | * | ||
| 7 | * See kernel/stop_machine.c | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifdef CONFIG_SMP | ||
| 11 | static int | ||
| 12 | select_task_rq_stop(struct rq *rq, struct task_struct *p, | ||
| 13 | int sd_flag, int flags) | ||
| 14 | { | ||
| 15 | return task_cpu(p); /* stop tasks never migrate */ | ||
| 16 | } | ||
| 17 | #endif /* CONFIG_SMP */ | ||
| 18 | |||
| 19 | static void | ||
| 20 | check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) | ||
| 21 | { | ||
| 22 | resched_task(rq->curr); /* we preempt everything */ | ||
| 23 | } | ||
| 24 | |||
| 25 | static struct task_struct *pick_next_task_stop(struct rq *rq) | ||
| 26 | { | ||
| 27 | struct task_struct *stop = rq->stop; | ||
| 28 | |||
| 29 | if (stop && stop->state == TASK_RUNNING) | ||
| 30 | return stop; | ||
| 31 | |||
| 32 | return NULL; | ||
| 33 | } | ||
| 34 | |||
| 35 | static void | ||
| 36 | enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) | ||
| 37 | { | ||
| 38 | } | ||
| 39 | |||
| 40 | static void | ||
| 41 | dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) | ||
| 42 | { | ||
| 43 | } | ||
| 44 | |||
| 45 | static void yield_task_stop(struct rq *rq) | ||
| 46 | { | ||
| 47 | BUG(); /* the stop task should never yield, it's pointless. */ | ||
| 48 | } | ||
| 49 | |||
| 50 | static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) | ||
| 51 | { | ||
| 52 | } | ||
| 53 | |||
| 54 | static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) | ||
| 55 | { | ||
| 56 | } | ||
| 57 | |||
| 58 | static void set_curr_task_stop(struct rq *rq) | ||
| 59 | { | ||
| 60 | } | ||
| 61 | |||
| 62 | static void switched_to_stop(struct rq *rq, struct task_struct *p, | ||
| 63 | int running) | ||
| 64 | { | ||
| 65 | BUG(); /* it's impossible to change to this class */ | ||
| 66 | } | ||
| 67 | |||
| 68 | static void prio_changed_stop(struct rq *rq, struct task_struct *p, | ||
| 69 | int oldprio, int running) | ||
| 70 | { | ||
| 71 | BUG(); /* how!? what priority? */ | ||
| 72 | } | ||
| 73 | |||
| 74 | static unsigned int | ||
| 75 | get_rr_interval_stop(struct rq *rq, struct task_struct *task) | ||
| 76 | { | ||
| 77 | return 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Simple, special scheduling class for the per-CPU stop tasks: | ||
| 82 | */ | ||
| 83 | static const struct sched_class stop_sched_class = { | ||
| 84 | .next = &rt_sched_class, | ||
| 85 | |||
| 86 | .enqueue_task = enqueue_task_stop, | ||
| 87 | .dequeue_task = dequeue_task_stop, | ||
| 88 | .yield_task = yield_task_stop, | ||
| 89 | |||
| 90 | .check_preempt_curr = check_preempt_curr_stop, | ||
| 91 | |||
| 92 | .pick_next_task = pick_next_task_stop, | ||
| 93 | .put_prev_task = put_prev_task_stop, | ||
| 94 | |||
| 95 | #ifdef CONFIG_SMP | ||
| 96 | .select_task_rq = select_task_rq_stop, | ||
| 97 | #endif | ||
| 98 | |||
| 99 | .set_curr_task = set_curr_task_stop, | ||
| 100 | .task_tick = task_tick_stop, | ||
| 101 | |||
| 102 | .get_rr_interval = get_rr_interval_stop, | ||
| 103 | |||
| 104 | .prio_changed = prio_changed_stop, | ||
| 105 | .switched_to = switched_to_stop, | ||
| 106 | |||
| 107 | /* no .task_new for stop tasks */ | ||
| 108 | }; | ||
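stop_sched_class above chains to rt_sched_class through its .next pointer, and pick_next_task_stop() returns the per-CPU stop thread only while it is actually runnable, so lower classes get asked otherwise. The user-space sketch below mirrors only that chain-and-fall-through shape; the struct, the class names, and the pick callbacks are invented for illustration and are not the scheduler's real types.

/* User-space sketch of sched_class chaining; all names here are invented. */
#include <stdio.h>
#include <stddef.h>

struct task { const char *name; };

struct class_model {
        const char *name;
        const struct class_model *next;         /* next lower-priority class   */
        struct task *(*pick_next)(void);        /* NULL means nothing runnable */
};

/* Walk from the highest class down, the way the scheduler core asks the
 * stop class first, then rt, then fair. */
static struct task *pick_task(const struct class_model *top)
{
        const struct class_model *class;
        struct task *t;

        for (class = top; class; class = class->next) {
                t = class->pick_next();
                if (t)
                        return t;
        }
        return NULL;
}

static struct task *pick_stop(void) { return NULL; /* no stop work pending */ }
static struct task *pick_rt(void)   { return NULL; /* no RT task runnable  */ }
static struct task *pick_fair(void)
{
        static struct task t = { "some-fair-task" };
        return &t;
}

static const struct class_model fair_class = { "fair", NULL,        pick_fair };
static const struct class_model rt_class   = { "rt",   &fair_class, pick_rt   };
static const struct class_model stop_class = { "stop", &rt_class,   pick_stop };

int main(void)
{
        struct task *t = pick_task(&stop_class);

        printf("picked: %s\n", t ? t->name : "idle");
        return 0;
}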
diff --git a/kernel/signal.c b/kernel/signal.c index bded65187780..919562c3d6b7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -2215,6 +2215,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | |||
| 2215 | #ifdef __ARCH_SI_TRAPNO | 2215 | #ifdef __ARCH_SI_TRAPNO |
| 2216 | err |= __put_user(from->si_trapno, &to->si_trapno); | 2216 | err |= __put_user(from->si_trapno, &to->si_trapno); |
| 2217 | #endif | 2217 | #endif |
| 2218 | #ifdef BUS_MCEERR_AO | ||
| 2219 | /* | ||
| 2220 | * Other callers might not initialize the si_lsb field, | ||
| 2221 | * so check explicitly for the right codes here. | ||
| 2222 | */ | ||
| 2223 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) | ||
| 2224 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); | ||
| 2225 | #endif | ||
| 2218 | break; | 2226 | break; |
| 2219 | case __SI_CHLD: | 2227 | case __SI_CHLD: |
| 2220 | err |= __put_user(from->si_pid, &to->si_pid); | 2228 | err |= __put_user(from->si_pid, &to->si_pid); |
diff --git a/kernel/smp.c b/kernel/smp.c index 75c970c715d3..ed6aacfcb7ef 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -365,9 +365,10 @@ call: | |||
| 365 | EXPORT_SYMBOL_GPL(smp_call_function_any); | 365 | EXPORT_SYMBOL_GPL(smp_call_function_any); |
| 366 | 366 | ||
| 367 | /** | 367 | /** |
| 368 | * __smp_call_function_single(): Run a function on another CPU | 368 | * __smp_call_function_single(): Run a function on a specific CPU |
| 369 | * @cpu: The CPU to run on. | 369 | * @cpu: The CPU to run on. |
| 370 | * @data: Pre-allocated and setup data structure | 370 | * @data: Pre-allocated and setup data structure |
| 371 | * @wait: If true, wait until function has completed on specified CPU. | ||
| 371 | * | 372 | * |
| 372 | * Like smp_call_function_single(), but allow caller to pass in a | 373 | * Like smp_call_function_single(), but allow caller to pass in a |
| 373 | * pre-allocated data structure. Useful for embedding @data inside | 374 | * pre-allocated data structure. Useful for embedding @data inside |
| @@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any); | |||
| 376 | void __smp_call_function_single(int cpu, struct call_single_data *data, | 377 | void __smp_call_function_single(int cpu, struct call_single_data *data, |
| 377 | int wait) | 378 | int wait) |
| 378 | { | 379 | { |
| 379 | csd_lock(data); | 380 | unsigned int this_cpu; |
| 381 | unsigned long flags; | ||
| 380 | 382 | ||
| 383 | this_cpu = get_cpu(); | ||
| 381 | /* | 384 | /* |
| 382 | * Can deadlock when called with interrupts disabled. | 385 | * Can deadlock when called with interrupts disabled. |
| 383 | * We allow cpu's that are not yet online though, as no one else can | 386 | * We allow cpu's that are not yet online though, as no one else can |
| @@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, | |||
| 387 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() | 390 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() |
| 388 | && !oops_in_progress); | 391 | && !oops_in_progress); |
| 389 | 392 | ||
| 390 | generic_exec_single(cpu, data, wait); | 393 | if (cpu == this_cpu) { |
| 394 | local_irq_save(flags); | ||
| 395 | data->func(data->info); | ||
| 396 | local_irq_restore(flags); | ||
| 397 | } else { | ||
| 398 | csd_lock(data); | ||
| 399 | generic_exec_single(cpu, data, wait); | ||
| 400 | } | ||
| 401 | put_cpu(); | ||
| 391 | } | 402 | } |
| 392 | 403 | ||
| 393 | /** | 404 | /** |
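The __smp_call_function_single() change above short-circuits the IPI path: when the target CPU is the local one, the callback runs immediately with interrupts disabled instead of being queued through csd_lock()/generic_exec_single(). The single-threaded C sketch below models only that dispatch decision; the queueing side and the interrupt disabling are simulated, and none of the names are kernel APIs.

/* Single-threaded model of the local-vs-remote dispatch above; not kernel code. */
#include <stdio.h>

struct call_data {
        void (*func)(void *info);
        void *info;
};

/* In the kernel this path locks the csd and sends an IPI; here we only
 * record that the remote path would have been taken. */
static void pretend_queue_ipi(int cpu, struct call_data *d)
{
        (void)d;
        printf("cpu %d: queued for remote execution\n", cpu);
}

static void run_on_cpu(int target_cpu, int this_cpu, struct call_data *d)
{
        if (target_cpu == this_cpu) {
                /* Local case: run immediately; the kernel brackets this
                 * with local_irq_save()/local_irq_restore(). */
                d->func(d->info);
        } else {
                pretend_queue_ipi(target_cpu, d);
        }
}

static void hello(void *info)
{
        printf("callback ran with info=%s\n", (const char *)info);
}

int main(void)
{
        struct call_data d = { hello, "local" };

        run_on_cpu(0, 0, &d);   /* same CPU: runs inline      */
        run_on_cpu(1, 0, &d);   /* other CPU: would be queued */
        return 0;
}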
diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b1a73a..79ee8f1fc0e7 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -77,11 +77,21 @@ void wakeup_softirqd(void) | |||
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | /* | 79 | /* |
| 80 | * preempt_count and SOFTIRQ_OFFSET usage: | ||
| 81 | * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving | ||
| 82 | * softirq processing. | ||
| 83 | * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) | ||
| 84 | * on local_bh_disable or local_bh_enable. | ||
| 85 | * This lets us distinguish between whether we are currently processing | ||
| 86 | * softirq and whether we just have bh disabled. | ||
| 87 | */ | ||
| 88 | |||
| 89 | /* | ||
| 80 | * This one is for softirq.c-internal use, | 90 | * This one is for softirq.c-internal use, |
| 81 | * where hardirqs are disabled legitimately: | 91 | * where hardirqs are disabled legitimately: |
| 82 | */ | 92 | */ |
| 83 | #ifdef CONFIG_TRACE_IRQFLAGS | 93 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 84 | static void __local_bh_disable(unsigned long ip) | 94 | static void __local_bh_disable(unsigned long ip, unsigned int cnt) |
| 85 | { | 95 | { |
| 86 | unsigned long flags; | 96 | unsigned long flags; |
| 87 | 97 | ||
| @@ -95,32 +105,43 @@ static void __local_bh_disable(unsigned long ip) | |||
| 95 | * We must manually increment preempt_count here and manually | 105 | * We must manually increment preempt_count here and manually |
| 96 | * call the trace_preempt_off later. | 106 | * call the trace_preempt_off later. |
| 97 | */ | 107 | */ |
| 98 | preempt_count() += SOFTIRQ_OFFSET; | 108 | preempt_count() += cnt; |
| 99 | /* | 109 | /* |
| 100 | * Were softirqs turned off above: | 110 | * Were softirqs turned off above: |
| 101 | */ | 111 | */ |
| 102 | if (softirq_count() == SOFTIRQ_OFFSET) | 112 | if (softirq_count() == cnt) |
| 103 | trace_softirqs_off(ip); | 113 | trace_softirqs_off(ip); |
| 104 | raw_local_irq_restore(flags); | 114 | raw_local_irq_restore(flags); |
| 105 | 115 | ||
| 106 | if (preempt_count() == SOFTIRQ_OFFSET) | 116 | if (preempt_count() == cnt) |
| 107 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); | 117 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); |
| 108 | } | 118 | } |
| 109 | #else /* !CONFIG_TRACE_IRQFLAGS */ | 119 | #else /* !CONFIG_TRACE_IRQFLAGS */ |
| 110 | static inline void __local_bh_disable(unsigned long ip) | 120 | static inline void __local_bh_disable(unsigned long ip, unsigned int cnt) |
| 111 | { | 121 | { |
| 112 | add_preempt_count(SOFTIRQ_OFFSET); | 122 | add_preempt_count(cnt); |
| 113 | barrier(); | 123 | barrier(); |
| 114 | } | 124 | } |
| 115 | #endif /* CONFIG_TRACE_IRQFLAGS */ | 125 | #endif /* CONFIG_TRACE_IRQFLAGS */ |
| 116 | 126 | ||
| 117 | void local_bh_disable(void) | 127 | void local_bh_disable(void) |
| 118 | { | 128 | { |
| 119 | __local_bh_disable((unsigned long)__builtin_return_address(0)); | 129 | __local_bh_disable((unsigned long)__builtin_return_address(0), |
| 130 | SOFTIRQ_DISABLE_OFFSET); | ||
| 120 | } | 131 | } |
| 121 | 132 | ||
| 122 | EXPORT_SYMBOL(local_bh_disable); | 133 | EXPORT_SYMBOL(local_bh_disable); |
| 123 | 134 | ||
| 135 | static void __local_bh_enable(unsigned int cnt) | ||
| 136 | { | ||
| 137 | WARN_ON_ONCE(in_irq()); | ||
| 138 | WARN_ON_ONCE(!irqs_disabled()); | ||
| 139 | |||
| 140 | if (softirq_count() == cnt) | ||
| 141 | trace_softirqs_on((unsigned long)__builtin_return_address(0)); | ||
| 142 | sub_preempt_count(cnt); | ||
| 143 | } | ||
| 144 | |||
| 124 | /* | 145 | /* |
| 125 | * Special-case - softirqs can safely be enabled in | 146 | * Special-case - softirqs can safely be enabled in |
| 126 | * cond_resched_softirq(), or by __do_softirq(), | 147 | * cond_resched_softirq(), or by __do_softirq(), |
| @@ -128,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable); | |||
| 128 | */ | 149 | */ |
| 129 | void _local_bh_enable(void) | 150 | void _local_bh_enable(void) |
| 130 | { | 151 | { |
| 131 | WARN_ON_ONCE(in_irq()); | 152 | __local_bh_enable(SOFTIRQ_DISABLE_OFFSET); |
| 132 | WARN_ON_ONCE(!irqs_disabled()); | ||
| 133 | |||
| 134 | if (softirq_count() == SOFTIRQ_OFFSET) | ||
| 135 | trace_softirqs_on((unsigned long)__builtin_return_address(0)); | ||
| 136 | sub_preempt_count(SOFTIRQ_OFFSET); | ||
| 137 | } | 153 | } |
| 138 | 154 | ||
| 139 | EXPORT_SYMBOL(_local_bh_enable); | 155 | EXPORT_SYMBOL(_local_bh_enable); |
| @@ -147,13 +163,13 @@ static inline void _local_bh_enable_ip(unsigned long ip) | |||
| 147 | /* | 163 | /* |
| 148 | * Are softirqs going to be turned on now: | 164 | * Are softirqs going to be turned on now: |
| 149 | */ | 165 | */ |
| 150 | if (softirq_count() == SOFTIRQ_OFFSET) | 166 | if (softirq_count() == SOFTIRQ_DISABLE_OFFSET) |
| 151 | trace_softirqs_on(ip); | 167 | trace_softirqs_on(ip); |
| 152 | /* | 168 | /* |
| 153 | * Keep preemption disabled until we are done with | 169 | * Keep preemption disabled until we are done with |
| 154 | * softirq processing: | 170 | * softirq processing: |
| 155 | */ | 171 | */ |
| 156 | sub_preempt_count(SOFTIRQ_OFFSET - 1); | 172 | sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1); |
| 157 | 173 | ||
| 158 | if (unlikely(!in_interrupt() && local_softirq_pending())) | 174 | if (unlikely(!in_interrupt() && local_softirq_pending())) |
| 159 | do_softirq(); | 175 | do_softirq(); |
| @@ -198,7 +214,8 @@ asmlinkage void __do_softirq(void) | |||
| 198 | pending = local_softirq_pending(); | 214 | pending = local_softirq_pending(); |
| 199 | account_system_vtime(current); | 215 | account_system_vtime(current); |
| 200 | 216 | ||
| 201 | __local_bh_disable((unsigned long)__builtin_return_address(0)); | 217 | __local_bh_disable((unsigned long)__builtin_return_address(0), |
| 218 | SOFTIRQ_OFFSET); | ||
| 202 | lockdep_softirq_enter(); | 219 | lockdep_softirq_enter(); |
| 203 | 220 | ||
| 204 | cpu = smp_processor_id(); | 221 | cpu = smp_processor_id(); |
| @@ -245,7 +262,7 @@ restart: | |||
| 245 | lockdep_softirq_exit(); | 262 | lockdep_softirq_exit(); |
| 246 | 263 | ||
| 247 | account_system_vtime(current); | 264 | account_system_vtime(current); |
| 248 | _local_bh_enable(); | 265 | __local_bh_enable(SOFTIRQ_OFFSET); |
| 249 | } | 266 | } |
| 250 | 267 | ||
| 251 | #ifndef __ARCH_HAS_DO_SOFTIRQ | 268 | #ifndef __ARCH_HAS_DO_SOFTIRQ |
| @@ -279,10 +296,16 @@ void irq_enter(void) | |||
| 279 | 296 | ||
| 280 | rcu_irq_enter(); | 297 | rcu_irq_enter(); |
| 281 | if (idle_cpu(cpu) && !in_interrupt()) { | 298 | if (idle_cpu(cpu) && !in_interrupt()) { |
| 282 | __irq_enter(); | 299 | /* |
| 300 | * Prevent raise_softirq from needlessly waking up ksoftirqd | ||
| 301 | * here, as softirq will be serviced on return from interrupt. | ||
| 302 | */ | ||
| 303 | local_bh_disable(); | ||
| 283 | tick_check_idle(cpu); | 304 | tick_check_idle(cpu); |
| 284 | } else | 305 | _local_bh_enable(); |
| 285 | __irq_enter(); | 306 | } |
| 307 | |||
| 308 | __irq_enter(); | ||
| 286 | } | 309 | } |
| 287 | 310 | ||
| 288 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED | 311 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED |
| @@ -696,6 +719,7 @@ static int run_ksoftirqd(void * __bind_cpu) | |||
| 696 | { | 719 | { |
| 697 | set_current_state(TASK_INTERRUPTIBLE); | 720 | set_current_state(TASK_INTERRUPTIBLE); |
| 698 | 721 | ||
| 722 | current->flags |= PF_KSOFTIRQD; | ||
| 699 | while (!kthread_should_stop()) { | 723 | while (!kthread_should_stop()) { |
| 700 | preempt_disable(); | 724 | preempt_disable(); |
| 701 | if (!local_softirq_pending()) { | 725 | if (!local_softirq_pending()) { |
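The new comment block and the cnt argument separate two states that used to share one offset: __do_softirq() still shifts preempt_count by SOFTIRQ_OFFSET, while local_bh_disable()/local_bh_enable() now use SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET), so "currently serving a softirq" and "merely has bottom halves disabled" become distinguishable. The stand-alone model below assumes the usual 8-bit softirq field starting at bit 8 of preempt_count; the constants mirror that layout, but the program is only an illustration, not kernel code.

/* Model of the softirq bookkeeping described in the comment above. */
#include <stdio.h>

#define SOFTIRQ_SHIFT          8
#define SOFTIRQ_OFFSET         (1UL << SOFTIRQ_SHIFT)   /* entering __do_softirq() */
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)     /* local_bh_disable()      */
#define SOFTIRQ_MASK           (0xffUL << SOFTIRQ_SHIFT)

static unsigned long preempt_count;     /* per-CPU in the real kernel */

static unsigned long softirq_count(void)
{
        return preempt_count & SOFTIRQ_MASK;
}

/* Bit 8 is set only while softirqs are being processed, so it now
 * distinguishes "serving softirq" from "just has bh disabled". */
static int serving_softirq(void)
{
        return (softirq_count() & SOFTIRQ_OFFSET) ? 1 : 0;
}

int main(void)
{
        preempt_count += SOFTIRQ_DISABLE_OFFSET;        /* local_bh_disable()   */
        printf("bh disabled, serving=%d\n", serving_softirq());        /* 0 */

        preempt_count += SOFTIRQ_OFFSET;                /* enter __do_softirq() */
        printf("in softirq,  serving=%d\n", serving_softirq());        /* 1 */

        preempt_count -= SOFTIRQ_OFFSET;                /* leave __do_softirq() */
        preempt_count -= SOFTIRQ_DISABLE_OFFSET;        /* local_bh_enable()    */
        printf("all clear,   serving=%d\n", serving_softirq());        /* 0 */
        return 0;
}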
diff --git a/kernel/srcu.c b/kernel/srcu.c index 2980da3fd509..c71e07500536 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c | |||
| @@ -46,11 +46,9 @@ static int init_srcu_struct_fields(struct srcu_struct *sp) | |||
| 46 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, | 46 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, |
| 47 | struct lock_class_key *key) | 47 | struct lock_class_key *key) |
| 48 | { | 48 | { |
| 49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 50 | /* Don't re-initialize a lock while it is held. */ | 49 | /* Don't re-initialize a lock while it is held. */ |
| 51 | debug_check_no_locks_freed((void *)sp, sizeof(*sp)); | 50 | debug_check_no_locks_freed((void *)sp, sizeof(*sp)); |
| 52 | lockdep_init_map(&sp->dep_map, name, key, 0); | 51 | lockdep_init_map(&sp->dep_map, name, key, 0); |
| 53 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
| 54 | return init_srcu_struct_fields(sp); | 52 | return init_srcu_struct_fields(sp); |
| 55 | } | 53 | } |
| 56 | EXPORT_SYMBOL_GPL(__init_srcu_struct); | 54 | EXPORT_SYMBOL_GPL(__init_srcu_struct); |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 4372ccb25127..090c28812ce1 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
| @@ -287,11 +287,12 @@ repeat: | |||
| 287 | goto repeat; | 287 | goto repeat; |
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | extern void sched_set_stop_task(int cpu, struct task_struct *stop); | ||
| 291 | |||
| 290 | /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */ | 292 | /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */ |
| 291 | static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, | 293 | static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, |
| 292 | unsigned long action, void *hcpu) | 294 | unsigned long action, void *hcpu) |
| 293 | { | 295 | { |
| 294 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; | ||
| 295 | unsigned int cpu = (unsigned long)hcpu; | 296 | unsigned int cpu = (unsigned long)hcpu; |
| 296 | struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); | 297 | struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); |
| 297 | struct task_struct *p; | 298 | struct task_struct *p; |
| @@ -304,13 +305,13 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, | |||
| 304 | cpu); | 305 | cpu); |
| 305 | if (IS_ERR(p)) | 306 | if (IS_ERR(p)) |
| 306 | return NOTIFY_BAD; | 307 | return NOTIFY_BAD; |
| 307 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); | ||
| 308 | get_task_struct(p); | 308 | get_task_struct(p); |
| 309 | kthread_bind(p, cpu); | ||
| 310 | sched_set_stop_task(cpu, p); | ||
| 309 | stopper->thread = p; | 311 | stopper->thread = p; |
| 310 | break; | 312 | break; |
| 311 | 313 | ||
| 312 | case CPU_ONLINE: | 314 | case CPU_ONLINE: |
| 313 | kthread_bind(stopper->thread, cpu); | ||
| 314 | /* strictly unnecessary, as first user will wake it */ | 315 | /* strictly unnecessary, as first user will wake it */ |
| 315 | wake_up_process(stopper->thread); | 316 | wake_up_process(stopper->thread); |
| 316 | /* mark enabled */ | 317 | /* mark enabled */ |
| @@ -325,6 +326,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, | |||
| 325 | { | 326 | { |
| 326 | struct cpu_stop_work *work; | 327 | struct cpu_stop_work *work; |
| 327 | 328 | ||
| 329 | sched_set_stop_task(cpu, NULL); | ||
| 328 | /* kill the stopper */ | 330 | /* kill the stopper */ |
| 329 | kthread_stop(stopper->thread); | 331 | kthread_stop(stopper->thread); |
| 330 | /* drain remaining works */ | 332 | /* drain remaining works */ |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f88552c6d227..3a45c224770f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -2485,7 +2485,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int | |||
| 2485 | kbuf[left] = 0; | 2485 | kbuf[left] = 0; |
| 2486 | } | 2486 | } |
| 2487 | 2487 | ||
| 2488 | for (; left && vleft--; i++, min++, max++, first=0) { | 2488 | for (; left && vleft--; i++, first = 0) { |
| 2489 | unsigned long val; | 2489 | unsigned long val; |
| 2490 | 2490 | ||
| 2491 | if (write) { | 2491 | if (write) { |
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index 04cdcf72c827..10b90d8a03c4 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c | |||
| @@ -143,15 +143,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) | |||
| 143 | if (!table->maxlen) | 143 | if (!table->maxlen) |
| 144 | set_fail(&fail, table, "No maxlen"); | 144 | set_fail(&fail, table, "No maxlen"); |
| 145 | } | 145 | } |
| 146 | if ((table->proc_handler == proc_doulongvec_minmax) || | ||
| 147 | (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { | ||
| 148 | if (table->maxlen > sizeof (unsigned long)) { | ||
| 149 | if (!table->extra1) | ||
| 150 | set_fail(&fail, table, "No min"); | ||
| 151 | if (!table->extra2) | ||
| 152 | set_fail(&fail, table, "No max"); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | #ifdef CONFIG_PROC_SYSCTL | 146 | #ifdef CONFIG_PROC_SYSCTL |
| 156 | if (table->procname && !table->proc_handler) | 147 | if (table->procname && !table->proc_handler) |
| 157 | set_fail(&fail, table, "No proc_handler"); | 148 | set_fail(&fail, table, "No proc_handler"); |
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c index 4f104515a19b..f8b11a283171 100644 --- a/kernel/test_kprobes.c +++ b/kernel/test_kprobes.c | |||
| @@ -115,7 +115,9 @@ static int test_kprobes(void) | |||
| 115 | int ret; | 115 | int ret; |
| 116 | struct kprobe *kps[2] = {&kp, &kp2}; | 116 | struct kprobe *kps[2] = {&kp, &kp2}; |
| 117 | 117 | ||
| 118 | kp.addr = 0; /* addr should be cleared for reusing kprobe. */ | 118 | /* addr and flags should be cleared for reusing kprobe. */ |
| 119 | kp.addr = NULL; | ||
| 120 | kp.flags = 0; | ||
| 119 | ret = register_kprobes(kps, 2); | 121 | ret = register_kprobes(kps, 2); |
| 120 | if (ret < 0) { | 122 | if (ret < 0) { |
| 121 | printk(KERN_ERR "Kprobe smoke test failed: " | 123 | printk(KERN_ERR "Kprobe smoke test failed: " |
| @@ -210,7 +212,9 @@ static int test_jprobes(void) | |||
| 210 | int ret; | 212 | int ret; |
| 211 | struct jprobe *jps[2] = {&jp, &jp2}; | 213 | struct jprobe *jps[2] = {&jp, &jp2}; |
| 212 | 214 | ||
| 213 | jp.kp.addr = 0; /* addr should be cleared for reusing kprobe. */ | 215 | /* addr and flags should be cleared for reusing kprobe. */ |
| 216 | jp.kp.addr = NULL; | ||
| 217 | jp.kp.flags = 0; | ||
| 214 | ret = register_jprobes(jps, 2); | 218 | ret = register_jprobes(jps, 2); |
| 215 | if (ret < 0) { | 219 | if (ret < 0) { |
| 216 | printk(KERN_ERR "Kprobe smoke test failed: " | 220 | printk(KERN_ERR "Kprobe smoke test failed: " |
| @@ -323,7 +327,9 @@ static int test_kretprobes(void) | |||
| 323 | int ret; | 327 | int ret; |
| 324 | struct kretprobe *rps[2] = {&rp, &rp2}; | 328 | struct kretprobe *rps[2] = {&rp, &rp2}; |
| 325 | 329 | ||
| 326 | rp.kp.addr = 0; /* addr should be cleared for reusing kprobe. */ | 330 | /* addr and flags should be cleared for reusing kprobe. */ |
| 331 | rp.kp.addr = NULL; | ||
| 332 | rp.kp.flags = 0; | ||
| 327 | ret = register_kretprobes(rps, 2); | 333 | ret = register_kretprobes(rps, 2); |
| 328 | if (ret < 0) { | 334 | if (ret < 0) { |
| 329 | printk(KERN_ERR "Kprobe smoke test failed: " | 335 | printk(KERN_ERR "Kprobe smoke test failed: " |
diff --git a/kernel/timer.c b/kernel/timer.c index 97bf05baade7..68a9ae7679b7 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | #include <linux/delay.h> | 37 | #include <linux/delay.h> |
| 38 | #include <linux/tick.h> | 38 | #include <linux/tick.h> |
| 39 | #include <linux/kallsyms.h> | 39 | #include <linux/kallsyms.h> |
| 40 | #include <linux/perf_event.h> | 40 | #include <linux/irq_work.h> |
| 41 | #include <linux/sched.h> | 41 | #include <linux/sched.h> |
| 42 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
| 43 | 43 | ||
| @@ -1279,7 +1279,10 @@ void update_process_times(int user_tick) | |||
| 1279 | run_local_timers(); | 1279 | run_local_timers(); |
| 1280 | rcu_check_callbacks(cpu, user_tick); | 1280 | rcu_check_callbacks(cpu, user_tick); |
| 1281 | printk_tick(); | 1281 | printk_tick(); |
| 1282 | perf_event_do_pending(); | 1282 | #ifdef CONFIG_IRQ_WORK |
| 1283 | if (in_irq()) | ||
| 1284 | irq_work_run(); | ||
| 1285 | #endif | ||
| 1283 | scheduler_tick(); | 1286 | scheduler_tick(); |
| 1284 | run_posix_cpu_timers(p); | 1287 | run_posix_cpu_timers(p); |
| 1285 | } | 1288 | } |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 538501c6ea50..e550d2eda1df 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -49,6 +49,11 @@ config HAVE_SYSCALL_TRACEPOINTS | |||
| 49 | help | 49 | help |
| 50 | See Documentation/trace/ftrace-design.txt | 50 | See Documentation/trace/ftrace-design.txt |
| 51 | 51 | ||
| 52 | config HAVE_C_RECORDMCOUNT | ||
| 53 | bool | ||
| 54 | help | ||
| 55 | C version of recordmcount available? | ||
| 56 | |||
| 52 | config TRACER_MAX_TRACE | 57 | config TRACER_MAX_TRACE |
| 53 | bool | 58 | bool |
| 54 | 59 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fa7ece649fe1..ebd80d50c474 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -884,10 +884,8 @@ enum { | |||
| 884 | FTRACE_ENABLE_CALLS = (1 << 0), | 884 | FTRACE_ENABLE_CALLS = (1 << 0), |
| 885 | FTRACE_DISABLE_CALLS = (1 << 1), | 885 | FTRACE_DISABLE_CALLS = (1 << 1), |
| 886 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | 886 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), |
| 887 | FTRACE_ENABLE_MCOUNT = (1 << 3), | 887 | FTRACE_START_FUNC_RET = (1 << 3), |
| 888 | FTRACE_DISABLE_MCOUNT = (1 << 4), | 888 | FTRACE_STOP_FUNC_RET = (1 << 4), |
| 889 | FTRACE_START_FUNC_RET = (1 << 5), | ||
| 890 | FTRACE_STOP_FUNC_RET = (1 << 6), | ||
| 891 | }; | 889 | }; |
| 892 | 890 | ||
| 893 | static int ftrace_filtered; | 891 | static int ftrace_filtered; |
| @@ -1226,8 +1224,6 @@ static void ftrace_shutdown(int command) | |||
| 1226 | 1224 | ||
| 1227 | static void ftrace_startup_sysctl(void) | 1225 | static void ftrace_startup_sysctl(void) |
| 1228 | { | 1226 | { |
| 1229 | int command = FTRACE_ENABLE_MCOUNT; | ||
| 1230 | |||
| 1231 | if (unlikely(ftrace_disabled)) | 1227 | if (unlikely(ftrace_disabled)) |
| 1232 | return; | 1228 | return; |
| 1233 | 1229 | ||
| @@ -1235,23 +1231,17 @@ static void ftrace_startup_sysctl(void) | |||
| 1235 | saved_ftrace_func = NULL; | 1231 | saved_ftrace_func = NULL; |
| 1236 | /* ftrace_start_up is true if we want ftrace running */ | 1232 | /* ftrace_start_up is true if we want ftrace running */ |
| 1237 | if (ftrace_start_up) | 1233 | if (ftrace_start_up) |
| 1238 | command |= FTRACE_ENABLE_CALLS; | 1234 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
| 1239 | |||
| 1240 | ftrace_run_update_code(command); | ||
| 1241 | } | 1235 | } |
| 1242 | 1236 | ||
| 1243 | static void ftrace_shutdown_sysctl(void) | 1237 | static void ftrace_shutdown_sysctl(void) |
| 1244 | { | 1238 | { |
| 1245 | int command = FTRACE_DISABLE_MCOUNT; | ||
| 1246 | |||
| 1247 | if (unlikely(ftrace_disabled)) | 1239 | if (unlikely(ftrace_disabled)) |
| 1248 | return; | 1240 | return; |
| 1249 | 1241 | ||
| 1250 | /* ftrace_start_up is true if ftrace is running */ | 1242 | /* ftrace_start_up is true if ftrace is running */ |
| 1251 | if (ftrace_start_up) | 1243 | if (ftrace_start_up) |
| 1252 | command |= FTRACE_DISABLE_CALLS; | 1244 | ftrace_run_update_code(FTRACE_DISABLE_CALLS); |
| 1253 | |||
| 1254 | ftrace_run_update_code(command); | ||
| 1255 | } | 1245 | } |
| 1256 | 1246 | ||
| 1257 | static cycle_t ftrace_update_time; | 1247 | static cycle_t ftrace_update_time; |
| @@ -1368,24 +1358,29 @@ enum { | |||
| 1368 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 1358 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
| 1369 | 1359 | ||
| 1370 | struct ftrace_iterator { | 1360 | struct ftrace_iterator { |
| 1371 | struct ftrace_page *pg; | 1361 | loff_t pos; |
| 1372 | int hidx; | 1362 | loff_t func_pos; |
| 1373 | int idx; | 1363 | struct ftrace_page *pg; |
| 1374 | unsigned flags; | 1364 | struct dyn_ftrace *func; |
| 1375 | struct trace_parser parser; | 1365 | struct ftrace_func_probe *probe; |
| 1366 | struct trace_parser parser; | ||
| 1367 | int hidx; | ||
| 1368 | int idx; | ||
| 1369 | unsigned flags; | ||
| 1376 | }; | 1370 | }; |
| 1377 | 1371 | ||
| 1378 | static void * | 1372 | static void * |
| 1379 | t_hash_next(struct seq_file *m, void *v, loff_t *pos) | 1373 | t_hash_next(struct seq_file *m, loff_t *pos) |
| 1380 | { | 1374 | { |
| 1381 | struct ftrace_iterator *iter = m->private; | 1375 | struct ftrace_iterator *iter = m->private; |
| 1382 | struct hlist_node *hnd = v; | 1376 | struct hlist_node *hnd = NULL; |
| 1383 | struct hlist_head *hhd; | 1377 | struct hlist_head *hhd; |
| 1384 | 1378 | ||
| 1385 | WARN_ON(!(iter->flags & FTRACE_ITER_HASH)); | ||
| 1386 | |||
| 1387 | (*pos)++; | 1379 | (*pos)++; |
| 1380 | iter->pos = *pos; | ||
| 1388 | 1381 | ||
| 1382 | if (iter->probe) | ||
| 1383 | hnd = &iter->probe->node; | ||
| 1389 | retry: | 1384 | retry: |
| 1390 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) | 1385 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) |
| 1391 | return NULL; | 1386 | return NULL; |
| @@ -1408,7 +1403,12 @@ t_hash_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 1408 | } | 1403 | } |
| 1409 | } | 1404 | } |
| 1410 | 1405 | ||
| 1411 | return hnd; | 1406 | if (WARN_ON_ONCE(!hnd)) |
| 1407 | return NULL; | ||
| 1408 | |||
| 1409 | iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); | ||
| 1410 | |||
| 1411 | return iter; | ||
| 1412 | } | 1412 | } |
| 1413 | 1413 | ||
| 1414 | static void *t_hash_start(struct seq_file *m, loff_t *pos) | 1414 | static void *t_hash_start(struct seq_file *m, loff_t *pos) |
| @@ -1417,26 +1417,32 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) | |||
| 1417 | void *p = NULL; | 1417 | void *p = NULL; |
| 1418 | loff_t l; | 1418 | loff_t l; |
| 1419 | 1419 | ||
| 1420 | if (!(iter->flags & FTRACE_ITER_HASH)) | 1420 | if (iter->func_pos > *pos) |
| 1421 | *pos = 0; | 1421 | return NULL; |
| 1422 | |||
| 1423 | iter->flags |= FTRACE_ITER_HASH; | ||
| 1424 | 1422 | ||
| 1425 | iter->hidx = 0; | 1423 | iter->hidx = 0; |
| 1426 | for (l = 0; l <= *pos; ) { | 1424 | for (l = 0; l <= (*pos - iter->func_pos); ) { |
| 1427 | p = t_hash_next(m, p, &l); | 1425 | p = t_hash_next(m, &l); |
| 1428 | if (!p) | 1426 | if (!p) |
| 1429 | break; | 1427 | break; |
| 1430 | } | 1428 | } |
| 1431 | return p; | 1429 | if (!p) |
| 1430 | return NULL; | ||
| 1431 | |||
| 1432 | /* Only set this if we have an item */ | ||
| 1433 | iter->flags |= FTRACE_ITER_HASH; | ||
| 1434 | |||
| 1435 | return iter; | ||
| 1432 | } | 1436 | } |
| 1433 | 1437 | ||
| 1434 | static int t_hash_show(struct seq_file *m, void *v) | 1438 | static int |
| 1439 | t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) | ||
| 1435 | { | 1440 | { |
| 1436 | struct ftrace_func_probe *rec; | 1441 | struct ftrace_func_probe *rec; |
| 1437 | struct hlist_node *hnd = v; | ||
| 1438 | 1442 | ||
| 1439 | rec = hlist_entry(hnd, struct ftrace_func_probe, node); | 1443 | rec = iter->probe; |
| 1444 | if (WARN_ON_ONCE(!rec)) | ||
| 1445 | return -EIO; | ||
| 1440 | 1446 | ||
| 1441 | if (rec->ops->print) | 1447 | if (rec->ops->print) |
| 1442 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); | 1448 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); |
| @@ -1457,12 +1463,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 1457 | struct dyn_ftrace *rec = NULL; | 1463 | struct dyn_ftrace *rec = NULL; |
| 1458 | 1464 | ||
| 1459 | if (iter->flags & FTRACE_ITER_HASH) | 1465 | if (iter->flags & FTRACE_ITER_HASH) |
| 1460 | return t_hash_next(m, v, pos); | 1466 | return t_hash_next(m, pos); |
| 1461 | 1467 | ||
| 1462 | (*pos)++; | 1468 | (*pos)++; |
| 1469 | iter->pos = *pos; | ||
| 1463 | 1470 | ||
| 1464 | if (iter->flags & FTRACE_ITER_PRINTALL) | 1471 | if (iter->flags & FTRACE_ITER_PRINTALL) |
| 1465 | return NULL; | 1472 | return t_hash_start(m, pos); |
| 1466 | 1473 | ||
| 1467 | retry: | 1474 | retry: |
| 1468 | if (iter->idx >= iter->pg->index) { | 1475 | if (iter->idx >= iter->pg->index) { |
| @@ -1491,7 +1498,20 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 1491 | } | 1498 | } |
| 1492 | } | 1499 | } |
| 1493 | 1500 | ||
| 1494 | return rec; | 1501 | if (!rec) |
| 1502 | return t_hash_start(m, pos); | ||
| 1503 | |||
| 1504 | iter->func_pos = *pos; | ||
| 1505 | iter->func = rec; | ||
| 1506 | |||
| 1507 | return iter; | ||
| 1508 | } | ||
| 1509 | |||
| 1510 | static void reset_iter_read(struct ftrace_iterator *iter) | ||
| 1511 | { | ||
| 1512 | iter->pos = 0; | ||
| 1513 | iter->func_pos = 0; | ||
| 1514 | iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH); | ||
| 1495 | } | 1515 | } |
| 1496 | 1516 | ||
| 1497 | static void *t_start(struct seq_file *m, loff_t *pos) | 1517 | static void *t_start(struct seq_file *m, loff_t *pos) |
| @@ -1502,6 +1522,12 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 1502 | 1522 | ||
| 1503 | mutex_lock(&ftrace_lock); | 1523 | mutex_lock(&ftrace_lock); |
| 1504 | /* | 1524 | /* |
| 1525 | * If an lseek was done, then reset and start from beginning. | ||
| 1526 | */ | ||
| 1527 | if (*pos < iter->pos) | ||
| 1528 | reset_iter_read(iter); | ||
| 1529 | |||
| 1530 | /* | ||
| 1505 | * For set_ftrace_filter reading, if we have the filter | 1531 | * For set_ftrace_filter reading, if we have the filter |
| 1506 | * off, we can short cut and just print out that all | 1532 | * off, we can short cut and just print out that all |
| 1507 | * functions are enabled. | 1533 | * functions are enabled. |
| @@ -1518,6 +1544,11 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 1518 | if (iter->flags & FTRACE_ITER_HASH) | 1544 | if (iter->flags & FTRACE_ITER_HASH) |
| 1519 | return t_hash_start(m, pos); | 1545 | return t_hash_start(m, pos); |
| 1520 | 1546 | ||
| 1547 | /* | ||
| 1548 | * Unfortunately, we need to restart at ftrace_pages_start | ||
| 1549 | * every time we let go of the ftrace_lock mutex. This is because | ||
| 1550 | * those pointers can change without the lock. | ||
| 1551 | */ | ||
| 1521 | iter->pg = ftrace_pages_start; | 1552 | iter->pg = ftrace_pages_start; |
| 1522 | iter->idx = 0; | 1553 | iter->idx = 0; |
| 1523 | for (l = 0; l <= *pos; ) { | 1554 | for (l = 0; l <= *pos; ) { |
| @@ -1526,10 +1557,14 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 1526 | break; | 1557 | break; |
| 1527 | } | 1558 | } |
| 1528 | 1559 | ||
| 1529 | if (!p && iter->flags & FTRACE_ITER_FILTER) | 1560 | if (!p) { |
| 1530 | return t_hash_start(m, pos); | 1561 | if (iter->flags & FTRACE_ITER_FILTER) |
| 1562 | return t_hash_start(m, pos); | ||
| 1531 | 1563 | ||
| 1532 | return p; | 1564 | return NULL; |
| 1565 | } | ||
| 1566 | |||
| 1567 | return iter; | ||
| 1533 | } | 1568 | } |
| 1534 | 1569 | ||
| 1535 | static void t_stop(struct seq_file *m, void *p) | 1570 | static void t_stop(struct seq_file *m, void *p) |
| @@ -1540,16 +1575,18 @@ static void t_stop(struct seq_file *m, void *p) | |||
| 1540 | static int t_show(struct seq_file *m, void *v) | 1575 | static int t_show(struct seq_file *m, void *v) |
| 1541 | { | 1576 | { |
| 1542 | struct ftrace_iterator *iter = m->private; | 1577 | struct ftrace_iterator *iter = m->private; |
| 1543 | struct dyn_ftrace *rec = v; | 1578 | struct dyn_ftrace *rec; |
| 1544 | 1579 | ||
| 1545 | if (iter->flags & FTRACE_ITER_HASH) | 1580 | if (iter->flags & FTRACE_ITER_HASH) |
| 1546 | return t_hash_show(m, v); | 1581 | return t_hash_show(m, iter); |
| 1547 | 1582 | ||
| 1548 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 1583 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
| 1549 | seq_printf(m, "#### all functions enabled ####\n"); | 1584 | seq_printf(m, "#### all functions enabled ####\n"); |
| 1550 | return 0; | 1585 | return 0; |
| 1551 | } | 1586 | } |
| 1552 | 1587 | ||
| 1588 | rec = iter->func; | ||
| 1589 | |||
| 1553 | if (!rec) | 1590 | if (!rec) |
| 1554 | return 0; | 1591 | return 0; |
| 1555 | 1592 | ||
| @@ -1601,8 +1638,8 @@ ftrace_failures_open(struct inode *inode, struct file *file) | |||
| 1601 | 1638 | ||
| 1602 | ret = ftrace_avail_open(inode, file); | 1639 | ret = ftrace_avail_open(inode, file); |
| 1603 | if (!ret) { | 1640 | if (!ret) { |
| 1604 | m = (struct seq_file *)file->private_data; | 1641 | m = file->private_data; |
| 1605 | iter = (struct ftrace_iterator *)m->private; | 1642 | iter = m->private; |
| 1606 | iter->flags = FTRACE_ITER_FAILURES; | 1643 | iter->flags = FTRACE_ITER_FAILURES; |
| 1607 | } | 1644 | } |
| 1608 | 1645 | ||
| @@ -2418,7 +2455,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
| 2418 | .open = ftrace_filter_open, | 2455 | .open = ftrace_filter_open, |
| 2419 | .read = seq_read, | 2456 | .read = seq_read, |
| 2420 | .write = ftrace_filter_write, | 2457 | .write = ftrace_filter_write, |
| 2421 | .llseek = no_llseek, | 2458 | .llseek = ftrace_regex_lseek, |
| 2422 | .release = ftrace_filter_release, | 2459 | .release = ftrace_filter_release, |
| 2423 | }; | 2460 | }; |
| 2424 | 2461 | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 492197e2f86c..c5a632a669e1 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -405,7 +405,7 @@ static inline int test_time_stamp(u64 delta) | |||
| 405 | #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) | 405 | #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) |
| 406 | 406 | ||
| 407 | /* Max number of timestamps that can fit on a page */ | 407 | /* Max number of timestamps that can fit on a page */ |
| 408 | #define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP) | 408 | #define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND) |
| 409 | 409 | ||
| 410 | int ring_buffer_print_page_header(struct trace_seq *s) | 410 | int ring_buffer_print_page_header(struct trace_seq *s) |
| 411 | { | 411 | { |
| @@ -2606,6 +2606,19 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) | |||
| 2606 | } | 2606 | } |
| 2607 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); | 2607 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); |
| 2608 | 2608 | ||
| 2609 | /* | ||
| 2610 | * The total number of entries in the ring buffer is the running counter | ||
| 2611 | * of entries entered into the ring buffer, minus the sum of | ||
| 2612 | * the entries read from the ring buffer and the number of | ||
| 2613 | * entries that were overwritten. | ||
| 2614 | */ | ||
| 2615 | static inline unsigned long | ||
| 2616 | rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) | ||
| 2617 | { | ||
| 2618 | return local_read(&cpu_buffer->entries) - | ||
| 2619 | (local_read(&cpu_buffer->overrun) + cpu_buffer->read); | ||
| 2620 | } | ||
| 2621 | |||
| 2609 | /** | 2622 | /** |
| 2610 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer | 2623 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer |
| 2611 | * @buffer: The ring buffer | 2624 | * @buffer: The ring buffer |
| @@ -2614,16 +2627,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); | |||
| 2614 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) | 2627 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) |
| 2615 | { | 2628 | { |
| 2616 | struct ring_buffer_per_cpu *cpu_buffer; | 2629 | struct ring_buffer_per_cpu *cpu_buffer; |
| 2617 | unsigned long ret; | ||
| 2618 | 2630 | ||
| 2619 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 2631 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 2620 | return 0; | 2632 | return 0; |
| 2621 | 2633 | ||
| 2622 | cpu_buffer = buffer->buffers[cpu]; | 2634 | cpu_buffer = buffer->buffers[cpu]; |
| 2623 | ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun)) | ||
| 2624 | - cpu_buffer->read; | ||
| 2625 | 2635 | ||
| 2626 | return ret; | 2636 | return rb_num_of_entries(cpu_buffer); |
| 2627 | } | 2637 | } |
| 2628 | EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); | 2638 | EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); |
| 2629 | 2639 | ||
| @@ -2684,8 +2694,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer) | |||
| 2684 | /* if you care about this being correct, lock the buffer */ | 2694 | /* if you care about this being correct, lock the buffer */ |
| 2685 | for_each_buffer_cpu(buffer, cpu) { | 2695 | for_each_buffer_cpu(buffer, cpu) { |
| 2686 | cpu_buffer = buffer->buffers[cpu]; | 2696 | cpu_buffer = buffer->buffers[cpu]; |
| 2687 | entries += (local_read(&cpu_buffer->entries) - | 2697 | entries += rb_num_of_entries(cpu_buffer); |
| 2688 | local_read(&cpu_buffer->overrun)) - cpu_buffer->read; | ||
| 2689 | } | 2698 | } |
| 2690 | 2699 | ||
| 2691 | return entries; | 2700 | return entries; |
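rb_num_of_entries() above centralizes the accounting spelled out in its comment: live entries = entries ever written minus (entries overwritten plus entries read). A short worked example in stand-alone C follows; the struct only names those three counters and is not the real per-CPU buffer.

/* Worked example of the accounting factored into rb_num_of_entries() above. */
#include <stdio.h>

struct cpu_buffer_model {
        unsigned long entries;  /* events ever written        */
        unsigned long overrun;  /* events lost to overwrite   */
        unsigned long read;     /* events consumed by readers */
};

static unsigned long num_of_entries(const struct cpu_buffer_model *b)
{
        return b->entries - (b->overrun + b->read);
}

int main(void)
{
        /* 1000 events written, 200 overwritten before being read, 300 read */
        struct cpu_buffer_model b = { .entries = 1000, .overrun = 200, .read = 300 };

        printf("entries still in the buffer: %lu\n", num_of_entries(&b));  /* 500 */
        return 0;
}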
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9ec59f541156..001bcd2ccf4a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -2196,7 +2196,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp) | |||
| 2196 | 2196 | ||
| 2197 | static int tracing_release(struct inode *inode, struct file *file) | 2197 | static int tracing_release(struct inode *inode, struct file *file) |
| 2198 | { | 2198 | { |
| 2199 | struct seq_file *m = (struct seq_file *)file->private_data; | 2199 | struct seq_file *m = file->private_data; |
| 2200 | struct trace_iterator *iter; | 2200 | struct trace_iterator *iter; |
| 2201 | int cpu; | 2201 | int cpu; |
| 2202 | 2202 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index d39b3c5454a5..9021f8c0c0c3 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -343,6 +343,10 @@ void trace_function(struct trace_array *tr, | |||
| 343 | unsigned long ip, | 343 | unsigned long ip, |
| 344 | unsigned long parent_ip, | 344 | unsigned long parent_ip, |
| 345 | unsigned long flags, int pc); | 345 | unsigned long flags, int pc); |
| 346 | void trace_graph_function(struct trace_array *tr, | ||
| 347 | unsigned long ip, | ||
| 348 | unsigned long parent_ip, | ||
| 349 | unsigned long flags, int pc); | ||
| 346 | void trace_default_header(struct seq_file *m); | 350 | void trace_default_header(struct seq_file *m); |
| 347 | void print_trace_header(struct seq_file *m, struct trace_iterator *iter); | 351 | void print_trace_header(struct seq_file *m, struct trace_iterator *iter); |
| 348 | int trace_empty(struct trace_iterator *iter); | 352 | int trace_empty(struct trace_iterator *iter); |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 31cc4cb0dbf2..39c059ca670e 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | #include <linux/kprobes.h> | 9 | #include <linux/kprobes.h> |
| 10 | #include "trace.h" | 10 | #include "trace.h" |
| 11 | 11 | ||
| 12 | static char *perf_trace_buf[4]; | 12 | static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS]; |
| 13 | 13 | ||
| 14 | /* | 14 | /* |
| 15 | * Force it to be aligned to unsigned long to avoid misaligned accesses | 15 | * Force it to be aligned to unsigned long to avoid misaligned accesses |
| @@ -24,7 +24,7 @@ static int total_ref_count; | |||
| 24 | static int perf_trace_event_init(struct ftrace_event_call *tp_event, | 24 | static int perf_trace_event_init(struct ftrace_event_call *tp_event, |
| 25 | struct perf_event *p_event) | 25 | struct perf_event *p_event) |
| 26 | { | 26 | { |
| 27 | struct hlist_head *list; | 27 | struct hlist_head __percpu *list; |
| 28 | int ret = -ENOMEM; | 28 | int ret = -ENOMEM; |
| 29 | int cpu; | 29 | int cpu; |
| 30 | 30 | ||
| @@ -42,11 +42,11 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event, | |||
| 42 | tp_event->perf_events = list; | 42 | tp_event->perf_events = list; |
| 43 | 43 | ||
| 44 | if (!total_ref_count) { | 44 | if (!total_ref_count) { |
| 45 | char *buf; | 45 | char __percpu *buf; |
| 46 | int i; | 46 | int i; |
| 47 | 47 | ||
| 48 | for (i = 0; i < 4; i++) { | 48 | for (i = 0; i < PERF_NR_CONTEXTS; i++) { |
| 49 | buf = (char *)alloc_percpu(perf_trace_t); | 49 | buf = (char __percpu *)alloc_percpu(perf_trace_t); |
| 50 | if (!buf) | 50 | if (!buf) |
| 51 | goto fail; | 51 | goto fail; |
| 52 | 52 | ||
| @@ -65,7 +65,7 @@ fail: | |||
| 65 | if (!total_ref_count) { | 65 | if (!total_ref_count) { |
| 66 | int i; | 66 | int i; |
| 67 | 67 | ||
| 68 | for (i = 0; i < 4; i++) { | 68 | for (i = 0; i < PERF_NR_CONTEXTS; i++) { |
| 69 | free_percpu(perf_trace_buf[i]); | 69 | free_percpu(perf_trace_buf[i]); |
| 70 | perf_trace_buf[i] = NULL; | 70 | perf_trace_buf[i] = NULL; |
| 71 | } | 71 | } |
| @@ -101,22 +101,26 @@ int perf_trace_init(struct perf_event *p_event) | |||
| 101 | return ret; | 101 | return ret; |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | int perf_trace_enable(struct perf_event *p_event) | 104 | int perf_trace_add(struct perf_event *p_event, int flags) |
| 105 | { | 105 | { |
| 106 | struct ftrace_event_call *tp_event = p_event->tp_event; | 106 | struct ftrace_event_call *tp_event = p_event->tp_event; |
| 107 | struct hlist_head __percpu *pcpu_list; | ||
| 107 | struct hlist_head *list; | 108 | struct hlist_head *list; |
| 108 | 109 | ||
| 109 | list = tp_event->perf_events; | 110 | pcpu_list = tp_event->perf_events; |
| 110 | if (WARN_ON_ONCE(!list)) | 111 | if (WARN_ON_ONCE(!pcpu_list)) |
| 111 | return -EINVAL; | 112 | return -EINVAL; |
| 112 | 113 | ||
| 113 | list = this_cpu_ptr(list); | 114 | if (!(flags & PERF_EF_START)) |
| 115 | p_event->hw.state = PERF_HES_STOPPED; | ||
| 116 | |||
| 117 | list = this_cpu_ptr(pcpu_list); | ||
| 114 | hlist_add_head_rcu(&p_event->hlist_entry, list); | 118 | hlist_add_head_rcu(&p_event->hlist_entry, list); |
| 115 | 119 | ||
| 116 | return 0; | 120 | return 0; |
| 117 | } | 121 | } |
| 118 | 122 | ||
| 119 | void perf_trace_disable(struct perf_event *p_event) | 123 | void perf_trace_del(struct perf_event *p_event, int flags) |
| 120 | { | 124 | { |
| 121 | hlist_del_rcu(&p_event->hlist_entry); | 125 | hlist_del_rcu(&p_event->hlist_entry); |
| 122 | } | 126 | } |
| @@ -142,7 +146,7 @@ void perf_trace_destroy(struct perf_event *p_event) | |||
| 142 | tp_event->perf_events = NULL; | 146 | tp_event->perf_events = NULL; |
| 143 | 147 | ||
| 144 | if (!--total_ref_count) { | 148 | if (!--total_ref_count) { |
| 145 | for (i = 0; i < 4; i++) { | 149 | for (i = 0; i < PERF_NR_CONTEXTS; i++) { |
| 146 | free_percpu(perf_trace_buf[i]); | 150 | free_percpu(perf_trace_buf[i]); |
| 147 | perf_trace_buf[i] = NULL; | 151 | perf_trace_buf[i] = NULL; |
| 148 | } | 152 | } |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 4c758f146328..398c0e8b332c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -600,21 +600,29 @@ out: | |||
| 600 | 600 | ||
| 601 | enum { | 601 | enum { |
| 602 | FORMAT_HEADER = 1, | 602 | FORMAT_HEADER = 1, |
| 603 | FORMAT_PRINTFMT = 2, | 603 | FORMAT_FIELD_SEPERATOR = 2, |
| 604 | FORMAT_PRINTFMT = 3, | ||
| 604 | }; | 605 | }; |
| 605 | 606 | ||
| 606 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) | 607 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) |
| 607 | { | 608 | { |
| 608 | struct ftrace_event_call *call = m->private; | 609 | struct ftrace_event_call *call = m->private; |
| 609 | struct ftrace_event_field *field; | 610 | struct ftrace_event_field *field; |
| 610 | struct list_head *head; | 611 | struct list_head *common_head = &ftrace_common_fields; |
| 612 | struct list_head *head = trace_get_fields(call); | ||
| 611 | 613 | ||
| 612 | (*pos)++; | 614 | (*pos)++; |
| 613 | 615 | ||
| 614 | switch ((unsigned long)v) { | 616 | switch ((unsigned long)v) { |
| 615 | case FORMAT_HEADER: | 617 | case FORMAT_HEADER: |
| 616 | head = &ftrace_common_fields; | 618 | if (unlikely(list_empty(common_head))) |
| 619 | return NULL; | ||
| 620 | |||
| 621 | field = list_entry(common_head->prev, | ||
| 622 | struct ftrace_event_field, link); | ||
| 623 | return field; | ||
| 617 | 624 | ||
| 625 | case FORMAT_FIELD_SEPERATOR: | ||
| 618 | if (unlikely(list_empty(head))) | 626 | if (unlikely(list_empty(head))) |
| 619 | return NULL; | 627 | return NULL; |
| 620 | 628 | ||
| @@ -626,31 +634,10 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 626 | return NULL; | 634 | return NULL; |
| 627 | } | 635 | } |
| 628 | 636 | ||
| 629 | head = trace_get_fields(call); | ||
| 630 | |||
| 631 | /* | ||
| 632 | * To separate common fields from event fields, the | ||
| 633 | * LSB is set on the first event field. Clear it in case. | ||
| 634 | */ | ||
| 635 | v = (void *)((unsigned long)v & ~1L); | ||
| 636 | |||
| 637 | field = v; | 637 | field = v; |
| 638 | /* | 638 | if (field->link.prev == common_head) |
| 639 | * If this is a common field, and at the end of the list, then | 639 | return (void *)FORMAT_FIELD_SEPERATOR; |
| 640 | * continue with main list. | 640 | else if (field->link.prev == head) |
| 641 | */ | ||
| 642 | if (field->link.prev == &ftrace_common_fields) { | ||
| 643 | if (unlikely(list_empty(head))) | ||
| 644 | return NULL; | ||
| 645 | field = list_entry(head->prev, struct ftrace_event_field, link); | ||
| 646 | /* Set the LSB to notify f_show to print an extra newline */ | ||
| 647 | field = (struct ftrace_event_field *) | ||
| 648 | ((unsigned long)field | 1); | ||
| 649 | return field; | ||
| 650 | } | ||
| 651 | |||
| 652 | /* If we are done tell f_show to print the format */ | ||
| 653 | if (field->link.prev == head) | ||
| 654 | return (void *)FORMAT_PRINTFMT; | 641 | return (void *)FORMAT_PRINTFMT; |
| 655 | 642 | ||
| 656 | field = list_entry(field->link.prev, struct ftrace_event_field, link); | 643 | field = list_entry(field->link.prev, struct ftrace_event_field, link); |
| @@ -688,22 +675,16 @@ static int f_show(struct seq_file *m, void *v) | |||
| 688 | seq_printf(m, "format:\n"); | 675 | seq_printf(m, "format:\n"); |
| 689 | return 0; | 676 | return 0; |
| 690 | 677 | ||
| 678 | case FORMAT_FIELD_SEPERATOR: | ||
| 679 | seq_putc(m, '\n'); | ||
| 680 | return 0; | ||
| 681 | |||
| 691 | case FORMAT_PRINTFMT: | 682 | case FORMAT_PRINTFMT: |
| 692 | seq_printf(m, "\nprint fmt: %s\n", | 683 | seq_printf(m, "\nprint fmt: %s\n", |
| 693 | call->print_fmt); | 684 | call->print_fmt); |
| 694 | return 0; | 685 | return 0; |
| 695 | } | 686 | } |
| 696 | 687 | ||
| 697 | /* | ||
| 698 | * To separate common fields from event fields, the | ||
| 699 | * LSB is set on the first event field. Clear it and | ||
| 700 | * print a newline if it is set. | ||
| 701 | */ | ||
| 702 | if ((unsigned long)v & 1) { | ||
| 703 | seq_putc(m, '\n'); | ||
| 704 | v = (void *)((unsigned long)v & ~1L); | ||
| 705 | } | ||
| 706 | |||
| 707 | field = v; | 688 | field = v; |
| 708 | 689 | ||
| 709 | /* | 690 | /* |
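The f_next()/f_show() rework drops the pointer-LSB trick and instead hands back a small sentinel value (FORMAT_FIELD_SEPERATOR) between the common fields and the per-event fields, so f_show() prints the blank line from an explicit state rather than from a tagged pointer. The sketch below reproduces that walk with plain arrays standing in for the kernel's field lists; everything in it is illustrative only.

/* Sketch of the "common fields, separator, event fields" walk of f_next(). */
#include <stdio.h>
#include <stddef.h>

static const char *common_fields[] = { "common_type", "common_pid" };
static const char *event_fields[]  = { "prev_comm", "next_comm" };

#define N(a) (sizeof(a) / sizeof((a)[0]))

/* Return the field name for position @pos, "\n" for the separator slot
 * (the FORMAT_FIELD_SEPERATOR state), or NULL when the walk is done
 * (where FORMAT_PRINTFMT would follow in the kernel). */
static const char *format_next(size_t pos)
{
        if (pos < N(common_fields))
                return common_fields[pos];
        if (pos == N(common_fields))
                return "\n";
        pos -= N(common_fields) + 1;
        if (pos < N(event_fields))
                return event_fields[pos];
        return NULL;
}

int main(void)
{
        const char *s;
        size_t pos;

        printf("format:\n");
        for (pos = 0; (s = format_next(pos)) != NULL; pos++) {
                if (s[0] == '\n')
                        printf("\n");           /* separator between groups */
                else
                        printf("\tfield: %s;\n", s);
        }
        return 0;
}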
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 6f233698518e..76b05980225c 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -15,15 +15,19 @@ | |||
| 15 | #include "trace.h" | 15 | #include "trace.h" |
| 16 | #include "trace_output.h" | 16 | #include "trace_output.h" |
| 17 | 17 | ||
| 18 | /* When set, irq functions will be ignored */ | ||
| 19 | static int ftrace_graph_skip_irqs; | ||
| 20 | |||
| 18 | struct fgraph_cpu_data { | 21 | struct fgraph_cpu_data { |
| 19 | pid_t last_pid; | 22 | pid_t last_pid; |
| 20 | int depth; | 23 | int depth; |
| 24 | int depth_irq; | ||
| 21 | int ignore; | 25 | int ignore; |
| 22 | unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; | 26 | unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; |
| 23 | }; | 27 | }; |
| 24 | 28 | ||
| 25 | struct fgraph_data { | 29 | struct fgraph_data { |
| 26 | struct fgraph_cpu_data *cpu_data; | 30 | struct fgraph_cpu_data __percpu *cpu_data; |
| 27 | 31 | ||
| 28 | /* Place to preserve last processed entry. */ | 32 | /* Place to preserve last processed entry. */ |
| 29 | struct ftrace_graph_ent_entry ent; | 33 | struct ftrace_graph_ent_entry ent; |
| @@ -41,6 +45,7 @@ struct fgraph_data { | |||
| 41 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 45 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
| 42 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | 46 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
| 43 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 | 47 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
| 48 | #define TRACE_GRAPH_PRINT_IRQS 0x40 | ||
| 44 | 49 | ||
| 45 | static struct tracer_opt trace_opts[] = { | 50 | static struct tracer_opt trace_opts[] = { |
| 46 | /* Display overruns? (for self-debug purpose) */ | 51 | /* Display overruns? (for self-debug purpose) */ |
| @@ -55,13 +60,15 @@ static struct tracer_opt trace_opts[] = { | |||
| 55 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | 60 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, |
| 56 | /* Display absolute time of an entry */ | 61 | /* Display absolute time of an entry */ |
| 57 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | 62 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, |
| 63 | /* Display interrupts */ | ||
| 64 | { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, | ||
| 58 | { } /* Empty entry */ | 65 | { } /* Empty entry */ |
| 59 | }; | 66 | }; |
| 60 | 67 | ||
| 61 | static struct tracer_flags tracer_flags = { | 68 | static struct tracer_flags tracer_flags = { |
| 62 | /* Don't display overruns and proc by default */ | 69 | /* Don't display overruns and proc by default */ |
| 63 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | | 70 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
| 64 | TRACE_GRAPH_PRINT_DURATION, | 71 | TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, |
| 65 | .opts = trace_opts | 72 | .opts = trace_opts |
| 66 | }; | 73 | }; |
| 67 | 74 | ||
| @@ -204,6 +211,14 @@ int __trace_graph_entry(struct trace_array *tr, | |||
| 204 | return 1; | 211 | return 1; |
| 205 | } | 212 | } |
| 206 | 213 | ||
| 214 | static inline int ftrace_graph_ignore_irqs(void) | ||
| 215 | { | ||
| 216 | if (!ftrace_graph_skip_irqs) | ||
| 217 | return 0; | ||
| 218 | |||
| 219 | return in_irq(); | ||
| 220 | } | ||
| 221 | |||
| 207 | int trace_graph_entry(struct ftrace_graph_ent *trace) | 222 | int trace_graph_entry(struct ftrace_graph_ent *trace) |
| 208 | { | 223 | { |
| 209 | struct trace_array *tr = graph_array; | 224 | struct trace_array *tr = graph_array; |
| @@ -218,7 +233,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
| 218 | return 0; | 233 | return 0; |
| 219 | 234 | ||
| 220 | /* trace it when it is-nested-in or is a function enabled. */ | 235 | /* trace it when it is-nested-in or is a function enabled. */ |
| 221 | if (!(trace->depth || ftrace_graph_addr(trace->func))) | 236 | if (!(trace->depth || ftrace_graph_addr(trace->func)) || |
| 237 | ftrace_graph_ignore_irqs()) | ||
| 222 | return 0; | 238 | return 0; |
| 223 | 239 | ||
| 224 | local_irq_save(flags); | 240 | local_irq_save(flags); |
| @@ -246,6 +262,34 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) | |||
| 246 | return trace_graph_entry(trace); | 262 | return trace_graph_entry(trace); |
| 247 | } | 263 | } |
| 248 | 264 | ||
| 265 | static void | ||
| 266 | __trace_graph_function(struct trace_array *tr, | ||
| 267 | unsigned long ip, unsigned long flags, int pc) | ||
| 268 | { | ||
| 269 | u64 time = trace_clock_local(); | ||
| 270 | struct ftrace_graph_ent ent = { | ||
| 271 | .func = ip, | ||
| 272 | .depth = 0, | ||
| 273 | }; | ||
| 274 | struct ftrace_graph_ret ret = { | ||
| 275 | .func = ip, | ||
| 276 | .depth = 0, | ||
| 277 | .calltime = time, | ||
| 278 | .rettime = time, | ||
| 279 | }; | ||
| 280 | |||
| 281 | __trace_graph_entry(tr, &ent, flags, pc); | ||
| 282 | __trace_graph_return(tr, &ret, flags, pc); | ||
| 283 | } | ||
| 284 | |||
| 285 | void | ||
| 286 | trace_graph_function(struct trace_array *tr, | ||
| 287 | unsigned long ip, unsigned long parent_ip, | ||
| 288 | unsigned long flags, int pc) | ||
| 289 | { | ||
| 290 | __trace_graph_function(tr, ip, flags, pc); | ||
| 291 | } | ||
| 292 | |||
| 249 | void __trace_graph_return(struct trace_array *tr, | 293 | void __trace_graph_return(struct trace_array *tr, |
| 250 | struct ftrace_graph_ret *trace, | 294 | struct ftrace_graph_ret *trace, |
| 251 | unsigned long flags, | 295 | unsigned long flags, |
| @@ -649,8 +693,9 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
| 649 | 693 | ||
| 650 | /* Print nsecs (we don't want to exceed 7 numbers) */ | 694 | /* Print nsecs (we don't want to exceed 7 numbers) */ |
| 651 | if (len < 7) { | 695 | if (len < 7) { |
| 652 | snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu", | 696 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); |
| 653 | nsecs_rem); | 697 | |
| 698 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); | ||
| 654 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 699 | ret = trace_seq_printf(s, ".%s", nsecs_str); |
| 655 | if (!ret) | 700 | if (!ret) |
| 656 | return TRACE_TYPE_PARTIAL_LINE; | 701 | return TRACE_TYPE_PARTIAL_LINE; |
| @@ -855,6 +900,108 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
| 855 | return 0; | 900 | return 0; |
| 856 | } | 901 | } |
| 857 | 902 | ||
| 903 | /* | ||
| 904 | * Entry check for irq code | ||
| 905 | * | ||
| 906 | * returns 1 if | ||
| 907 | * - we are inside irq code | ||
| 908 | * - we just entered irq code | ||
| 909 | * | ||
| 910 | * returns 0 if | ||
| 911 | * - funcgraph-irqs option is set | ||
| 912 | * - we are not inside irq code | ||
| 913 | */ | ||
| 914 | static int | ||
| 915 | check_irq_entry(struct trace_iterator *iter, u32 flags, | ||
| 916 | unsigned long addr, int depth) | ||
| 917 | { | ||
| 918 | int cpu = iter->cpu; | ||
| 919 | int *depth_irq; | ||
| 920 | struct fgraph_data *data = iter->private; | ||
| 921 | |||
| 922 | /* | ||
| 923 | * If we are either displaying irqs, or we got called as | ||
| 924 | * a graph event and private data does not exist, | ||
| 925 | * then we bypass the irq check. | ||
| 926 | */ | ||
| 927 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || | ||
| 928 | (!data)) | ||
| 929 | return 0; | ||
| 930 | |||
| 931 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | ||
| 932 | |||
| 933 | /* | ||
| 934 | * We are inside the irq code | ||
| 935 | */ | ||
| 936 | if (*depth_irq >= 0) | ||
| 937 | return 1; | ||
| 938 | |||
| 939 | if ((addr < (unsigned long)__irqentry_text_start) || | ||
| 940 | (addr >= (unsigned long)__irqentry_text_end)) | ||
| 941 | return 0; | ||
| 942 | |||
| 943 | /* | ||
| 944 | * We are entering irq code. | ||
| 945 | */ | ||
| 946 | *depth_irq = depth; | ||
| 947 | return 1; | ||
| 948 | } | ||
| 949 | |||
| 950 | /* | ||
| 951 | * Return check for irq code | ||
| 952 | * | ||
| 953 | * returns 1 if | ||
| 954 | * - we are inside irq code | ||
| 955 | * - we just left irq code | ||
| 956 | * | ||
| 957 | * returns 0 if | ||
| 958 | * - funcgraph-irqs option is set | ||
| 959 | * - we are not inside irq code | ||
| 960 | */ | ||
| 961 | static int | ||
| 962 | check_irq_return(struct trace_iterator *iter, u32 flags, int depth) | ||
| 963 | { | ||
| 964 | int cpu = iter->cpu; | ||
| 965 | int *depth_irq; | ||
| 966 | struct fgraph_data *data = iter->private; | ||
| 967 | |||
| 968 | /* | ||
| 969 | * If we are either displaying irqs, or we got called as | ||
| 970 | * a graph event and private data does not exist, | ||
| 971 | * then we bypass the irq check. | ||
| 972 | */ | ||
| 973 | if ((flags & TRACE_GRAPH_PRINT_IRQS) || | ||
| 974 | (!data)) | ||
| 975 | return 0; | ||
| 976 | |||
| 977 | depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | ||
| 978 | |||
| 979 | /* | ||
| 980 | * We are not inside the irq code. | ||
| 981 | */ | ||
| 982 | if (*depth_irq == -1) | ||
| 983 | return 0; | ||
| 984 | |||
| 985 | /* | ||
| 986 | * We are inside the irq code, and this is the return of the | ||
| 987 | * entry that took us there. Don't trace it and clear the | ||
| 988 | * entry depth, since we are now leaving the irq code. | ||
| 989 | * | ||
| 990 | * This condition makes sure we 'leave the irq code' once we | ||
| 991 | * return at or above the recorded entry depth, which protects | ||
| 992 | * us from losing the matching RETURN entry. | ||
| 993 | */ | ||
| 994 | if (*depth_irq >= depth) { | ||
| 995 | *depth_irq = -1; | ||
| 996 | return 1; | ||
| 997 | } | ||
| 998 | |||
| 999 | /* | ||
| 1000 | * We are inside the irq code, and this is not the entry. | ||
| 1001 | */ | ||
| 1002 | return 1; | ||
| 1003 | } | ||
| 1004 | |||
| 858 | static enum print_line_t | 1005 | static enum print_line_t |
| 859 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 1006 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
| 860 | struct trace_iterator *iter, u32 flags) | 1007 | struct trace_iterator *iter, u32 flags) |
| @@ -865,6 +1012,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
| 865 | static enum print_line_t ret; | 1012 | static enum print_line_t ret; |
| 866 | int cpu = iter->cpu; | 1013 | int cpu = iter->cpu; |
| 867 | 1014 | ||
| 1015 | if (check_irq_entry(iter, flags, call->func, call->depth)) | ||
| 1016 | return TRACE_TYPE_HANDLED; | ||
| 1017 | |||
| 868 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) | 1018 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) |
| 869 | return TRACE_TYPE_PARTIAL_LINE; | 1019 | return TRACE_TYPE_PARTIAL_LINE; |
| 870 | 1020 | ||
| @@ -902,6 +1052,9 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 902 | int ret; | 1052 | int ret; |
| 903 | int i; | 1053 | int i; |
| 904 | 1054 | ||
| 1055 | if (check_irq_return(iter, flags, trace->depth)) | ||
| 1056 | return TRACE_TYPE_HANDLED; | ||
| 1057 | |||
| 905 | if (data) { | 1058 | if (data) { |
| 906 | struct fgraph_cpu_data *cpu_data; | 1059 | struct fgraph_cpu_data *cpu_data; |
| 907 | int cpu = iter->cpu; | 1060 | int cpu = iter->cpu; |
| @@ -1054,7 +1207,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
| 1054 | 1207 | ||
| 1055 | 1208 | ||
| 1056 | enum print_line_t | 1209 | enum print_line_t |
| 1057 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) | 1210 | __print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
| 1058 | { | 1211 | { |
| 1059 | struct ftrace_graph_ent_entry *field; | 1212 | struct ftrace_graph_ent_entry *field; |
| 1060 | struct fgraph_data *data = iter->private; | 1213 | struct fgraph_data *data = iter->private; |
| @@ -1117,7 +1270,18 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags) | |||
| 1117 | static enum print_line_t | 1270 | static enum print_line_t |
| 1118 | print_graph_function(struct trace_iterator *iter) | 1271 | print_graph_function(struct trace_iterator *iter) |
| 1119 | { | 1272 | { |
| 1120 | return print_graph_function_flags(iter, tracer_flags.val); | 1273 | return __print_graph_function_flags(iter, tracer_flags.val); |
| 1274 | } | ||
| 1275 | |||
| 1276 | enum print_line_t print_graph_function_flags(struct trace_iterator *iter, | ||
| 1277 | u32 flags) | ||
| 1278 | { | ||
| 1279 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
| 1280 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
| 1281 | else | ||
| 1282 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
| 1283 | |||
| 1284 | return __print_graph_function_flags(iter, flags); | ||
| 1121 | } | 1285 | } |
| 1122 | 1286 | ||
| 1123 | static enum print_line_t | 1287 | static enum print_line_t |
| @@ -1149,7 +1313,7 @@ static void print_lat_header(struct seq_file *s, u32 flags) | |||
| 1149 | seq_printf(s, "#%.*s|||| / \n", size, spaces); | 1313 | seq_printf(s, "#%.*s|||| / \n", size, spaces); |
| 1150 | } | 1314 | } |
| 1151 | 1315 | ||
| 1152 | void print_graph_headers_flags(struct seq_file *s, u32 flags) | 1316 | static void __print_graph_headers_flags(struct seq_file *s, u32 flags) |
| 1153 | { | 1317 | { |
| 1154 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; | 1318 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; |
| 1155 | 1319 | ||
| @@ -1190,6 +1354,23 @@ void print_graph_headers(struct seq_file *s) | |||
| 1190 | print_graph_headers_flags(s, tracer_flags.val); | 1354 | print_graph_headers_flags(s, tracer_flags.val); |
| 1191 | } | 1355 | } |
| 1192 | 1356 | ||
| 1357 | void print_graph_headers_flags(struct seq_file *s, u32 flags) | ||
| 1358 | { | ||
| 1359 | struct trace_iterator *iter = s->private; | ||
| 1360 | |||
| 1361 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | ||
| 1362 | /* print nothing if the buffers are empty */ | ||
| 1363 | if (trace_empty(iter)) | ||
| 1364 | return; | ||
| 1365 | |||
| 1366 | print_trace_header(s, iter); | ||
| 1367 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
| 1368 | } else | ||
| 1369 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
| 1370 | |||
| 1371 | __print_graph_headers_flags(s, flags); | ||
| 1372 | } | ||
| 1373 | |||
| 1193 | void graph_trace_open(struct trace_iterator *iter) | 1374 | void graph_trace_open(struct trace_iterator *iter) |
| 1194 | { | 1375 | { |
| 1195 | /* pid and depth on the last trace processed */ | 1376 | /* pid and depth on the last trace processed */ |
| @@ -1210,9 +1391,12 @@ void graph_trace_open(struct trace_iterator *iter) | |||
| 1210 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 1391 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
| 1211 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | 1392 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
| 1212 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); | 1393 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); |
| 1394 | int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); | ||
| 1395 | |||
| 1213 | *pid = -1; | 1396 | *pid = -1; |
| 1214 | *depth = 0; | 1397 | *depth = 0; |
| 1215 | *ignore = 0; | 1398 | *ignore = 0; |
| 1399 | *depth_irq = -1; | ||
| 1216 | } | 1400 | } |
| 1217 | 1401 | ||
| 1218 | iter->private = data; | 1402 | iter->private = data; |
| @@ -1235,6 +1419,14 @@ void graph_trace_close(struct trace_iterator *iter) | |||
| 1235 | } | 1419 | } |
| 1236 | } | 1420 | } |
| 1237 | 1421 | ||
| 1422 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) | ||
| 1423 | { | ||
| 1424 | if (bit == TRACE_GRAPH_PRINT_IRQS) | ||
| 1425 | ftrace_graph_skip_irqs = !set; | ||
| 1426 | |||
| 1427 | return 0; | ||
| 1428 | } | ||
| 1429 | |||
| 1238 | static struct trace_event_functions graph_functions = { | 1430 | static struct trace_event_functions graph_functions = { |
| 1239 | .trace = print_graph_function_event, | 1431 | .trace = print_graph_function_event, |
| 1240 | }; | 1432 | }; |
| @@ -1261,6 +1453,7 @@ static struct tracer graph_trace __read_mostly = { | |||
| 1261 | .print_line = print_graph_function, | 1453 | .print_line = print_graph_function, |
| 1262 | .print_header = print_graph_headers, | 1454 | .print_header = print_graph_headers, |
| 1263 | .flags = &tracer_flags, | 1455 | .flags = &tracer_flags, |
| 1456 | .set_flag = func_graph_set_flag, | ||
| 1264 | #ifdef CONFIG_FTRACE_SELFTEST | 1457 | #ifdef CONFIG_FTRACE_SELFTEST |
| 1265 | .selftest = trace_selftest_startup_function_graph, | 1458 | .selftest = trace_selftest_startup_function_graph, |
| 1266 | #endif | 1459 | #endif |
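The trace_functions_graph.c changes above add a funcgraph-irqs option (on by default) and, when it is cleared, suppress everything between an entry into the irq entry text and the matching return, using a per-CPU depth_irq latch. Here is a standalone sketch of that latch; the address window is made up and merely stands in for __irqentry_text_start/__irqentry_text_end (illustration only, not the kernel code).

/*
 * Once an entry address falls inside the irq text range, the current
 * depth is latched and everything is suppressed until a return at or
 * above that depth is seen.
 */
#include <stdio.h>

static int depth_irq = -1;          /* -1: not inside irq code */

/* Hypothetical irq text window (stand-in for __irqentry_text_start/end). */
static unsigned long irq_start = 0x1000, irq_end = 0x2000;

static int skip_entry(unsigned long addr, int depth)
{
        if (depth_irq >= 0)              /* already inside irq code */
                return 1;
        if (addr < irq_start || addr >= irq_end)
                return 0;
        depth_irq = depth;               /* entering irq code */
        return 1;
}

static int skip_return(int depth)
{
        if (depth_irq == -1)
                return 0;
        if (depth_irq >= depth)          /* leaving the latched entry */
                depth_irq = -1;
        return 1;
}

int main(void)
{
        /* entry at depth 2 inside irq text: suppressed and latched */
        printf("%d\n", skip_entry(0x1500, 2));   /* 1 */
        /* nested entry outside irq text: still suppressed */
        printf("%d\n", skip_entry(0x9000, 3));   /* 1 */
        printf("%d\n", skip_return(3));          /* 1 */
        printf("%d\n", skip_return(2));          /* 1, and depth_irq reset */
        printf("%d\n", skip_entry(0x9000, 2));   /* 0: traced again */
        return 0;
}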
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 73a6b0601f2e..5cf8c602b880 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -87,14 +87,22 @@ static __cacheline_aligned_in_smp unsigned long max_sequence; | |||
| 87 | 87 | ||
| 88 | #ifdef CONFIG_FUNCTION_TRACER | 88 | #ifdef CONFIG_FUNCTION_TRACER |
| 89 | /* | 89 | /* |
| 90 | * irqsoff uses its own tracer function to keep the overhead down: | 90 | * Prologue for the preempt and irqs off function tracers. |
| 91 | * | ||
| 92 | * Returns 1 if it is OK to continue, and data->disabled is | ||
| 93 | * incremented. | ||
| 94 | * 0 if the trace is to be ignored, and data->disabled | ||
| 95 | * is kept the same. | ||
| 96 | * | ||
| 97 | * Note, this function is also used outside this ifdef but | ||
| 98 | * inside the #ifdef of the function graph tracer below. | ||
| 99 | * This is OK, since the function graph tracer is | ||
| 100 | * dependent on the function tracer. | ||
| 91 | */ | 101 | */ |
| 92 | static void | 102 | static int func_prolog_dec(struct trace_array *tr, |
| 93 | irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | 103 | struct trace_array_cpu **data, |
| 104 | unsigned long *flags) | ||
| 94 | { | 105 | { |
| 95 | struct trace_array *tr = irqsoff_trace; | ||
| 96 | struct trace_array_cpu *data; | ||
| 97 | unsigned long flags; | ||
| 98 | long disabled; | 106 | long disabled; |
| 99 | int cpu; | 107 | int cpu; |
| 100 | 108 | ||
| @@ -106,18 +114,38 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
| 106 | */ | 114 | */ |
| 107 | cpu = raw_smp_processor_id(); | 115 | cpu = raw_smp_processor_id(); |
| 108 | if (likely(!per_cpu(tracing_cpu, cpu))) | 116 | if (likely(!per_cpu(tracing_cpu, cpu))) |
| 109 | return; | 117 | return 0; |
| 110 | 118 | ||
| 111 | local_save_flags(flags); | 119 | local_save_flags(*flags); |
| 112 | /* slight chance to get a false positive on tracing_cpu */ | 120 | /* slight chance to get a false positive on tracing_cpu */ |
| 113 | if (!irqs_disabled_flags(flags)) | 121 | if (!irqs_disabled_flags(*flags)) |
| 114 | return; | 122 | return 0; |
| 115 | 123 | ||
| 116 | data = tr->data[cpu]; | 124 | *data = tr->data[cpu]; |
| 117 | disabled = atomic_inc_return(&data->disabled); | 125 | disabled = atomic_inc_return(&(*data)->disabled); |
| 118 | 126 | ||
| 119 | if (likely(disabled == 1)) | 127 | if (likely(disabled == 1)) |
| 120 | trace_function(tr, ip, parent_ip, flags, preempt_count()); | 128 | return 1; |
| 129 | |||
| 130 | atomic_dec(&(*data)->disabled); | ||
| 131 | |||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | |||
| 135 | /* | ||
| 136 | * irqsoff uses its own tracer function to keep the overhead down: | ||
| 137 | */ | ||
| 138 | static void | ||
| 139 | irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | ||
| 140 | { | ||
| 141 | struct trace_array *tr = irqsoff_trace; | ||
| 142 | struct trace_array_cpu *data; | ||
| 143 | unsigned long flags; | ||
| 144 | |||
| 145 | if (!func_prolog_dec(tr, &data, &flags)) | ||
| 146 | return; | ||
| 147 | |||
| 148 | trace_function(tr, ip, parent_ip, flags, preempt_count()); | ||
| 121 | 149 | ||
| 122 | atomic_dec(&data->disabled); | 150 | atomic_dec(&data->disabled); |
| 123 | } | 151 | } |
| @@ -155,30 +183,16 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) | |||
| 155 | struct trace_array *tr = irqsoff_trace; | 183 | struct trace_array *tr = irqsoff_trace; |
| 156 | struct trace_array_cpu *data; | 184 | struct trace_array_cpu *data; |
| 157 | unsigned long flags; | 185 | unsigned long flags; |
| 158 | long disabled; | ||
| 159 | int ret; | 186 | int ret; |
| 160 | int cpu; | ||
| 161 | int pc; | 187 | int pc; |
| 162 | 188 | ||
| 163 | cpu = raw_smp_processor_id(); | 189 | if (!func_prolog_dec(tr, &data, &flags)) |
| 164 | if (likely(!per_cpu(tracing_cpu, cpu))) | ||
| 165 | return 0; | 190 | return 0; |
| 166 | 191 | ||
| 167 | local_save_flags(flags); | 192 | pc = preempt_count(); |
| 168 | /* slight chance to get a false positive on tracing_cpu */ | 193 | ret = __trace_graph_entry(tr, trace, flags, pc); |
| 169 | if (!irqs_disabled_flags(flags)) | ||
| 170 | return 0; | ||
| 171 | |||
| 172 | data = tr->data[cpu]; | ||
| 173 | disabled = atomic_inc_return(&data->disabled); | ||
| 174 | |||
| 175 | if (likely(disabled == 1)) { | ||
| 176 | pc = preempt_count(); | ||
| 177 | ret = __trace_graph_entry(tr, trace, flags, pc); | ||
| 178 | } else | ||
| 179 | ret = 0; | ||
| 180 | |||
| 181 | atomic_dec(&data->disabled); | 194 | atomic_dec(&data->disabled); |
| 195 | |||
| 182 | return ret; | 196 | return ret; |
| 183 | } | 197 | } |
| 184 | 198 | ||
| @@ -187,27 +201,13 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace) | |||
| 187 | struct trace_array *tr = irqsoff_trace; | 201 | struct trace_array *tr = irqsoff_trace; |
| 188 | struct trace_array_cpu *data; | 202 | struct trace_array_cpu *data; |
| 189 | unsigned long flags; | 203 | unsigned long flags; |
| 190 | long disabled; | ||
| 191 | int cpu; | ||
| 192 | int pc; | 204 | int pc; |
| 193 | 205 | ||
| 194 | cpu = raw_smp_processor_id(); | 206 | if (!func_prolog_dec(tr, &data, &flags)) |
| 195 | if (likely(!per_cpu(tracing_cpu, cpu))) | ||
| 196 | return; | 207 | return; |
| 197 | 208 | ||
| 198 | local_save_flags(flags); | 209 | pc = preempt_count(); |
| 199 | /* slight chance to get a false positive on tracing_cpu */ | 210 | __trace_graph_return(tr, trace, flags, pc); |
| 200 | if (!irqs_disabled_flags(flags)) | ||
| 201 | return; | ||
| 202 | |||
| 203 | data = tr->data[cpu]; | ||
| 204 | disabled = atomic_inc_return(&data->disabled); | ||
| 205 | |||
| 206 | if (likely(disabled == 1)) { | ||
| 207 | pc = preempt_count(); | ||
| 208 | __trace_graph_return(tr, trace, flags, pc); | ||
| 209 | } | ||
| 210 | |||
| 211 | atomic_dec(&data->disabled); | 211 | atomic_dec(&data->disabled); |
| 212 | } | 212 | } |
| 213 | 213 | ||
| @@ -229,75 +229,33 @@ static void irqsoff_trace_close(struct trace_iterator *iter) | |||
| 229 | 229 | ||
| 230 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) | 230 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) |
| 231 | { | 231 | { |
| 232 | u32 flags = GRAPH_TRACER_FLAGS; | ||
| 233 | |||
| 234 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
| 235 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
| 236 | else | ||
| 237 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
| 238 | |||
| 239 | /* | 232 | /* |
| 240 | * In graph mode call the graph tracer output function, | 233 | * In graph mode call the graph tracer output function, |
| 241 | * otherwise go with the TRACE_FN event handler | 234 | * otherwise go with the TRACE_FN event handler |
| 242 | */ | 235 | */ |
| 243 | if (is_graph()) | 236 | if (is_graph()) |
| 244 | return print_graph_function_flags(iter, flags); | 237 | return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); |
| 245 | 238 | ||
| 246 | return TRACE_TYPE_UNHANDLED; | 239 | return TRACE_TYPE_UNHANDLED; |
| 247 | } | 240 | } |
| 248 | 241 | ||
| 249 | static void irqsoff_print_header(struct seq_file *s) | 242 | static void irqsoff_print_header(struct seq_file *s) |
| 250 | { | 243 | { |
| 251 | if (is_graph()) { | 244 | if (is_graph()) |
| 252 | struct trace_iterator *iter = s->private; | 245 | print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); |
| 253 | u32 flags = GRAPH_TRACER_FLAGS; | 246 | else |
| 254 | |||
| 255 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | ||
| 256 | /* print nothing if the buffers are empty */ | ||
| 257 | if (trace_empty(iter)) | ||
| 258 | return; | ||
| 259 | |||
| 260 | print_trace_header(s, iter); | ||
| 261 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
| 262 | } else | ||
| 263 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
| 264 | |||
| 265 | print_graph_headers_flags(s, flags); | ||
| 266 | } else | ||
| 267 | trace_default_header(s); | 247 | trace_default_header(s); |
| 268 | } | 248 | } |
| 269 | 249 | ||
| 270 | static void | 250 | static void |
| 271 | trace_graph_function(struct trace_array *tr, | ||
| 272 | unsigned long ip, unsigned long flags, int pc) | ||
| 273 | { | ||
| 274 | u64 time = trace_clock_local(); | ||
| 275 | struct ftrace_graph_ent ent = { | ||
| 276 | .func = ip, | ||
| 277 | .depth = 0, | ||
| 278 | }; | ||
| 279 | struct ftrace_graph_ret ret = { | ||
| 280 | .func = ip, | ||
| 281 | .depth = 0, | ||
| 282 | .calltime = time, | ||
| 283 | .rettime = time, | ||
| 284 | }; | ||
| 285 | |||
| 286 | __trace_graph_entry(tr, &ent, flags, pc); | ||
| 287 | __trace_graph_return(tr, &ret, flags, pc); | ||
| 288 | } | ||
| 289 | |||
| 290 | static void | ||
| 291 | __trace_function(struct trace_array *tr, | 251 | __trace_function(struct trace_array *tr, |
| 292 | unsigned long ip, unsigned long parent_ip, | 252 | unsigned long ip, unsigned long parent_ip, |
| 293 | unsigned long flags, int pc) | 253 | unsigned long flags, int pc) |
| 294 | { | 254 | { |
| 295 | if (!is_graph()) | 255 | if (is_graph()) |
| 256 | trace_graph_function(tr, ip, parent_ip, flags, pc); | ||
| 257 | else | ||
| 296 | trace_function(tr, ip, parent_ip, flags, pc); | 258 | trace_function(tr, ip, parent_ip, flags, pc); |
| 297 | else { | ||
| 298 | trace_graph_function(tr, parent_ip, flags, pc); | ||
| 299 | trace_graph_function(tr, ip, flags, pc); | ||
| 300 | } | ||
| 301 | } | 259 | } |
| 302 | 260 | ||
| 303 | #else | 261 | #else |
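The irqsoff changes above fold the duplicated "bump data->disabled, only trace when it went to 1, otherwise back out" sequence into func_prolog_dec(), which every callback now pairs with a single atomic_dec(). A small standalone sketch of just that re-entry guard, using C11 atomics instead of the kernel's atomic_t (illustration, not the kernel code, and it omits the tracing_cpu and irqs-disabled checks the real prologue also does):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled = 0;

static int prolog(void)
{
        /* atomic_fetch_add returns the old value; old == 0 means we own it */
        if (atomic_fetch_add(&disabled, 1) == 0)
                return 1;

        atomic_fetch_sub(&disabled, 1);  /* someone is already tracing */
        return 0;
}

static void epilog(void)
{
        atomic_fetch_sub(&disabled, 1);
}

static void trace_hit(const char *what)
{
        if (!prolog())
                return;
        printf("traced: %s\n", what);
        epilog();
}

int main(void)
{
        trace_hit("outer");

        /* simulate re-entry: the inner hit is dropped, not traced twice */
        atomic_fetch_add(&disabled, 1);
        trace_hit("nested");
        atomic_fetch_sub(&disabled, 1);

        trace_hit("after");
        return 0;
}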
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 4086eae6e81b..7319559ed59f 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -31,48 +31,98 @@ static int wakeup_rt; | |||
| 31 | static arch_spinlock_t wakeup_lock = | 31 | static arch_spinlock_t wakeup_lock = |
| 32 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 32 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 33 | 33 | ||
| 34 | static void wakeup_reset(struct trace_array *tr); | ||
| 34 | static void __wakeup_reset(struct trace_array *tr); | 35 | static void __wakeup_reset(struct trace_array *tr); |
| 36 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace); | ||
| 37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); | ||
| 35 | 38 | ||
| 36 | static int save_lat_flag; | 39 | static int save_lat_flag; |
| 37 | 40 | ||
| 41 | #define TRACE_DISPLAY_GRAPH 1 | ||
| 42 | |||
| 43 | static struct tracer_opt trace_opts[] = { | ||
| 44 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 45 | /* display latency trace as call graph */ | ||
| 46 | { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, | ||
| 47 | #endif | ||
| 48 | { } /* Empty entry */ | ||
| 49 | }; | ||
| 50 | |||
| 51 | static struct tracer_flags tracer_flags = { | ||
| 52 | .val = 0, | ||
| 53 | .opts = trace_opts, | ||
| 54 | }; | ||
| 55 | |||
| 56 | #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) | ||
| 57 | |||
| 38 | #ifdef CONFIG_FUNCTION_TRACER | 58 | #ifdef CONFIG_FUNCTION_TRACER |
| 59 | |||
| 39 | /* | 60 | /* |
| 40 | * irqsoff uses its own tracer function to keep the overhead down: | 61 | * Prologue for the wakeup function tracers. |
| 62 | * | ||
| 63 | * Returns 1 if it is OK to continue, and preemption | ||
| 64 | * is disabled and data->disabled is incremented. | ||
| 65 | * 0 if the trace is to be ignored, and preemption | ||
| 66 | * is not disabled and data->disabled is | ||
| 67 | * kept the same. | ||
| 68 | * | ||
| 69 | * Note, this function is also used outside this ifdef but | ||
| 70 | * inside the #ifdef of the function graph tracer below. | ||
| 71 | * This is OK, since the function graph tracer is | ||
| 72 | * dependent on the function tracer. | ||
| 41 | */ | 73 | */ |
| 42 | static void | 74 | static int |
| 43 | wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | 75 | func_prolog_preempt_disable(struct trace_array *tr, |
| 76 | struct trace_array_cpu **data, | ||
| 77 | int *pc) | ||
| 44 | { | 78 | { |
| 45 | struct trace_array *tr = wakeup_trace; | ||
| 46 | struct trace_array_cpu *data; | ||
| 47 | unsigned long flags; | ||
| 48 | long disabled; | 79 | long disabled; |
| 49 | int cpu; | 80 | int cpu; |
| 50 | int pc; | ||
| 51 | 81 | ||
| 52 | if (likely(!wakeup_task)) | 82 | if (likely(!wakeup_task)) |
| 53 | return; | 83 | return 0; |
| 54 | 84 | ||
| 55 | pc = preempt_count(); | 85 | *pc = preempt_count(); |
| 56 | preempt_disable_notrace(); | 86 | preempt_disable_notrace(); |
| 57 | 87 | ||
| 58 | cpu = raw_smp_processor_id(); | 88 | cpu = raw_smp_processor_id(); |
| 59 | if (cpu != wakeup_current_cpu) | 89 | if (cpu != wakeup_current_cpu) |
| 60 | goto out_enable; | 90 | goto out_enable; |
| 61 | 91 | ||
| 62 | data = tr->data[cpu]; | 92 | *data = tr->data[cpu]; |
| 63 | disabled = atomic_inc_return(&data->disabled); | 93 | disabled = atomic_inc_return(&(*data)->disabled); |
| 64 | if (unlikely(disabled != 1)) | 94 | if (unlikely(disabled != 1)) |
| 65 | goto out; | 95 | goto out; |
| 66 | 96 | ||
| 67 | local_irq_save(flags); | 97 | return 1; |
| 68 | 98 | ||
| 69 | trace_function(tr, ip, parent_ip, flags, pc); | 99 | out: |
| 100 | atomic_dec(&(*data)->disabled); | ||
| 101 | |||
| 102 | out_enable: | ||
| 103 | preempt_enable_notrace(); | ||
| 104 | return 0; | ||
| 105 | } | ||
| 70 | 106 | ||
| 107 | /* | ||
| 108 | * wakeup uses its own tracer function to keep the overhead down: | ||
| 109 | */ | ||
| 110 | static void | ||
| 111 | wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | ||
| 112 | { | ||
| 113 | struct trace_array *tr = wakeup_trace; | ||
| 114 | struct trace_array_cpu *data; | ||
| 115 | unsigned long flags; | ||
| 116 | int pc; | ||
| 117 | |||
| 118 | if (!func_prolog_preempt_disable(tr, &data, &pc)) | ||
| 119 | return; | ||
| 120 | |||
| 121 | local_irq_save(flags); | ||
| 122 | trace_function(tr, ip, parent_ip, flags, pc); | ||
| 71 | local_irq_restore(flags); | 123 | local_irq_restore(flags); |
| 72 | 124 | ||
| 73 | out: | ||
| 74 | atomic_dec(&data->disabled); | 125 | atomic_dec(&data->disabled); |
| 75 | out_enable: | ||
| 76 | preempt_enable_notrace(); | 126 | preempt_enable_notrace(); |
| 77 | } | 127 | } |
| 78 | 128 | ||
| @@ -82,6 +132,156 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
| 82 | }; | 132 | }; |
| 83 | #endif /* CONFIG_FUNCTION_TRACER */ | 133 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 84 | 134 | ||
| 135 | static int start_func_tracer(int graph) | ||
| 136 | { | ||
| 137 | int ret; | ||
| 138 | |||
| 139 | if (!graph) | ||
| 140 | ret = register_ftrace_function(&trace_ops); | ||
| 141 | else | ||
| 142 | ret = register_ftrace_graph(&wakeup_graph_return, | ||
| 143 | &wakeup_graph_entry); | ||
| 144 | |||
| 145 | if (!ret && tracing_is_enabled()) | ||
| 146 | tracer_enabled = 1; | ||
| 147 | else | ||
| 148 | tracer_enabled = 0; | ||
| 149 | |||
| 150 | return ret; | ||
| 151 | } | ||
| 152 | |||
| 153 | static void stop_func_tracer(int graph) | ||
| 154 | { | ||
| 155 | tracer_enabled = 0; | ||
| 156 | |||
| 157 | if (!graph) | ||
| 158 | unregister_ftrace_function(&trace_ops); | ||
| 159 | else | ||
| 160 | unregister_ftrace_graph(); | ||
| 161 | } | ||
| 162 | |||
| 163 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 164 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | ||
| 165 | { | ||
| 166 | |||
| 167 | if (!(bit & TRACE_DISPLAY_GRAPH)) | ||
| 168 | return -EINVAL; | ||
| 169 | |||
| 170 | if (!(is_graph() ^ set)) | ||
| 171 | return 0; | ||
| 172 | |||
| 173 | stop_func_tracer(!set); | ||
| 174 | |||
| 175 | wakeup_reset(wakeup_trace); | ||
| 176 | tracing_max_latency = 0; | ||
| 177 | |||
| 178 | return start_func_tracer(set); | ||
| 179 | } | ||
| 180 | |||
| 181 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace) | ||
| 182 | { | ||
| 183 | struct trace_array *tr = wakeup_trace; | ||
| 184 | struct trace_array_cpu *data; | ||
| 185 | unsigned long flags; | ||
| 186 | int pc, ret = 0; | ||
| 187 | |||
| 188 | if (!func_prolog_preempt_disable(tr, &data, &pc)) | ||
| 189 | return 0; | ||
| 190 | |||
| 191 | local_save_flags(flags); | ||
| 192 | ret = __trace_graph_entry(tr, trace, flags, pc); | ||
| 193 | atomic_dec(&data->disabled); | ||
| 194 | preempt_enable_notrace(); | ||
| 195 | |||
| 196 | return ret; | ||
| 197 | } | ||
| 198 | |||
| 199 | static void wakeup_graph_return(struct ftrace_graph_ret *trace) | ||
| 200 | { | ||
| 201 | struct trace_array *tr = wakeup_trace; | ||
| 202 | struct trace_array_cpu *data; | ||
| 203 | unsigned long flags; | ||
| 204 | int pc; | ||
| 205 | |||
| 206 | if (!func_prolog_preempt_disable(tr, &data, &pc)) | ||
| 207 | return; | ||
| 208 | |||
| 209 | local_save_flags(flags); | ||
| 210 | __trace_graph_return(tr, trace, flags, pc); | ||
| 211 | atomic_dec(&data->disabled); | ||
| 212 | |||
| 213 | preempt_enable_notrace(); | ||
| 214 | return; | ||
| 215 | } | ||
| 216 | |||
| 217 | static void wakeup_trace_open(struct trace_iterator *iter) | ||
| 218 | { | ||
| 219 | if (is_graph()) | ||
| 220 | graph_trace_open(iter); | ||
| 221 | } | ||
| 222 | |||
| 223 | static void wakeup_trace_close(struct trace_iterator *iter) | ||
| 224 | { | ||
| 225 | if (iter->private) | ||
| 226 | graph_trace_close(iter); | ||
| 227 | } | ||
| 228 | |||
| 229 | #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC) | ||
| 230 | |||
| 231 | static enum print_line_t wakeup_print_line(struct trace_iterator *iter) | ||
| 232 | { | ||
| 233 | /* | ||
| 234 | * In graph mode call the graph tracer output function, | ||
| 235 | * otherwise go with the TRACE_FN event handler | ||
| 236 | */ | ||
| 237 | if (is_graph()) | ||
| 238 | return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); | ||
| 239 | |||
| 240 | return TRACE_TYPE_UNHANDLED; | ||
| 241 | } | ||
| 242 | |||
| 243 | static void wakeup_print_header(struct seq_file *s) | ||
| 244 | { | ||
| 245 | if (is_graph()) | ||
| 246 | print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); | ||
| 247 | else | ||
| 248 | trace_default_header(s); | ||
| 249 | } | ||
| 250 | |||
| 251 | static void | ||
| 252 | __trace_function(struct trace_array *tr, | ||
| 253 | unsigned long ip, unsigned long parent_ip, | ||
| 254 | unsigned long flags, int pc) | ||
| 255 | { | ||
| 256 | if (is_graph()) | ||
| 257 | trace_graph_function(tr, ip, parent_ip, flags, pc); | ||
| 258 | else | ||
| 259 | trace_function(tr, ip, parent_ip, flags, pc); | ||
| 260 | } | ||
| 261 | #else | ||
| 262 | #define __trace_function trace_function | ||
| 263 | |||
| 264 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | ||
| 265 | { | ||
| 266 | return -EINVAL; | ||
| 267 | } | ||
| 268 | |||
| 269 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace) | ||
| 270 | { | ||
| 271 | return -1; | ||
| 272 | } | ||
| 273 | |||
| 274 | static enum print_line_t wakeup_print_line(struct trace_iterator *iter) | ||
| 275 | { | ||
| 276 | return TRACE_TYPE_UNHANDLED; | ||
| 277 | } | ||
| 278 | |||
| 279 | static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } | ||
| 280 | static void wakeup_print_header(struct seq_file *s) { } | ||
| 281 | static void wakeup_trace_open(struct trace_iterator *iter) { } | ||
| 282 | static void wakeup_trace_close(struct trace_iterator *iter) { } | ||
| 283 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
| 284 | |||
| 85 | /* | 285 | /* |
| 86 | * Should this new latency be reported/recorded? | 286 | * Should this new latency be reported/recorded? |
| 87 | */ | 287 | */ |
| @@ -152,7 +352,7 @@ probe_wakeup_sched_switch(void *ignore, | |||
| 152 | /* The task we are waiting for is waking up */ | 352 | /* The task we are waiting for is waking up */ |
| 153 | data = wakeup_trace->data[wakeup_cpu]; | 353 | data = wakeup_trace->data[wakeup_cpu]; |
| 154 | 354 | ||
| 155 | trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); | 355 | __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); |
| 156 | tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); | 356 | tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); |
| 157 | 357 | ||
| 158 | T0 = data->preempt_timestamp; | 358 | T0 = data->preempt_timestamp; |
| @@ -252,7 +452,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success) | |||
| 252 | * is not called by an assembly function (whereas schedule is) | 452 | * is not called by an assembly function (whereas schedule is) |
| 253 | * it should be safe to use it here. | 453 | * it should be safe to use it here. |
| 254 | */ | 454 | */ |
| 255 | trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); | 455 | __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); |
| 256 | 456 | ||
| 257 | out_locked: | 457 | out_locked: |
| 258 | arch_spin_unlock(&wakeup_lock); | 458 | arch_spin_unlock(&wakeup_lock); |
| @@ -303,12 +503,8 @@ static void start_wakeup_tracer(struct trace_array *tr) | |||
| 303 | */ | 503 | */ |
| 304 | smp_wmb(); | 504 | smp_wmb(); |
| 305 | 505 | ||
| 306 | register_ftrace_function(&trace_ops); | 506 | if (start_func_tracer(is_graph())) |
| 307 | 507 | printk(KERN_ERR "failed to start wakeup tracer\n"); | |
| 308 | if (tracing_is_enabled()) | ||
| 309 | tracer_enabled = 1; | ||
| 310 | else | ||
| 311 | tracer_enabled = 0; | ||
| 312 | 508 | ||
| 313 | return; | 509 | return; |
| 314 | fail_deprobe_wake_new: | 510 | fail_deprobe_wake_new: |
| @@ -320,7 +516,7 @@ fail_deprobe: | |||
| 320 | static void stop_wakeup_tracer(struct trace_array *tr) | 516 | static void stop_wakeup_tracer(struct trace_array *tr) |
| 321 | { | 517 | { |
| 322 | tracer_enabled = 0; | 518 | tracer_enabled = 0; |
| 323 | unregister_ftrace_function(&trace_ops); | 519 | stop_func_tracer(is_graph()); |
| 324 | unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); | 520 | unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); |
| 325 | unregister_trace_sched_wakeup_new(probe_wakeup, NULL); | 521 | unregister_trace_sched_wakeup_new(probe_wakeup, NULL); |
| 326 | unregister_trace_sched_wakeup(probe_wakeup, NULL); | 522 | unregister_trace_sched_wakeup(probe_wakeup, NULL); |
| @@ -379,9 +575,15 @@ static struct tracer wakeup_tracer __read_mostly = | |||
| 379 | .start = wakeup_tracer_start, | 575 | .start = wakeup_tracer_start, |
| 380 | .stop = wakeup_tracer_stop, | 576 | .stop = wakeup_tracer_stop, |
| 381 | .print_max = 1, | 577 | .print_max = 1, |
| 578 | .print_header = wakeup_print_header, | ||
| 579 | .print_line = wakeup_print_line, | ||
| 580 | .flags = &tracer_flags, | ||
| 581 | .set_flag = wakeup_set_flag, | ||
| 382 | #ifdef CONFIG_FTRACE_SELFTEST | 582 | #ifdef CONFIG_FTRACE_SELFTEST |
| 383 | .selftest = trace_selftest_startup_wakeup, | 583 | .selftest = trace_selftest_startup_wakeup, |
| 384 | #endif | 584 | #endif |
| 585 | .open = wakeup_trace_open, | ||
| 586 | .close = wakeup_trace_close, | ||
| 385 | .use_max_tr = 1, | 587 | .use_max_tr = 1, |
| 386 | }; | 588 | }; |
| 387 | 589 | ||
| @@ -394,9 +596,15 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
| 394 | .stop = wakeup_tracer_stop, | 596 | .stop = wakeup_tracer_stop, |
| 395 | .wait_pipe = poll_wait_pipe, | 597 | .wait_pipe = poll_wait_pipe, |
| 396 | .print_max = 1, | 598 | .print_max = 1, |
| 599 | .print_header = wakeup_print_header, | ||
| 600 | .print_line = wakeup_print_line, | ||
| 601 | .flags = &tracer_flags, | ||
| 602 | .set_flag = wakeup_set_flag, | ||
| 397 | #ifdef CONFIG_FTRACE_SELFTEST | 603 | #ifdef CONFIG_FTRACE_SELFTEST |
| 398 | .selftest = trace_selftest_startup_wakeup, | 604 | .selftest = trace_selftest_startup_wakeup, |
| 399 | #endif | 605 | #endif |
| 606 | .open = wakeup_trace_open, | ||
| 607 | .close = wakeup_trace_close, | ||
| 400 | .use_max_tr = 1, | 608 | .use_max_tr = 1, |
| 401 | }; | 609 | }; |
| 402 | 610 | ||
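The wakeup tracer above gains a display-graph tracer option; wakeup_set_flag() tears down whichever callback flavor is currently registered, resets the trace, and starts the other one, but only when the bit actually changes. A simplified standalone sketch of that toggle follows; the printfs stand in for the register/unregister calls and the buffer reset is only noted in a comment (not the kernel code).

#include <stdio.h>

#define TRACE_DISPLAY_GRAPH 1

static unsigned int flags;               /* current tracer flag bits */

static int start_func_tracer(int graph)
{
        printf("register %s callbacks\n", graph ? "graph" : "function");
        return 0;
}

static void stop_func_tracer(int graph)
{
        printf("unregister %s callbacks\n", graph ? "graph" : "function");
}

static int set_flag(unsigned int bit, int set)
{
        if (!(bit & TRACE_DISPLAY_GRAPH))
                return -1;                       /* unknown bit */

        if (!(((flags & TRACE_DISPLAY_GRAPH) ? 1 : 0) ^ set))
                return 0;                        /* no change */

        stop_func_tracer(!set);                  /* stop the old flavor */
        /* (the real code also resets the trace and max latency here) */
        flags = set ? (flags | bit) : (flags & ~bit);
        return start_func_tracer(set);           /* start the new flavor */
}

int main(void)
{
        set_flag(TRACE_DISPLAY_GRAPH, 1);        /* function -> graph */
        set_flag(TRACE_DISPLAY_GRAPH, 1);        /* no-op */
        set_flag(TRACE_DISPLAY_GRAPH, 0);        /* graph -> function */
        return 0;
}

With this patch applied, the option would presumably be toggled like other tracer-specific flags, by writing display-graph to the trace_options file while wakeup or wakeup_rt is the current tracer.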
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c index a7cc3793baf6..209b379a4721 100644 --- a/kernel/trace/trace_workqueue.c +++ b/kernel/trace/trace_workqueue.c | |||
| @@ -263,6 +263,11 @@ int __init trace_workqueue_early_init(void) | |||
| 263 | { | 263 | { |
| 264 | int ret, cpu; | 264 | int ret, cpu; |
| 265 | 265 | ||
| 266 | for_each_possible_cpu(cpu) { | ||
| 267 | spin_lock_init(&workqueue_cpu_stat(cpu)->lock); | ||
| 268 | INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list); | ||
| 269 | } | ||
| 270 | |||
| 266 | ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL); | 271 | ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL); |
| 267 | if (ret) | 272 | if (ret) |
| 268 | goto out; | 273 | goto out; |
| @@ -279,11 +284,6 @@ int __init trace_workqueue_early_init(void) | |||
| 279 | if (ret) | 284 | if (ret) |
| 280 | goto no_creation; | 285 | goto no_creation; |
| 281 | 286 | ||
| 282 | for_each_possible_cpu(cpu) { | ||
| 283 | spin_lock_init(&workqueue_cpu_stat(cpu)->lock); | ||
| 284 | INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list); | ||
| 285 | } | ||
| 286 | |||
| 287 | return 0; | 287 | return 0; |
| 288 | 288 | ||
| 289 | no_creation: | 289 | no_creation: |
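The trace_workqueue.c hunk above is purely an ordering fix: the per-CPU lock and list are now initialized before any tracepoint probe is registered, so a probe firing during registration cannot see them uninitialized. A trivial standalone sketch of the init-before-publish rule, with a made-up structure in place of the per-CPU stats (illustration only, not the kernel code):

#include <stdio.h>

struct cpu_stat {
        int lock_initialized;            /* stands in for spin_lock_init() */
        int nr_items;
};

static struct cpu_stat stats[4];         /* pretend per-CPU data */
static void (*insertion_probe)(int cpu); /* stands in for the tracepoint */

static void probe(int cpu)
{
        /* The probe assumes stats[cpu] is ready the moment it can run. */
        if (!stats[cpu].lock_initialized) {
                printf("BUG: probe hit uninitialized state on cpu %d\n", cpu);
                return;
        }
        stats[cpu].nr_items++;
}

int main(void)
{
        int cpu;

        /* 1. initialize everything the probe may touch ... */
        for (cpu = 0; cpu < 4; cpu++)
                stats[cpu].lock_initialized = 1;

        /* 2. ... and only then publish (register) the callback */
        insertion_probe = probe;

        insertion_probe(2);
        printf("cpu2 items: %d\n", stats[2].nr_items);
        return 0;
}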
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index c77f3eceea25..e95ee7f31d43 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
| 28 | #include <linux/jump_label.h> | ||
| 28 | 29 | ||
| 29 | extern struct tracepoint __start___tracepoints[]; | 30 | extern struct tracepoint __start___tracepoints[]; |
| 30 | extern struct tracepoint __stop___tracepoints[]; | 31 | extern struct tracepoint __stop___tracepoints[]; |
| @@ -263,7 +264,13 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
| 263 | * is used. | 264 | * is used. |
| 264 | */ | 265 | */ |
| 265 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); | 266 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); |
| 266 | elem->state = active; | 267 | if (!elem->state && active) { |
| 268 | jump_label_enable(&elem->state); | ||
| 269 | elem->state = active; | ||
| 270 | } else if (elem->state && !active) { | ||
| 271 | jump_label_disable(&elem->state); | ||
| 272 | elem->state = active; | ||
| 273 | } | ||
| 267 | } | 274 | } |
| 268 | 275 | ||
| 269 | /* | 276 | /* |
| @@ -277,7 +284,10 @@ static void disable_tracepoint(struct tracepoint *elem) | |||
| 277 | if (elem->unregfunc && elem->state) | 284 | if (elem->unregfunc && elem->state) |
| 278 | elem->unregfunc(); | 285 | elem->unregfunc(); |
| 279 | 286 | ||
| 280 | elem->state = 0; | 287 | if (elem->state) { |
| 288 | jump_label_disable(&elem->state); | ||
| 289 | elem->state = 0; | ||
| 290 | } | ||
| 281 | rcu_assign_pointer(elem->funcs, NULL); | 291 | rcu_assign_pointer(elem->funcs, NULL); |
| 282 | } | 292 | } |
| 283 | 293 | ||
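The tracepoint.c hunks wire tracepoint enable/disable into jump labels, flipping the patch site only on a real state transition. A simplified standalone sketch of that transition logic; the printfs stand in for jump_label_enable()/jump_label_disable(), which in the kernel take &elem->state as the key (illustration, not the kernel code).

#include <stdio.h>

static int state;                    /* stands in for elem->state */

static void jump_label_enable(void)  { printf("patch site: enable\n");  }
static void jump_label_disable(void) { printf("patch site: disable\n"); }

static void set_active(int active)
{
        if (!state && active) {
                jump_label_enable();
                state = 1;
        } else if (state && !active) {
                jump_label_disable();
                state = 0;
        }
        /* unchanged state: no patching work */
}

int main(void)
{
        set_active(1);   /* enable  -> one patch operation */
        set_active(1);   /* no-op   -> nothing printed */
        set_active(0);   /* disable -> one patch operation */
        set_active(0);   /* no-op */
        return 0;
}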
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 7f9c3c52ecc1..bafba687a6d8 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -43,7 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); | |||
| 43 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | 43 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); |
| 44 | #endif | 44 | #endif |
| 45 | 45 | ||
| 46 | static int __read_mostly did_panic; | ||
| 47 | static int __initdata no_watchdog; | 46 | static int __initdata no_watchdog; |
| 48 | 47 | ||
| 49 | 48 | ||
| @@ -187,18 +186,6 @@ static int is_softlockup(unsigned long touch_ts) | |||
| 187 | return 0; | 186 | return 0; |
| 188 | } | 187 | } |
| 189 | 188 | ||
| 190 | static int | ||
| 191 | watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr) | ||
| 192 | { | ||
| 193 | did_panic = 1; | ||
| 194 | |||
| 195 | return NOTIFY_DONE; | ||
| 196 | } | ||
| 197 | |||
| 198 | static struct notifier_block panic_block = { | ||
| 199 | .notifier_call = watchdog_panic, | ||
| 200 | }; | ||
| 201 | |||
| 202 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 189 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
| 203 | static struct perf_event_attr wd_hw_attr = { | 190 | static struct perf_event_attr wd_hw_attr = { |
| 204 | .type = PERF_TYPE_HARDWARE, | 191 | .type = PERF_TYPE_HARDWARE, |
| @@ -209,7 +196,7 @@ static struct perf_event_attr wd_hw_attr = { | |||
| 209 | }; | 196 | }; |
| 210 | 197 | ||
| 211 | /* Callback function for perf event subsystem */ | 198 | /* Callback function for perf event subsystem */ |
| 212 | void watchdog_overflow_callback(struct perf_event *event, int nmi, | 199 | static void watchdog_overflow_callback(struct perf_event *event, int nmi, |
| 213 | struct perf_sample_data *data, | 200 | struct perf_sample_data *data, |
| 214 | struct pt_regs *regs) | 201 | struct pt_regs *regs) |
| 215 | { | 202 | { |
| @@ -371,14 +358,14 @@ static int watchdog_nmi_enable(int cpu) | |||
| 371 | /* Try to register using hardware perf events */ | 358 | /* Try to register using hardware perf events */ |
| 372 | wd_attr = &wd_hw_attr; | 359 | wd_attr = &wd_hw_attr; |
| 373 | wd_attr->sample_period = hw_nmi_get_sample_period(); | 360 | wd_attr->sample_period = hw_nmi_get_sample_period(); |
| 374 | event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback); | 361 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback); |
| 375 | if (!IS_ERR(event)) { | 362 | if (!IS_ERR(event)) { |
| 376 | printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); | 363 | printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); |
| 377 | goto out_save; | 364 | goto out_save; |
| 378 | } | 365 | } |
| 379 | 366 | ||
| 380 | printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event); | 367 | printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event); |
| 381 | return -1; | 368 | return PTR_ERR(event); |
| 382 | 369 | ||
| 383 | /* success path */ | 370 | /* success path */ |
| 384 | out_save: | 371 | out_save: |
| @@ -422,17 +409,19 @@ static int watchdog_prepare_cpu(int cpu) | |||
| 422 | static int watchdog_enable(int cpu) | 409 | static int watchdog_enable(int cpu) |
| 423 | { | 410 | { |
| 424 | struct task_struct *p = per_cpu(softlockup_watchdog, cpu); | 411 | struct task_struct *p = per_cpu(softlockup_watchdog, cpu); |
| 412 | int err; | ||
| 425 | 413 | ||
| 426 | /* enable the perf event */ | 414 | /* enable the perf event */ |
| 427 | if (watchdog_nmi_enable(cpu) != 0) | 415 | err = watchdog_nmi_enable(cpu); |
| 428 | return -1; | 416 | if (err) |
| 417 | return err; | ||
| 429 | 418 | ||
| 430 | /* create the watchdog thread */ | 419 | /* create the watchdog thread */ |
| 431 | if (!p) { | 420 | if (!p) { |
| 432 | p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); | 421 | p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); |
| 433 | if (IS_ERR(p)) { | 422 | if (IS_ERR(p)) { |
| 434 | printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); | 423 | printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); |
| 435 | return -1; | 424 | return PTR_ERR(p); |
| 436 | } | 425 | } |
| 437 | kthread_bind(p, cpu); | 426 | kthread_bind(p, cpu); |
| 438 | per_cpu(watchdog_touch_ts, cpu) = 0; | 427 | per_cpu(watchdog_touch_ts, cpu) = 0; |
| @@ -484,6 +473,9 @@ static void watchdog_disable_all_cpus(void) | |||
| 484 | { | 473 | { |
| 485 | int cpu; | 474 | int cpu; |
| 486 | 475 | ||
| 476 | if (no_watchdog) | ||
| 477 | return; | ||
| 478 | |||
| 487 | for_each_online_cpu(cpu) | 479 | for_each_online_cpu(cpu) |
| 488 | watchdog_disable(cpu); | 480 | watchdog_disable(cpu); |
| 489 | 481 | ||
| @@ -526,17 +518,16 @@ static int __cpuinit | |||
| 526 | cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | 518 | cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| 527 | { | 519 | { |
| 528 | int hotcpu = (unsigned long)hcpu; | 520 | int hotcpu = (unsigned long)hcpu; |
| 521 | int err = 0; | ||
| 529 | 522 | ||
| 530 | switch (action) { | 523 | switch (action) { |
| 531 | case CPU_UP_PREPARE: | 524 | case CPU_UP_PREPARE: |
| 532 | case CPU_UP_PREPARE_FROZEN: | 525 | case CPU_UP_PREPARE_FROZEN: |
| 533 | if (watchdog_prepare_cpu(hotcpu)) | 526 | err = watchdog_prepare_cpu(hotcpu); |
| 534 | return NOTIFY_BAD; | ||
| 535 | break; | 527 | break; |
| 536 | case CPU_ONLINE: | 528 | case CPU_ONLINE: |
| 537 | case CPU_ONLINE_FROZEN: | 529 | case CPU_ONLINE_FROZEN: |
| 538 | if (watchdog_enable(hotcpu)) | 530 | err = watchdog_enable(hotcpu); |
| 539 | return NOTIFY_BAD; | ||
| 540 | break; | 531 | break; |
| 541 | #ifdef CONFIG_HOTPLUG_CPU | 532 | #ifdef CONFIG_HOTPLUG_CPU |
| 542 | case CPU_UP_CANCELED: | 533 | case CPU_UP_CANCELED: |
| @@ -549,7 +540,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 549 | break; | 540 | break; |
| 550 | #endif /* CONFIG_HOTPLUG_CPU */ | 541 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 551 | } | 542 | } |
| 552 | return NOTIFY_OK; | 543 | return notifier_from_errno(err); |
| 553 | } | 544 | } |
| 554 | 545 | ||
| 555 | static struct notifier_block __cpuinitdata cpu_nfb = { | 546 | static struct notifier_block __cpuinitdata cpu_nfb = { |
| @@ -565,13 +556,11 @@ static int __init spawn_watchdog_task(void) | |||
| 565 | return 0; | 556 | return 0; |
| 566 | 557 | ||
| 567 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | 558 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); |
| 568 | WARN_ON(err == NOTIFY_BAD); | 559 | WARN_ON(notifier_to_errno(err)); |
| 569 | 560 | ||
| 570 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); | 561 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); |
| 571 | register_cpu_notifier(&cpu_nfb); | 562 | register_cpu_notifier(&cpu_nfb); |
| 572 | 563 | ||
| 573 | atomic_notifier_chain_register(&panic_notifier_list, &panic_block); | ||
| 574 | |||
| 575 | return 0; | 564 | return 0; |
| 576 | } | 565 | } |
| 577 | early_initcall(spawn_watchdog_task); | 566 | early_initcall(spawn_watchdog_task); |
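The watchdog.c changes above stop collapsing failures into -1/NOTIFY_BAD and instead propagate the real error: PTR_ERR() of the failed perf event or kthread, and notifier_from_errno() in the CPU hotplug callback. Below is a standalone sketch of that PTR_ERR-style propagation, with tiny stand-ins for the kernel's IS_ERR()/PTR_ERR() helpers and a made-up ENOENT failure (illustration only, not the kernel code):

#include <errno.h>
#include <stdio.h>

/* Tiny stand-ins for the kernel's IS_ERR()/PTR_ERR()/ERR_PTR() helpers. */
#define MAX_ERRNO 4095
static inline int is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static inline long ptr_err(const void *p)  { return (long)p; }
static inline void *err_ptr(long err)      { return (void *)err; }

static void *create_counter(int fail)
{
        return fail ? err_ptr(-ENOENT) : (void *)0x1000; /* fake handle */
}

static int nmi_enable(int fail)
{
        void *event = create_counter(fail);

        if (!is_err(event))
                return 0;

        /* propagate the real reason instead of a bare -1 */
        return (int)ptr_err(event);
}

int main(void)
{
        printf("ok:   %d\n", nmi_enable(0));    /* 0 */
        printf("fail: %d\n", nmi_enable(1));    /* -ENOENT */
        return 0;
}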
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 727f24e563ae..f77afd939229 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -1,19 +1,26 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * linux/kernel/workqueue.c | 2 | * kernel/workqueue.c - generic async execution with shared worker pool |
| 3 | * | 3 | * |
| 4 | * Generic mechanism for defining kernel helper threads for running | 4 | * Copyright (C) 2002 Ingo Molnar |
| 5 | * arbitrary tasks in process context. | ||
| 6 | * | 5 | * |
| 7 | * Started by Ingo Molnar, Copyright (C) 2002 | 6 | * Derived from the taskqueue/keventd code by: |
| 7 | * David Woodhouse <dwmw2@infradead.org> | ||
| 8 | * Andrew Morton | ||
| 9 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
| 10 | * Theodore Ts'o <tytso@mit.edu> | ||
| 8 | * | 11 | * |
| 9 | * Derived from the taskqueue/keventd code by: | 12 | * Made to use alloc_percpu by Christoph Lameter. |
| 10 | * | 13 | * |
| 11 | * David Woodhouse <dwmw2@infradead.org> | 14 | * Copyright (C) 2010 SUSE Linux Products GmbH |
| 12 | * Andrew Morton | 15 | * Copyright (C) 2010 Tejun Heo <tj@kernel.org> |
| 13 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
| 14 | * Theodore Ts'o <tytso@mit.edu> | ||
| 15 | * | 16 | * |
| 16 | * Made to use alloc_percpu by Christoph Lameter. | 17 | * This is the generic async execution mechanism. Work items are |
| 18 | * executed in process context. The worker pool is shared and | ||
| 19 | * automatically managed. There is one worker pool for each CPU and | ||
| 20 | * one extra for works which are better served by workers which are | ||
| 21 | * not bound to any specific CPU. | ||
| 22 | * | ||
| 23 | * Please read Documentation/workqueue.txt for details. | ||
| 17 | */ | 24 | */ |
| 18 | 25 | ||
| 19 | #include <linux/module.h> | 26 | #include <linux/module.h> |
