Diffstat (limited to 'kernel')
-rw-r--r--	kernel/compat.c	| 21
-rw-r--r--	kernel/perf_event.c	| 18
-rw-r--r--	kernel/sched.c	|  8
-rw-r--r--	kernel/sched_fair.c	|  2
-rw-r--r--	kernel/workqueue.c	| 27
5 files changed, 55 insertions, 21 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efce8423..c9e2ec0b34a8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
 
 	return 0;
 }
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+	void __user *ptr;
+
+	/* If len would occupy more than half of the entire compat space... */
+	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+		return NULL;
+
+	ptr = arch_compat_alloc_user_space(len);
+
+	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+		return NULL;
+
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
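The helper added above is meant to be called from 32-bit compat syscall wrappers that must widen a 32-bit argument block into the native layout before invoking the real syscall. A minimal sketch of that calling pattern follows; struct compat_foo, struct foo and sys_foo() are hypothetical names used purely for illustration:

/* Hypothetical thunk: compat_foo, foo and sys_foo are made-up names. */
struct compat_foo {
	compat_uptr_t	buf;	/* 32-bit user pointer */
	compat_size_t	len;
};

struct foo {
	void __user	*buf;	/* native user pointer */
	size_t		len;
};

asmlinkage long compat_sys_foo(struct compat_foo __user *arg32)
{
	struct compat_foo f32;
	struct foo __user *f;

	if (copy_from_user(&f32, arg32, sizeof(f32)))
		return -EFAULT;

	/* Scratch area on the user stack; valid only for this syscall. */
	f = compat_alloc_user_space(sizeof(*f));
	if (f == NULL ||
	    put_user(compat_ptr(f32.buf), &f->buf) ||
	    put_user((size_t)f32.len, &f->len))
		return -EFAULT;

	/* Hand the widened argument block to the native syscall. */
	return sys_foo(f);
}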
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index baae1367e945..c16158c77dfd 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3836,18 +3836,20 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
 
 		ctx = task_event->task_ctx;
 		if (!ctx) {
 			ctxn = pmu->task_ctx_nr;
 			if (ctxn < 0)
-				continue;
+				goto next;
 			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
 		}
 		if (ctx)
 			perf_event_task_ctx(ctx, task_event);
+next:
+		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
 	rcu_read_unlock();
 }
@@ -3969,16 +3971,18 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
 		ctxn = pmu->task_ctx_nr;
 		if (ctxn < 0)
-			continue;
+			goto next;
 
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
 		if (ctx)
 			perf_event_comm_ctx(ctx, comm_event);
+next:
+		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
 	rcu_read_unlock();
 }
@@ -4152,19 +4156,21 @@ got_name:
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
 					vma->vm_flags & VM_EXEC);
 
 		ctxn = pmu->task_ctx_nr;
 		if (ctxn < 0)
-			continue;
+			goto next;
 
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
 		if (ctx) {
 			perf_event_mmap_ctx(ctx, mmap_event,
 						vma->vm_flags & VM_EXEC);
 		}
+next:
+		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
 	rcu_read_unlock();
 
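All three hunks above make the same two-part change: this_cpu_ptr() gives no preemption protection, while get_cpu_ptr()/put_cpu_ptr() disable and re-enable preemption for as long as the per-cpu context is in use, and the early `continue` paths become `goto next` so the matching put is never skipped. Reduced to a schematic (the condition and loop body are placeholders, not the real code):

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		/* get_cpu_ptr() disables preemption until the matching put. */
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);

		if (early_out_condition)	/* placeholder condition */
			goto next;		/* "continue" would leak the get */

		/* ... deliver the event via cpuctx and the task context ... */
next:
		/* Every path through the body re-enables preemption once. */
		put_cpu_ptr(pmu->pmu_cpu_context);
	}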
diff --git a/kernel/sched.c b/kernel/sched.c
index 794819eab9ca..c0d2067f3e0d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3513,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
-		u64 temp;
+		u64 temp = rtime;
 
-		temp = (u64)(rtime * utime);
+		temp *= utime;
 		do_div(temp, total);
 		utime = (cputime_t)temp;
 	} else
@@ -3546,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
 	if (total) {
-		u64 temp;
+		u64 temp = rtime;
 
-		temp = (u64)(rtime * cputime.utime);
+		temp *= cputime.utime;
 		do_div(temp, total);
 		utime = (cputime_t)temp;
 	} else
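Both sched.c hunks fix the same arithmetic bug: where cputime_t is 32 bits wide, `rtime * utime` is evaluated in 32-bit arithmetic, and the `(u64)` cast only widens the already-truncated product. Initializing a u64 with rtime first forces the multiplication itself into 64 bits. A small user-space sketch of the difference, with uint32_t standing in for a 32-bit cputime_t:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t cputime_t;	/* mimics a 32-bit cputime_t */

int main(void)
{
	cputime_t rtime = 100000, utime = 90000, total = 100000;

	/* Old code: the 32-bit multiply wraps before the cast widens it. */
	uint64_t bad = (uint64_t)(rtime * utime);
	bad /= total;

	/* New code: widen first, then multiply in 64 bits. */
	uint64_t good = rtime;
	good *= utime;
	good /= total;

	/* Prints "truncated: 4100, correct: 90000". */
	printf("truncated: %llu, correct: %llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}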
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a171138a9402..db3f674ca49d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3630,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	if (time_before(now, nohz.next_balance))
 		return 0;
 
-	if (!rq->nr_running)
+	if (rq->idle_at_tick)
 		return 0;
 
 	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 727f24e563ae..f77afd939229 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
 /*
- * linux/kernel/workqueue.c
+ * kernel/workqueue.c - generic async execution with shared worker pool
  *
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
+ * Copyright (C) 2002		Ingo Molnar
  *
- * Started by Ingo Molnar, Copyright (C) 2002
+ * Derived from the taskqueue/keventd code by:
+ *   David Woodhouse <dwmw2@infradead.org>
+ *   Andrew Morton
+ *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ *   Theodore Ts'o <tytso@mit.edu>
  *
- * Derived from the taskqueue/keventd code by:
- *
- *   David Woodhouse <dwmw2@infradead.org>
- *   Andrew Morton
- *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
- *   Theodore Ts'o <tytso@mit.edu>
+ * Made to use alloc_percpu by Christoph Lameter.
  *
- * Made to use alloc_percpu by Christoph Lameter.
+ * Copyright (C) 2010		SUSE Linux Products GmbH
+ * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
+ *
+ * This is the generic async execution mechanism.  Work items are
+ * executed in process context.  The worker pool is shared and
+ * automatically managed.  There is one worker pool for each CPU and
+ * one extra for works which are better served by workers which are
+ * not bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
  */
 
 #include <linux/module.h>
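The rewritten header documents the new shared-pool design: work items still run in process context, but they are executed by automatically managed worker pools, one per CPU plus an unbound one, instead of by per-workqueue threads. From the caller's point of view the queueing API is unchanged; a minimal usage sketch (my_work_fn and kick_deferred_work are made-up names for illustration):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* Runs in process context on a worker from the shared pool. */
	pr_info("deferred work executed\n");
}

static DECLARE_WORK(my_work, my_work_fn);

/* Callable from interrupt or other atomic context: */
static void kick_deferred_work(void)
{
	/* Queues onto the system workqueue, backed by the shared pool. */
	schedule_work(&my_work);
}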
