Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c             | 16
-rw-r--r--	kernel/hrtimer.c          |  4
-rw-r--r--	kernel/posix-cpu-timers.c | 70
-rw-r--r--	kernel/sched_stats.h      | 33
-rw-r--r--	kernel/time/tick-sched.c  |  2
-rw-r--r--	kernel/workqueue.c        | 20
6 files changed, 36 insertions(+), 109 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index bf0cef8bbdf2..242a706e7721 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
-	int ret;
 
 	if (clone_flags & CLONE_THREAD) {
-		ret = thread_group_cputime_clone_thread(current);
-		if (likely(!ret)) {
-			atomic_inc(&current->signal->count);
-			atomic_inc(&current->signal->live);
-		}
-		return ret;
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
+		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+
+	if (sig)
+		posix_cpu_timers_init_group(sig);
+
 	tsk->signal = sig;
 	if (!sig)
 		return -ENOMEM;
@@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
 	task_unlock(current->group_leader);
 
-	posix_cpu_timers_init_group(sig);
-
 	acct_init_pacct(&sig->pacct);
 
 	tty_audit_fork(sig);
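For orientation, here is the shape copy_signal() takes once both hunks apply: the CLONE_THREAD fast path no longer calls thread_group_cputime_clone_thread() (that helper is deleted below in posix-cpu-timers.c), and posix_cpu_timers_init_group() now runs right after the allocation instead of later in the function. A minimal sketch, with the unrelated signal_struct setup elided:

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD) {
		/* Joining an existing group: just take references. */
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}

	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);

	/* Group timers are initialized before anyone can see sig. */
	if (sig)
		posix_cpu_timers_init_group(sig);

	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	/* ... rest of the signal_struct initialization unchanged ... */
	return 0;
}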
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2dc30c59c5fd..f33afb0407bc 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -614,7 +614,9 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-	/* Retrigger the CPU local events: */
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
 	retrigger_next_event(NULL);
 }
 
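The replaced comment becomes an assertion: resuming high-resolution timers must happen with interrupts disabled, and WARN_ONCE() prints its backtrace on the first violation only, staying quiet afterwards. An illustrative caller-side sketch of the invariant being checked (not code from this patch):

/* Illustrative only: how a resume path is expected to call this. */
local_irq_disable();
hres_timers_resume();	/* IRQs off: silent */
local_irq_enable();

hres_timers_resume();	/* IRQs on: triggers the one-time warning */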
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 157de3a47832..fa07da94d7be 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -10,76 +10,6 @@
 #include <linux/kernel_stat.h>
 
 /*
- * Allocate the thread_group_cputime structure appropriately and fill in the
- * current values of the fields.  Called from copy_signal() via
- * thread_group_cputime_clone_thread() when adding a second or subsequent
- * thread to a thread group.  Assumes interrupts are enabled when called.
- */
-int thread_group_cputime_alloc(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct task_cputime *cputime;
-
-	/*
-	 * If we have multiple threads and we don't already have a
-	 * per-CPU task_cputime struct (checked in the caller), allocate
-	 * one and fill it in with the times accumulated so far.  We may
-	 * race with another thread so recheck after we pick up the sighand
-	 * lock.
-	 */
-	cputime = alloc_percpu(struct task_cputime);
-	if (cputime == NULL)
-		return -ENOMEM;
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (sig->cputime.totals) {
-		spin_unlock_irq(&tsk->sighand->siglock);
-		free_percpu(cputime);
-		return 0;
-	}
-	sig->cputime.totals = cputime;
-	cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
-	cputime->utime = tsk->utime;
-	cputime->stime = tsk->stime;
-	cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
-	spin_unlock_irq(&tsk->sighand->siglock);
-	return 0;
-}
-
-/**
- * thread_group_cputime - Sum the thread group time fields across all CPUs.
- *
- * @tsk:	The task we use to identify the thread group.
- * @times:	task_cputime structure in which we return the summed fields.
- *
- * Walk the list of CPUs to sum the per-CPU time fields in the thread group
- * time structure.
- */
-void thread_group_cputime(
-	struct task_struct *tsk,
-	struct task_cputime *times)
-{
-	struct task_cputime *totals, *tot;
-	int i;
-
-	totals = tsk->signal->cputime.totals;
-	if (!totals) {
-		times->utime = tsk->utime;
-		times->stime = tsk->stime;
-		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
-		return;
-	}
-
-	times->stime = times->utime = cputime_zero;
-	times->sum_exec_runtime = 0;
-	for_each_possible_cpu(i) {
-		tot = per_cpu_ptr(totals, i);
-		times->utime = cputime_add(times->utime, tot->utime);
-		times->stime = cputime_add(times->stime, tot->stime);
-		times->sum_exec_runtime += tot->sum_exec_runtime;
-	}
-}
-
-/*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
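Both deleted helpers managed the per-CPU totals that this patch removes: with the group totals now living in a single task_cputime embedded in signal_struct and guarded by a spinlock (see the sched_stats.h hunks below), the group-clock read that thread_group_cputime() implemented reduces to a locked copy. A minimal sketch of that read under the new layout, assuming the inferred field names rather than the verbatim replacement:

/* Sketch: group clock read under the locked-struct scheme (inferred). */
static inline void thread_group_cputime_sketch(struct task_struct *tsk,
					       struct task_cputime *times)
{
	struct task_cputime *totals = &tsk->signal->cputime.totals;
	unsigned long flags;

	spin_lock_irqsave(&totals->lock, flags);
	*times = *totals;	/* no per-CPU walk, no allocation to race with */
	spin_unlock_irqrestore(&totals->lock, flags);
}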
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index f2773b5d1226..8ab0cef8ecab 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 static inline void account_group_user_time(struct task_struct *tsk,
 					   cputime_t cputime)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	/* tsk == current, ensure it is safe to use ->signal */
@@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
 		return;
 
 	sig = tsk->signal;
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->utime = cputime_add(times->utime, cputime);
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->utime = cputime_add(times->utime, cputime);
+	spin_unlock(&times->lock);
 }
 
 /**
@@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
 static inline void account_group_system_time(struct task_struct *tsk,
 					     cputime_t cputime)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	/* tsk == current, ensure it is safe to use ->signal */
@@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
 		return;
 
 	sig = tsk->signal;
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->stime = cputime_add(times->stime, cputime);
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->stime = cputime_add(times->stime, cputime);
+	spin_unlock(&times->lock);
 }
 
 /**
@@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
 					      unsigned long long ns)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	sig = tsk->signal;
@@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (unlikely(!sig))
 		return;
 
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->sum_exec_runtime += ns;
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->sum_exec_runtime += ns;
+	spin_unlock(&times->lock);
 }
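All three accounting helpers now dereference sig->cputime.totals as an embedded structure and serialize on times->lock, so task_cputime must have grown a spinlock and thread_group_cputime must embed the totals rather than point at a per-CPU allocation. The layout they imply, reconstructed from the accessors (field order inferred; the authoritative definitions live in include/linux/sched.h):

/* Inferred from the accessors above, not copied from the patch. */
struct task_cputime {
	cputime_t		utime;
	cputime_t		stime;
	unsigned long long	sum_exec_runtime;
	spinlock_t		lock;
};

struct thread_group_cputime {
	struct task_cputime	totals;	/* was: struct task_cputime *totals */
};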
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1b6c05bd0d0a..d3f1ef4d5cbe 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz);
  * value. We do this unconditionally on any cpu, as we don't know whether the
  * cpu, which has the update task assigned is in a long sleep.
  */
-void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(void)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2f445833ae37..1f0c509b40d3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -971,6 +971,8 @@ undo:
 }
 
 #ifdef CONFIG_SMP
+static struct workqueue_struct *work_on_cpu_wq __read_mostly;
+
 struct work_for_cpu {
 	struct work_struct work;
 	long (*fn)(void *);
@@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w)
 * @fn: the function to run
 * @arg: the function arg
 *
- * This will return -EINVAL in the cpu is not online, or the return value
- * of @fn otherwise.
+ * This will return the value @fn returns.
+ * It is up to the caller to ensure that the cpu doesn't go offline.
 */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
@@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 	INIT_WORK(&wfc.work, do_work_for_cpu);
 	wfc.fn = fn;
 	wfc.arg = arg;
-	get_online_cpus();
-	if (unlikely(!cpu_online(cpu)))
-		wfc.ret = -EINVAL;
-	else {
-		schedule_work_on(cpu, &wfc.work);
-		flush_work(&wfc.work);
-	}
-	put_online_cpus();
+	queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
+	flush_work(&wfc.work);
 
 	return wfc.ret;
 }
@@ -1025,4 +1021,8 @@ void __init init_workqueues(void)
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
+#ifdef CONFIG_SMP
+	work_on_cpu_wq = create_workqueue("work_on_cpu");
+	BUG_ON(!work_on_cpu_wq);
+#endif
 }
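With the cpu_online() check and the get_online_cpus()/put_online_cpus() pair removed from work_on_cpu(), hotplug exclusion is now the caller's job, and the work runs on the dedicated work_on_cpu_wq instead of keventd. A hedged usage sketch (both function names below are hypothetical, for illustration only):

/* Hypothetical caller, for illustration only. */
static long cpu_probe_fn(void *arg)
{
	/* Runs in worker context, pinned to the requested CPU. */
	return smp_processor_id();
}

static long probe_cpu(unsigned int cpu)
{
	long ret;

	get_online_cpus();	/* caller must now keep @cpu online */
	ret = cpu_online(cpu) ? work_on_cpu(cpu, cpu_probe_fn, NULL)
			      : -EINVAL;
	put_online_cpus();
	return ret;
}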