diff options
author | Peter Zijlstra <peterz@infradead.org> | 2008-11-24 11:06:57 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-07 12:52:44 -0500 |
commit | 490dea45d00f01847ebebd007685d564aaf2cd98 (patch) | |
tree | a1f559fd497b10c21479b378ffb262d517cb627b /include/linux/sched.h | |
parent | ede6f5aea054d3fb67c78857f7abdee602302043 (diff) |
itimers: remove the per-cpu-ish-ness
Either we bounce one cacheline per cpu per tick, yielding n^2 bounces,
or we just bounce a single one.
Also, using per-cpu allocations for the thread-groups complicates the
per-cpu allocator in that it is currently aimed to be a fixed-size
allocator, and the only possible extension to that would be vmap based,
which is seriously constrained on 32-bit archs.
So making the per-cpu memory requirement depend on the number of
processes is an issue.
Lastly, it didn't deal with cpu-hotplug, although admittedly that might
be fixable.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 29 |
1 files changed, 18 insertions, 11 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4cae9b81a1f8..c20943eabb4c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -450,6 +450,7 @@ struct task_cputime { | |||
450 | cputime_t utime; | 450 | cputime_t utime; |
451 | cputime_t stime; | 451 | cputime_t stime; |
452 | unsigned long long sum_exec_runtime; | 452 | unsigned long long sum_exec_runtime; |
453 | spinlock_t lock; | ||
453 | }; | 454 | }; |
454 | /* Alternate field names when used to cache expirations. */ | 455 | /* Alternate field names when used to cache expirations. */ |
455 | #define prof_exp stime | 456 | #define prof_exp stime |
@@ -465,7 +466,7 @@ struct task_cputime { | |||
465 | * used for thread group CPU clock calculations. | 466 | * used for thread group CPU clock calculations. |
466 | */ | 467 | */ |
467 | struct thread_group_cputime { | 468 | struct thread_group_cputime { |
468 | struct task_cputime *totals; | 469 | struct task_cputime totals; |
469 | }; | 470 | }; |
470 | 471 | ||
471 | /* | 472 | /* |
@@ -2180,24 +2181,30 @@ static inline int spin_needbreak(spinlock_t *lock) | |||
2180 | * Thread group CPU time accounting. | 2181 | * Thread group CPU time accounting. |
2181 | */ | 2182 | */ |
2182 | 2183 | ||
2183 | extern int thread_group_cputime_alloc(struct task_struct *); | 2184 | static inline |
2184 | extern void thread_group_cputime(struct task_struct *, struct task_cputime *); | 2185 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) |
2185 | |||
2186 | static inline void thread_group_cputime_init(struct signal_struct *sig) | ||
2187 | { | 2186 | { |
2188 | sig->cputime.totals = NULL; | 2187 | struct task_cputime *totals = &tsk->signal->cputime.totals; |
2188 | unsigned long flags; | ||
2189 | |||
2190 | spin_lock_irqsave(&totals->lock, flags); | ||
2191 | *times = *totals; | ||
2192 | spin_unlock_irqrestore(&totals->lock, flags); | ||
2189 | } | 2193 | } |
2190 | 2194 | ||
2191 | static inline int thread_group_cputime_clone_thread(struct task_struct *curr) | 2195 | static inline void thread_group_cputime_init(struct signal_struct *sig) |
2192 | { | 2196 | { |
2193 | if (curr->signal->cputime.totals) | 2197 | sig->cputime.totals = (struct task_cputime){ |
2194 | return 0; | 2198 | .utime = cputime_zero, |
2195 | return thread_group_cputime_alloc(curr); | 2199 | .stime = cputime_zero, |
2200 | .sum_exec_runtime = 0, | ||
2201 | }; | ||
2202 | |||
2203 | spin_lock_init(&sig->cputime.totals.lock); | ||
2196 | } | 2204 | } |
2197 | 2205 | ||
2198 | static inline void thread_group_cputime_free(struct signal_struct *sig) | 2206 | static inline void thread_group_cputime_free(struct signal_struct *sig) |
2199 | { | 2207 | { |
2200 | free_percpu(sig->cputime.totals); | ||
2201 | } | 2208 | } |
2202 | 2209 | ||
2203 | /* | 2210 | /* |