about summary refs log tree commit diff stats
path: root/include/linux/sched.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  257
1 files changed, 244 insertions, 13 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d9120c5ad15..26d7a5f2d0ba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -425,6 +425,45 @@ struct pacct_struct {
425 unsigned long ac_minflt, ac_majflt; 425 unsigned long ac_minflt, ac_majflt;
426}; 426};
427 427
428/**
429 * struct task_cputime - collected CPU time counts
430 * @utime: time spent in user mode, in &cputime_t units
431 * @stime: time spent in kernel mode, in &cputime_t units
432 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
433 *
434 * This structure groups together three kinds of CPU time that are
435 * tracked for threads and thread groups. Most things considering
436 * CPU time want to group these counts together and treat all three
437 * of them in parallel.
438 */
439struct task_cputime {
440 cputime_t utime;
441 cputime_t stime;
442 unsigned long long sum_exec_runtime;
443};
444/* Alternate field names when used to cache expirations. */
445#define prof_exp stime
446#define virt_exp utime
447#define sched_exp sum_exec_runtime
448
449/**
450 * struct thread_group_cputime - thread group interval timer counts
451 * @totals: thread group interval timers; substructure for
452 * uniprocessor kernel, per-cpu for SMP kernel.
453 *
454 * This structure contains the version of task_cputime, above, that is
455 * used for thread group CPU clock calculations.
456 */
457#ifdef CONFIG_SMP
458struct thread_group_cputime {
459 struct task_cputime *totals;
460};
461#else
462struct thread_group_cputime {
463 struct task_cputime totals;
464};
465#endif
466
428/* 467/*
429 * NOTE! "signal_struct" does not have it's own 468 * NOTE! "signal_struct" does not have it's own
430 * locking, because a shared signal_struct always 469 * locking, because a shared signal_struct always
@@ -470,6 +509,17 @@ struct signal_struct {
470 cputime_t it_prof_expires, it_virt_expires; 509 cputime_t it_prof_expires, it_virt_expires;
471 cputime_t it_prof_incr, it_virt_incr; 510 cputime_t it_prof_incr, it_virt_incr;
472 511
512 /*
513 * Thread group totals for process CPU clocks.
514 * See thread_group_cputime(), et al, for details.
515 */
516 struct thread_group_cputime cputime;
517
518 /* Earliest-expiration cache. */
519 struct task_cputime cputime_expires;
520
521 struct list_head cpu_timers[3];
522
473 /* job control IDs */ 523 /* job control IDs */
474 524
475 /* 525 /*
@@ -500,7 +550,7 @@ struct signal_struct {
500 * Live threads maintain their own counters and add to these 550 * Live threads maintain their own counters and add to these
501 * in __exit_signal, except for the group leader. 551 * in __exit_signal, except for the group leader.
502 */ 552 */
503 cputime_t utime, stime, cutime, cstime; 553 cputime_t cutime, cstime;
504 cputime_t gtime; 554 cputime_t gtime;
505 cputime_t cgtime; 555 cputime_t cgtime;
506 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 556 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -509,14 +559,6 @@ struct signal_struct {
509 struct task_io_accounting ioac; 559 struct task_io_accounting ioac;
510 560
511 /* 561 /*
512 * Cumulative ns of scheduled CPU time for dead threads in the
513 * group, not including a zombie group leader. (This only differs
514 * from jiffies_to_ns(utime + stime) if sched_clock uses something
515 * other than jiffies.)
516 */
517 unsigned long long sum_sched_runtime;
518
519 /*
520 * We don't bother to synchronize most readers of this at all, 562 * We don't bother to synchronize most readers of this at all,
521 * because there is no reader checking a limit that actually needs 563 * because there is no reader checking a limit that actually needs
522 * to get both rlim_cur and rlim_max atomically, and either one 564 * to get both rlim_cur and rlim_max atomically, and either one
@@ -527,8 +569,6 @@ struct signal_struct {
527 */ 569 */
528 struct rlimit rlim[RLIM_NLIMITS]; 570 struct rlimit rlim[RLIM_NLIMITS];
529 571
530 struct list_head cpu_timers[3];
531
532 /* keep the process-shared keyrings here so that they do the right 572 /* keep the process-shared keyrings here so that they do the right
533 * thing in threads created with CLONE_THREAD */ 573 * thing in threads created with CLONE_THREAD */
534#ifdef CONFIG_KEYS 574#ifdef CONFIG_KEYS
@@ -1134,8 +1174,7 @@ struct task_struct {
1134/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1174/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1135 unsigned long min_flt, maj_flt; 1175 unsigned long min_flt, maj_flt;
1136 1176
1137 cputime_t it_prof_expires, it_virt_expires; 1177 struct task_cputime cputime_expires;
1138 unsigned long long it_sched_expires;
1139 struct list_head cpu_timers[3]; 1178 struct list_head cpu_timers[3];
1140 1179
1141/* process credentials */ 1180/* process credentials */
@@ -1585,6 +1624,7 @@ extern unsigned long long cpu_clock(int cpu);
1585 1624
1586extern unsigned long long 1625extern unsigned long long
1587task_sched_runtime(struct task_struct *task); 1626task_sched_runtime(struct task_struct *task);
1627extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1588 1628
1589/* sched_exec is called by processes performing an exec */ 1629/* sched_exec is called by processes performing an exec */
1590#ifdef CONFIG_SMP 1630#ifdef CONFIG_SMP
@@ -2082,6 +2122,197 @@ static inline int spin_needbreak(spinlock_t *lock)
2082} 2122}
2083 2123
2084/* 2124/*
2125 * Thread group CPU time accounting.
2126 */
2127#ifdef CONFIG_SMP
2128
2129extern int thread_group_cputime_alloc_smp(struct task_struct *);
2130extern void thread_group_cputime_smp(struct task_struct *, struct task_cputime *);
2131
2132static inline void thread_group_cputime_init(struct signal_struct *sig)
2133{
2134 sig->cputime.totals = NULL;
2135}
2136
2137static inline int thread_group_cputime_clone_thread(struct task_struct *curr,
2138 struct task_struct *new)
2139{
2140 if (curr->signal->cputime.totals)
2141 return 0;
2142 return thread_group_cputime_alloc_smp(curr);
2143}
2144
2145static inline void thread_group_cputime_free(struct signal_struct *sig)
2146{
2147 free_percpu(sig->cputime.totals);
2148}
2149
2150/**
2151 * thread_group_cputime - Sum the thread group time fields across all CPUs.
2152 *
2153 * This is a wrapper for the real routine, thread_group_cputime_smp(). See
2154 * that routine for details.
2155 */
2156static inline void thread_group_cputime(
2157 struct task_struct *tsk,
2158 struct task_cputime *times)
2159{
2160 thread_group_cputime_smp(tsk, times);
2161}
2162
2163/**
2164 * thread_group_cputime_account_user - Maintain utime for a thread group.
2165 *
2166 * @tgtimes: Pointer to thread_group_cputime structure.
2167 * @cputime: Time value by which to increment the utime field of that
2168 * structure.
2169 *
2170 * If thread group time is being maintained, get the structure for the
2171 * running CPU and update the utime field there.
2172 */
2173static inline void thread_group_cputime_account_user(
2174 struct thread_group_cputime *tgtimes,
2175 cputime_t cputime)
2176{
2177 if (tgtimes->totals) {
2178 struct task_cputime *times;
2179
2180 times = per_cpu_ptr(tgtimes->totals, get_cpu());
2181 times->utime = cputime_add(times->utime, cputime);
2182 put_cpu_no_resched();
2183 }
2184}
2185
2186/**
2187 * thread_group_cputime_account_system - Maintain stime for a thread group.
2188 *
2189 * @tgtimes: Pointer to thread_group_cputime structure.
2190 * @cputime: Time value by which to increment the stime field of that
2191 * structure.
2192 *
2193 * If thread group time is being maintained, get the structure for the
2194 * running CPU and update the stime field there.
2195 */
2196static inline void thread_group_cputime_account_system(
2197 struct thread_group_cputime *tgtimes,
2198 cputime_t cputime)
2199{
2200 if (tgtimes->totals) {
2201 struct task_cputime *times;
2202
2203 times = per_cpu_ptr(tgtimes->totals, get_cpu());
2204 times->stime = cputime_add(times->stime, cputime);
2205 put_cpu_no_resched();
2206 }
2207}
2208
2209/**
2210 * thread_group_cputime_account_exec_runtime - Maintain exec runtime for a
2211 * thread group.
2212 *
2213 * @tgtimes: Pointer to thread_group_cputime structure.
2214 * @ns: Time value by which to increment the sum_exec_runtime field
2215 * of that structure.
2216 *
2217 * If thread group time is being maintained, get the structure for the
2218 * running CPU and update the sum_exec_runtime field there.
2219 */
2220static inline void thread_group_cputime_account_exec_runtime(
2221 struct thread_group_cputime *tgtimes,
2222 unsigned long long ns)
2223{
2224 if (tgtimes->totals) {
2225 struct task_cputime *times;
2226
2227 times = per_cpu_ptr(tgtimes->totals, get_cpu());
2228 times->sum_exec_runtime += ns;
2229 put_cpu_no_resched();
2230 }
2231}
2232
2233#else /* CONFIG_SMP */
2234
2235static inline void thread_group_cputime_init(struct signal_struct *sig)
2236{
2237 sig->cputime.totals.utime = cputime_zero;
2238 sig->cputime.totals.stime = cputime_zero;
2239 sig->cputime.totals.sum_exec_runtime = 0;
2240}
2241
2242static inline int thread_group_cputime_alloc(struct task_struct *tsk)
2243{
2244 return 0;
2245}
2246
2247static inline void thread_group_cputime_free(struct signal_struct *sig)
2248{
2249}
2250
2251static inline int thread_group_cputime_clone_thread(struct task_struct *curr,
2252 struct task_struct *tsk)
2253{
2254}
2255
2256static inline void thread_group_cputime(struct task_struct *tsk,
2257 struct task_cputime *cputime)
2258{
2259 *cputime = tsk->signal->cputime.totals;
2260}
2261
2262static inline void thread_group_cputime_account_user(
2263 struct thread_group_cputime *tgtimes,
2264 cputime_t cputime)
2265{
2266 tgtimes->totals->utime = cputime_add(tgtimes->totals->utime, cputime);
2267}
2268
2269static inline void thread_group_cputime_account_system(
2270 struct thread_group_cputime *tgtimes,
2271 cputime_t cputime)
2272{
2273 tgtimes->totals->stime = cputime_add(tgtimes->totals->stime, cputime);
2274}
2275
2276static inline void thread_group_cputime_account_exec_runtime(
2277 struct thread_group_cputime *tgtimes,
2278 unsigned long long ns)
2279{
2280 tgtimes->totals->sum_exec_runtime += ns;
2281}
2282
2283#endif /* CONFIG_SMP */
2284
2285static inline void account_group_user_time(struct task_struct *tsk,
2286 cputime_t cputime)
2287{
2288 struct signal_struct *sig;
2289
2290 sig = tsk->signal;
2291 if (likely(sig))
2292 thread_group_cputime_account_user(&sig->cputime, cputime);
2293}
2294
2295static inline void account_group_system_time(struct task_struct *tsk,
2296 cputime_t cputime)
2297{
2298 struct signal_struct *sig;
2299
2300 sig = tsk->signal;
2301 if (likely(sig))
2302 thread_group_cputime_account_system(&sig->cputime, cputime);
2303}
2304
2305static inline void account_group_exec_runtime(struct task_struct *tsk,
2306 unsigned long long ns)
2307{
2308 struct signal_struct *sig;
2309
2310 sig = tsk->signal;
2311 if (likely(sig))
2312 thread_group_cputime_account_exec_runtime(&sig->cputime, ns);
2313}
2314
2315/*
2085 * Reevaluate whether the task has signals pending delivery. 2316 * Reevaluate whether the task has signals pending delivery.
2086 * Wake the task if so. 2317 * Wake the task if so.
2087 * This is required every time the blocked sigset_t changes. 2318 * This is required every time the blocked sigset_t changes.