Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 80 +++++++++++++++++++++++++++++++++---------------
 1 file changed, 55 insertions(+), 25 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b81a1f8..a7c7698583bb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -293,6 +293,9 @@ extern void sched_show_task(struct task_struct *p);
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
+extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+				    struct file *filp, void __user *buffer,
+				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
@@ -456,16 +459,27 @@ struct task_cputime {
 #define virt_exp	utime
 #define sched_exp	sum_exec_runtime
 
+#define INIT_CPUTIME	\
+	(struct task_cputime) {					\
+		.utime = cputime_zero,				\
+		.stime = cputime_zero,				\
+		.sum_exec_runtime = 0,				\
+	}
+
 /**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals:		thread group interval timers; substructure for
- *			uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime:		thread group interval timers.
+ * @running:		non-zero when there are timers running and
+ *			@cputime receives updates.
+ * @lock:		lock for fields in this struct.
  *
  * This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
  */
-struct thread_group_cputime {
-	struct task_cputime *totals;
+struct thread_group_cputimer {
+	struct task_cputime cputime;
+	int running;
+	spinlock_t lock;
 };
 
 /*
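The per-CPU totals pointer is gone: the group's accumulated times now live directly in the structure, guarded by @lock and only maintained while @running is non-zero. For illustration, an accounting helper that feeds this structure from the timer tick would look roughly like the sketch below, modeled on the account_group_*_time() helpers in kernel/sched_stats.h; the function name and body here are simplified and illustrative, not copied from the patch.

/*
 * Illustrative sketch: fold a thread's user time into the shared
 * thread_group_cputimer from the accounting path.
 */
static inline void example_account_group_user_time(struct task_struct *tsk,
						   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* No process-wide timer is armed: skip the locked update entirely. */
	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.utime = cputime_add(cputimer->cputime.utime, cputime);
	spin_unlock(&cputimer->lock);
}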
@@ -514,10 +528,10 @@ struct signal_struct {
 	cputime_t it_prof_incr, it_virt_incr;
 
 	/*
-	 * Thread group totals for process CPU clocks.
-	 * See thread_group_cputime(), et al, for details.
+	 * Thread group totals for process CPU timers.
+	 * See thread_group_cputimer(), et al, for details.
 	 */
-	struct thread_group_cputime cputime;
+	struct thread_group_cputimer cputimer;
 
 	/* Earliest-expiration cache. */
 	struct task_cputime cputime_expires;
@@ -554,7 +568,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t cutime, cstime;
+	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -563,6 +577,14 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
+	 * Cumulative ns of scheduled CPU time of dead threads in the
+	 * group, not including a zombie group leader.  (This only differs
+	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
+	 * other than jiffies.)
+	 */
+	unsigned long long sum_sched_runtime;
+
+	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -626,7 +648,6 @@ struct user_struct {
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_devs;	/* The number of epoll descriptors currently open */
 	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
@@ -1157,10 +1178,9 @@ struct task_struct {
 	pid_t pid;
 	pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
-#endif
+
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -2066,6 +2086,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+	unsigned long *n = end_of_stack(p);
+
+	do { 	/* Skip over canary */
+		n++;
+	} while (!*n);
+
+	return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
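The new stack_not_used() helper walks upward from end_of_stack(), skips the stack-end canary, and stops at the first non-zero word; the distance covered is the number of stack bytes that were never written, which works because CONFIG_DEBUG_STACK_USAGE allocates zero-filled thread stacks. A caller could report that headroom roughly as the task-dump code does; the sketch below is illustrative, and its name and message are not part of this patch.

/* Illustrative caller: report how much kernel stack a task never touched. */
#ifdef CONFIG_DEBUG_STACK_USAGE
static void report_stack_headroom(struct task_struct *p)
{
	unsigned long free = stack_not_used(p);

	printk(KERN_DEBUG "%-13.13s never used %lu bytes of stack\n",
	       p->comm, free);
}
#endif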
@@ -2179,25 +2212,18 @@ static inline int spin_needbreak(spinlock_t *lock)
 /*
  * Thread group CPU time accounting.
  */
-
-extern int thread_group_cputime_alloc(struct task_struct *);
-extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputime.totals = NULL;
-}
-
-static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
-{
-	if (curr->signal->cputime.totals)
-		return 0;
-	return thread_group_cputime_alloc(curr);
+	sig->cputimer.cputime = INIT_CPUTIME;
+	spin_lock_init(&sig->cputimer.lock);
+	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
 {
-	free_percpu(sig->cputime.totals);
 }
 
 /*
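Because the timer state is now embedded in signal_struct, thread_group_cputime_init() cannot fail and the old thread_group_cputime_alloc()/_clone_thread() helpers disappear; the initializer simply resets the fields when a fresh signal_struct is created on the fork path. A minimal sketch of such a call site follows, abridged and illustrative, loosely following copy_signal() in kernel/fork.c; allocation details and the other signal_struct fields are omitted.

/* Abridged, illustrative view of the fork-path caller. */
static int copy_signal_sketch(struct task_struct *tsk)
{
	struct signal_struct *sig;

	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	if (!sig)
		return -ENOMEM;

	/* Zero the group times, init the lock, and mark no timers running. */
	thread_group_cputime_init(sig);

	tsk->signal = sig;
	return 0;
}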
@@ -2277,9 +2303,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 				      long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+					struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {