aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/sched.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 49
1 file changed, 35 insertions(+), 14 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b81a1f8..2225c207801c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -293,6 +293,9 @@ extern void sched_show_task(struct task_struct *p);
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
+extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+				    struct file *filp, void __user *buffer,
+				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
@@ -450,6 +453,7 @@ struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
+	spinlock_t lock;
 };
 /* Alternate field names when used to cache expirations. */
 #define prof_exp	stime
@@ -465,7 +469,7 @@ struct task_cputime {
  * used for thread group CPU clock calculations.
  */
 struct thread_group_cputime {
-	struct task_cputime *totals;
+	struct task_cputime totals;
 };
 
 /*
@@ -626,7 +630,6 @@ struct user_struct {
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_devs;	/* The number of epoll descriptors currently open */
 	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
@@ -1157,10 +1160,9 @@ struct task_struct {
 	pid_t pid;
 	pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
-#endif
+
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -2066,6 +2068,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+	unsigned long *n = end_of_stack(p);
+
+	do { 	/* Skip over canary */
+		n++;
+	} while (!*n);
+
+	return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
@@ -2180,24 +2195,30 @@ static inline int spin_needbreak(spinlock_t *lock)
  * Thread group CPU time accounting.
  */
 
-extern int thread_group_cputime_alloc(struct task_struct *);
-extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
-
-static inline void thread_group_cputime_init(struct signal_struct *sig)
+static inline
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
-	sig->cputime.totals = NULL;
+	struct task_cputime *totals = &tsk->signal->cputime.totals;
+	unsigned long flags;
+
+	spin_lock_irqsave(&totals->lock, flags);
+	*times = *totals;
+	spin_unlock_irqrestore(&totals->lock, flags);
 }
 
-static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	if (curr->signal->cputime.totals)
-		return 0;
-	return thread_group_cputime_alloc(curr);
+	sig->cputime.totals = (struct task_cputime){
+		.utime = cputime_zero,
+		.stime = cputime_zero,
+		.sum_exec_runtime = 0,
+	};
+
+	spin_lock_init(&sig->cputime.totals.lock);
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
 {
-	free_percpu(sig->cputime.totals);
 }
 
 /*