Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b55e988988b5..f118809c953f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -268,7 +268,6 @@ extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(int cpu);
-extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -384,7 +383,7 @@ struct user_namespace;
  * 1-3 now and depends on arch. We use "5" as safe margin, here.
  */
 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 
 extern int sysctl_max_map_count;
 
@@ -527,8 +526,9 @@ struct thread_group_cputimer {
  * the locking of signal_struct.
  */
 struct signal_struct {
-	atomic_t		count;
+	atomic_t		sigcnt;
 	atomic_t		live;
+	int			nr_threads;
 
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 
@@ -1421,7 +1421,9 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
+	int mems_allowed_change_disable;
 	int cpuset_mem_spread_rotor;
+	int cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
 	/* Control Group info protected by css_set_lock */
@@ -2034,7 +2036,7 @@ extern int do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
-extern void zap_other_threads(struct task_struct *p);
+extern int zap_other_threads(struct task_struct *p);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
@@ -2099,7 +2101,6 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void __cleanup_signal(struct signal_struct *);
 extern void __cleanup_sighand(struct sighand_struct *);
 
 extern void exit_itimers(struct signal_struct *);
@@ -2146,6 +2147,11 @@ extern bool current_is_single_threaded(void);
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
+static inline int get_nr_threads(struct task_struct *tsk)
+{
+	return tsk->signal->nr_threads;
+}
+
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p)	(p == p->group_leader)
 
@@ -2392,10 +2398,6 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
 	spin_lock_init(&sig->cputimer.lock);
 }
 
-static inline void thread_group_cputime_free(struct signal_struct *sig)
-{
-}
-
 /*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
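
Not part of the patch above: a minimal, hypothetical sketch of how kernel code might call the get_nr_threads() helper introduced by this diff. The report_thread_count() function and its log message are illustrative assumptions; only get_nr_threads() and signal->nr_threads come from the patch.

/* Hypothetical caller -- assumes process context where "current" is valid. */
#include <linux/kernel.h>
#include <linux/sched.h>

static void report_thread_count(void)
{
	/* get_nr_threads() reads signal->nr_threads, the per-thread-group
	 * thread count added by this patch. */
	pr_info("%s[%d]: %d thread(s) in group\n",
		current->comm, task_pid_nr(current),
		get_nr_threads(current));
}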