 include/linux/sched.h | 1222 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 651 insertions(+), 571 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 341f5792fbe0..d67eee84fd43 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1,38 +1,38 @@
 #ifndef _LINUX_SCHED_H
 #define _LINUX_SCHED_H
 
-#include <uapi/linux/sched.h>
+/*
+ * Define 'struct task_struct' and provide the main scheduler
+ * APIs (schedule(), wakeup variants, etc.)
+ */
 
-#include <linux/sched/prio.h>
-#include <linux/nodemask.h>
+#include <uapi/linux/sched.h>
 
-#include <linux/mutex.h>
-#include <linux/plist.h>
-#include <linux/mm_types_task.h>
+#include <asm/current.h>
 
+#include <linux/pid.h>
 #include <linux/sem.h>
 #include <linux/shm.h>
-#include <linux/signal_types.h>
-#include <linux/pid.h>
+#include <linux/kcov.h>
+#include <linux/mutex.h>
+#include <linux/plist.h>
+#include <linux/hrtimer.h>
 #include <linux/seccomp.h>
+#include <linux/nodemask.h>
 #include <linux/rcupdate.h>
-
 #include <linux/resource.h>
-#include <linux/hrtimer.h>
-#include <linux/kcov.h>
-#include <linux/task_io_accounting.h>
 #include <linux/latencytop.h>
+#include <linux/sched/prio.h>
+#include <linux/signal_types.h>
+#include <linux/mm_types_task.h>
+#include <linux/task_io_accounting.h>
 
-#include <asm/current.h>
-
-/* task_struct member predeclarations: */
+/* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
-struct autogroup;
 struct backing_dev_info;
 struct bio_list;
 struct blk_plug;
 struct cfs_rq;
-struct filename;
 struct fs_struct;
 struct futex_pi_state;
 struct io_context;
@@ -52,8 +52,6 @@ struct sighand_struct;
 struct signal_struct;
 struct task_delay_info;
 struct task_group;
-struct task_struct;
-struct uts_namespace;
 
 /*
  * Task state bitmask. NOTE! These bits are also
@@ -65,50 +63,53 @@ struct uts_namespace;
  * modifying one set can't modify the other one by
  * mistake.
  */
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-#define __TASK_STOPPED 4
-#define __TASK_TRACED 8
-/* in tsk->exit_state */
-#define EXIT_DEAD 16
-#define EXIT_ZOMBIE 32
-#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
-/* in tsk->state again */
-#define TASK_DEAD 64
-#define TASK_WAKEKILL 128
-#define TASK_WAKING 256
-#define TASK_PARKED 512
-#define TASK_NOLOAD 1024
-#define TASK_NEW 2048
-#define TASK_STATE_MAX 4096
-
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
-
-/* Convenience macros for the sake of set_current_state */
-#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
-#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
-#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
-
-#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
-
-/* Convenience macros for the sake of wake_up */
-#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
-#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
-
-/* get_task_state() */
-#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
-			TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
-			__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
-
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
-#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
-#define task_is_stopped_or_traced(task) \
-	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
-#define task_contributes_to_load(task) \
-	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-	 (task->flags & PF_FROZEN) == 0 && \
-	 (task->state & TASK_NOLOAD) == 0)
+
+/* Used in tsk->state: */
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define __TASK_STOPPED 4
+#define __TASK_TRACED 8
+/* Used in tsk->exit_state: */
+#define EXIT_DEAD 16
+#define EXIT_ZOMBIE 32
+#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
+/* Used in tsk->state again: */
+#define TASK_DEAD 64
+#define TASK_WAKEKILL 128
+#define TASK_WAKING 256
+#define TASK_PARKED 512
+#define TASK_NOLOAD 1024
+#define TASK_NEW 2048
+#define TASK_STATE_MAX 4096
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
+
+/* Convenience macros for the sake of set_current_state: */
+#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+
+#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
+
+/* Convenience macros for the sake of wake_up(): */
+#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state(): */
+#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
+			 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+			 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+
+#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+
+#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+
+#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+
+#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
+					 (task->flags & PF_FROZEN) == 0 && \
+					 (task->state & TASK_NOLOAD) == 0)
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
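(Aside, for readers new to these bits: the canonical consumer of the state
definitions above is the prepare-to-wait loop. A minimal sketch, not part of
this patch -- 'my_waitqueue' and 'my_cond' are made-up names, and the wait
queue is assumed to be initialized with init_waitqueue_head():

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_waitqueue, &wait, TASK_INTERRUPTIBLE);
		if (my_cond)				/* hypothetical wakeup condition */
			break;
		if (signal_pending(current))		/* TASK_INTERRUPTIBLE: a signal ends the sleep */
			break;
		schedule();				/* actually sleep */
	}
	finish_wait(&my_waitqueue, &wait);

The waker side calls wake_up(&my_waitqueue), which matches sleepers via
TASK_NORMAL, i.e. both TASK_INTERRUPTIBLE and TASK_UNINTERRUPTIBLE.)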
@@ -158,26 +159,24 @@ struct uts_namespace;
  *
  * Also see the comments of try_to_wake_up().
  */
-#define __set_current_state(state_value) \
-	do { current->state = (state_value); } while (0)
-#define set_current_state(state_value) \
-	smp_store_mb(current->state, (state_value))
-
+#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
+#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
 #endif
 
-/* Task command name length */
+/* Task command name length: */
 #define TASK_COMM_LEN 16
 
 extern cpumask_var_t cpu_isolated_map;
 
 extern void scheduler_tick(void);
 
 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
-extern signed long schedule_timeout(signed long timeout);
-extern signed long schedule_timeout_interruptible(signed long timeout);
-extern signed long schedule_timeout_killable(signed long timeout);
-extern signed long schedule_timeout_uninterruptible(signed long timeout);
-extern signed long schedule_timeout_idle(signed long timeout);
+
+extern long schedule_timeout(long timeout);
+extern long schedule_timeout_interruptible(long timeout);
+extern long schedule_timeout_killable(long timeout);
+extern long schedule_timeout_uninterruptible(long timeout);
+extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
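(Aside: the schedule_timeout() family re-prototyped above differs in who sets
the task state. Plain schedule_timeout() expects the caller to have set it
first; the suffixed variants set it themselves. An illustrative sketch,
assuming msecs_to_jiffies() for the unit conversion:

	/* Explicit form -- the caller picks the state: */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(100));	/* sleep ~100ms, not signal-wakeable */

	/* Helper form -- returns the jiffies left if woken early, e.g. by a signal: */
	long left = schedule_timeout_interruptible(msecs_to_jiffies(100));
)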
@@ -197,9 +196,9 @@ extern void io_schedule(void);
  */
 struct prev_cputime {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	u64 utime;
-	u64 stime;
-	raw_spinlock_t lock;
+	u64				utime;
+	u64				stime;
+	raw_spinlock_t			lock;
 #endif
 };
 
@@ -214,25 +213,34 @@ struct prev_cputime {
  * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
 	u64 utime;
 	u64 stime;
 	unsigned long long sum_exec_runtime;
 };
 
-/* Alternate field names when used to cache expirations. */
+/* Alternate field names when used on cache expirations: */
 #define virt_exp utime
 #define prof_exp stime
 #define sched_exp sum_exec_runtime
 
 struct sched_info {
 #ifdef CONFIG_SCHED_INFO
-	/* cumulative counters */
-	unsigned long pcount;		/* # of times run on this cpu */
-	unsigned long long run_delay;	/* time spent waiting on a runqueue */
+	/* Cumulative counters: */
+
+	/* # of times we have run on this CPU: */
+	unsigned long pcount;
+
+	/* Time spent waiting on a runqueue: */
+	unsigned long long run_delay;
+
+	/* Timestamps: */
+
+	/* When did we last run on a CPU? */
+	unsigned long long last_arrival;
+
+	/* When were we last queued to run? */
+	unsigned long long last_queued;
 
-	/* timestamps */
-	unsigned long long last_arrival,/* when we last ran on a cpu */
-			   last_queued;	/* when we were last queued to run */
 #endif /* CONFIG_SCHED_INFO */
 };
 
@@ -243,12 +251,12 @@ struct sched_info {
  * We define a basic fixed point arithmetic range, and then formalize
  * all these metrics based on that basic range.
  */
-# define SCHED_FIXEDPOINT_SHIFT 10
-# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
+# define SCHED_FIXEDPOINT_SHIFT		10
+# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
 
 struct load_weight {
-	unsigned long weight;
-	u32 inv_weight;
+	unsigned long			weight;
+	u32				inv_weight;
 };
 
 /*
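(Aside: in the fixed point convention above, 1.0 is SCHED_FIXEDPOINT_SCALE,
i.e. 1 << 10 = 1024. A hedged sketch of the arithmetic -- 'lw' is a
hypothetical struct load_weight instance:

	/* 0.8 in fixed point is (8 * SCHED_FIXEDPOINT_SCALE) / 10 = 819: */
	unsigned long frac = (8 * SCHED_FIXEDPOINT_SCALE) / 10;

	/* Multiply, then shift right to undo the scaling: ~80% of the weight. */
	unsigned long scaled = (lw.weight * frac) >> SCHED_FIXEDPOINT_SHIFT;
)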
@@ -304,69 +312,73 @@ struct load_weight {
  * issues.
  */
 struct sched_avg {
-	u64 last_update_time, load_sum;
-	u32 util_sum, period_contrib;
-	unsigned long load_avg, util_avg;
+	u64 last_update_time;
+	u64 load_sum;
+	u32 util_sum;
+	u32 period_contrib;
+	unsigned long load_avg;
+	unsigned long util_avg;
 };
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
 	u64 wait_start;
 	u64 wait_max;
 	u64 wait_count;
 	u64 wait_sum;
 	u64 iowait_count;
 	u64 iowait_sum;
 
 	u64 sleep_start;
 	u64 sleep_max;
 	s64 sum_sleep_runtime;
 
 	u64 block_start;
 	u64 block_max;
 	u64 exec_max;
 	u64 slice_max;
 
 	u64 nr_migrations_cold;
 	u64 nr_failed_migrations_affine;
 	u64 nr_failed_migrations_running;
 	u64 nr_failed_migrations_hot;
 	u64 nr_forced_migrations;
 
 	u64 nr_wakeups;
 	u64 nr_wakeups_sync;
 	u64 nr_wakeups_migrate;
 	u64 nr_wakeups_local;
 	u64 nr_wakeups_remote;
 	u64 nr_wakeups_affine;
 	u64 nr_wakeups_affine_attempts;
 	u64 nr_wakeups_passive;
 	u64 nr_wakeups_idle;
 #endif
 };
 
 struct sched_entity {
-	struct load_weight load;	/* for load-balancing */
-	struct rb_node run_node;
-	struct list_head group_node;
-	unsigned int on_rq;
+	/* For load-balancing: */
+	struct load_weight load;
+	struct rb_node run_node;
+	struct list_head group_node;
+	unsigned int on_rq;
 
 	u64 exec_start;
 	u64 sum_exec_runtime;
 	u64 vruntime;
 	u64 prev_sum_exec_runtime;
 
 	u64 nr_migrations;
 
 	struct sched_statistics statistics;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	int depth;
 	struct sched_entity *parent;
 	/* rq on which this entity is (to be) queued: */
 	struct cfs_rq *cfs_rq;
 	/* rq "owned" by this entity/group: */
 	struct cfs_rq *my_q;
 #endif
 
 #ifdef CONFIG_SMP
@@ -376,49 +388,49 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
 	 */
 	struct sched_avg avg ____cacheline_aligned_in_smp;
 #endif
 };
 
 struct sched_rt_entity {
 	struct list_head run_list;
 	unsigned long timeout;
 	unsigned long watchdog_stamp;
 	unsigned int time_slice;
 	unsigned short on_rq;
 	unsigned short on_list;
 
 	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity *parent;
 	/* rq on which this entity is (to be) queued: */
 	struct rt_rq *rt_rq;
 	/* rq "owned" by this entity/group: */
 	struct rt_rq *my_q;
 #endif
 };
 
 struct sched_dl_entity {
 	struct rb_node rb_node;
 
 	/*
 	 * Original scheduling parameters. Copied here from sched_attr
 	 * during sched_setattr(), they will remain the same until
 	 * the next sched_setattr().
 	 */
-	u64 dl_runtime;		/* maximum runtime for each instance */
-	u64 dl_deadline;	/* relative deadline of each instance */
-	u64 dl_period;		/* separation of two instances (period) */
+	u64 dl_runtime;		/* Maximum runtime for each instance */
+	u64 dl_deadline;	/* Relative deadline of each instance */
+	u64 dl_period;		/* Separation of two instances (period) */
 	u64 dl_bw;		/* dl_runtime / dl_deadline */
 
 	/*
 	 * Actual scheduling parameters. Initialized with the values above,
 	 * they are continously updated during task execution. Note that
 	 * the remaining runtime could be < 0 in case we are in overrun.
 	 */
-	s64 runtime;		/* remaining runtime for this instance */
-	u64 deadline;		/* absolute deadline for this instance */
-	unsigned int flags;	/* specifying the scheduler behaviour */
+	s64 runtime;		/* Remaining runtime for this instance */
+	u64 deadline;		/* Absolute deadline for this instance */
+	unsigned int flags;	/* Specifying the scheduler behaviour */
 
 	/*
 	 * Some bool flags:
@@ -431,24 +443,28 @@ struct sched_dl_entity {
 	 * outside bandwidth enforcement mechanism (but only until we
 	 * exit the critical section);
 	 *
-	 * @dl_yielded tells if task gave up the cpu before consuming
+	 * @dl_yielded tells if task gave up the CPU before consuming
 	 * all its available runtime during the last job.
 	 */
-	int dl_throttled, dl_boosted, dl_yielded;
+	int dl_throttled;
+	int dl_boosted;
+	int dl_yielded;
 
 	/*
 	 * Bandwidth enforcement timer. Each -deadline task has its
 	 * own bandwidth to be enforced, thus we need one timer per task.
 	 */
 	struct hrtimer dl_timer;
 };
 
 union rcu_special {
 	struct {
 		u8 blocked;
 		u8 need_qs;
 		u8 exp_need_qs;
-		u8 pad;	/* Otherwise the compiler can store garbage here. */
+
+		/* Otherwise the compiler can store garbage here: */
+		u8 pad;
 	} b; /* Bits. */
 	u32 s; /* Set of bits. */
 };
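(Aside: the dl_runtime/dl_deadline/dl_period tuple documented above mirrors
the userspace struct sched_attr accepted by the sched_setattr() system call.
A hedged userspace sketch, assuming the uapi sched_attr layout; there is no
glibc wrapper, hence the raw syscall:

	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	=  10 * 1000 * 1000,	/* becomes dl_runtime: 10ms in ns */
		.sched_deadline	= 100 * 1000 * 1000,	/* becomes dl_deadline: 100ms in ns */
		.sched_period	= 100 * 1000 * 1000,	/* becomes dl_period: 100ms in ns */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0))	/* pid 0 == calling thread */
		perror("sched_setattr");

With these numbers, dl_bw -- the dl_runtime/dl_deadline ratio the kernel
keeps -- works out to 0.1 of a CPU.)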
@@ -470,361 +486,417 @@ struct task_struct {
 	 * For reasons of header soup (see current_thread_info()), this
 	 * must be the first element of task_struct.
 	 */
 	struct thread_info thread_info;
 #endif
-	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
-	void *stack;
-	atomic_t usage;
-	unsigned int flags;	/* per process flags, defined below */
-	unsigned int ptrace;
+	/* -1 unrunnable, 0 runnable, >0 stopped: */
+	volatile long state;
+	void *stack;
+	atomic_t usage;
+	/* Per task flags (PF_*), defined further below: */
+	unsigned int flags;
+	unsigned int ptrace;
 
 #ifdef CONFIG_SMP
 	struct llist_node wake_entry;
 	int on_cpu;
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-	unsigned int cpu;	/* current CPU */
+	/* Current CPU: */
+	unsigned int cpu;
 #endif
 	unsigned int wakee_flips;
 	unsigned long wakee_flip_decay_ts;
 	struct task_struct *last_wakee;
 
 	int wake_cpu;
 #endif
 	int on_rq;
+
+	int prio;
+	int static_prio;
+	int normal_prio;
+	unsigned int rt_priority;
 
-	int prio, static_prio, normal_prio;
-	unsigned int rt_priority;
-	const struct sched_class *sched_class;
-	struct sched_entity se;
-	struct sched_rt_entity rt;
+	const struct sched_class *sched_class;
+	struct sched_entity se;
+	struct sched_rt_entity rt;
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group *sched_task_group;
 #endif
 	struct sched_dl_entity dl;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
-	/* list of struct preempt_notifier: */
+	/* List of struct preempt_notifier: */
 	struct hlist_head preempt_notifiers;
 #endif
 
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
 
 	unsigned int policy;
 	int nr_cpus_allowed;
 	cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
 	union rcu_special rcu_read_unlock_special;
 	struct list_head rcu_node_entry;
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
+
 #ifdef CONFIG_TASKS_RCU
 	unsigned long rcu_tasks_nvcsw;
 	bool rcu_tasks_holdout;
 	struct list_head rcu_tasks_holdout_list;
 	int rcu_tasks_idle_cpu;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
 	struct sched_info sched_info;
 
 	struct list_head tasks;
 #ifdef CONFIG_SMP
 	struct plist_node pushable_tasks;
 	struct rb_node pushable_dl_tasks;
 #endif
 
-	struct mm_struct *mm, *active_mm;
+	struct mm_struct *mm;
+	struct mm_struct *active_mm;
 
 	/* Per-thread vma caching: */
 	struct vmacache vmacache;
 
-#if defined(SPLIT_RSS_COUNTING)
+#ifdef SPLIT_RSS_COUNTING
 	struct task_rss_stat rss_stat;
 #endif
-/* task state */
-	int exit_state;
-	int exit_code, exit_signal;
-	int pdeath_signal;	/* The signal sent when the parent dies */
-	unsigned long jobctl;	/* JOBCTL_*, siglock protected */
-
-	/* Used for emulating ABI behavior of previous Linux versions */
-	unsigned int personality;
-
-	/* scheduler bits, serialized by scheduler locks */
-	unsigned sched_reset_on_fork:1;
-	unsigned sched_contributes_to_load:1;
-	unsigned sched_migrated:1;
-	unsigned sched_remote_wakeup:1;
-	unsigned :0; /* force alignment to the next boundary */
-
-	/* unserialized, strictly 'current' */
-	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
-	unsigned in_iowait:1;
-#if !defined(TIF_RESTORE_SIGMASK)
-	unsigned restore_sigmask:1;
+	int exit_state;
+	int exit_code;
+	int exit_signal;
+	/* The signal sent when the parent dies: */
+	int pdeath_signal;
+	/* JOBCTL_*, siglock protected: */
+	unsigned long jobctl;
+
+	/* Used for emulating ABI behavior of previous Linux versions: */
+	unsigned int personality;
+
+	/* Scheduler bits, serialized by scheduler locks: */
+	unsigned sched_reset_on_fork:1;
+	unsigned sched_contributes_to_load:1;
+	unsigned sched_migrated:1;
+	unsigned sched_remote_wakeup:1;
+	/* Force alignment to the next boundary: */
+	unsigned :0;
+
+	/* Unserialized, strictly 'current' */
+
+	/* Bit to tell LSMs we're in execve(): */
+	unsigned in_execve:1;
+	unsigned in_iowait:1;
+#ifndef TIF_RESTORE_SIGMASK
+	unsigned restore_sigmask:1;
 #endif
 #ifdef CONFIG_MEMCG
 	unsigned memcg_may_oom:1;
 #ifndef CONFIG_SLOB
 	unsigned memcg_kmem_skip_account:1;
 #endif
 #endif
 #ifdef CONFIG_COMPAT_BRK
 	unsigned brk_randomized:1;
 #endif
 
-	unsigned long atomic_flags; /* Flags needing atomic access. */
+	unsigned long atomic_flags; /* Flags requiring atomic access. */
 
 	struct restart_block restart_block;
 
 	pid_t pid;
 	pid_t tgid;
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-	/* Canary value for the -fstack-protector gcc feature */
+	/* Canary value for the -fstack-protector GCC feature: */
 	unsigned long stack_canary;
 #endif
 	/*
-	 * pointers to (original) parent process, youngest child, younger sibling,
+	 * Pointers to the (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively. (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
-	struct task_struct __rcu *real_parent; /* real parent process */
-	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
+
+	/* Real parent process: */
+	struct task_struct __rcu *real_parent;
+
+	/* Recipient of SIGCHLD, wait4() reports: */
+	struct task_struct __rcu *parent;
+
 	/*
-	 * children/sibling forms the list of my natural children
+	 * Children/sibling form the list of natural children:
 	 */
-	struct list_head children;	/* list of my children */
-	struct list_head sibling;	/* linkage in my parent's children list */
-	struct task_struct *group_leader;	/* threadgroup leader */
+	struct list_head children;
+	struct list_head sibling;
+	struct task_struct *group_leader;
 
 	/*
-	 * ptraced is the list of tasks this task is using ptrace on.
+	 * 'ptraced' is the list of tasks this task is using ptrace() on.
+	 *
 	 * This includes both natural children and PTRACE_ATTACH targets.
-	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
+	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
 	 */
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
 	struct list_head thread_node;
+
+	struct completion *vfork_done;
 
-	struct completion *vfork_done;	/* for vfork() */
-	int __user *set_child_tid;	/* CLONE_CHILD_SETTID */
-	int __user *clear_child_tid;	/* CLONE_CHILD_CLEARTID */
+	/* CLONE_CHILD_SETTID: */
+	int __user *set_child_tid;
 
-	u64 utime, stime;
+	/* CLONE_CHILD_CLEARTID: */
+	int __user *clear_child_tid;
+
+	u64 utime;
+	u64 stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-	u64 utimescaled, stimescaled;
+	u64 utimescaled;
+	u64 stimescaled;
 #endif
 	u64 gtime;
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqcount_t vtime_seqcount;
 	unsigned long long vtime_snap;
 	enum {
-		/* Task is sleeping or running in a CPU with VTIME inactive */
+		/* Task is sleeping or running in a CPU with VTIME inactive: */
 		VTIME_INACTIVE = 0,
-		/* Task runs in userspace in a CPU with VTIME active */
+		/* Task runs in userspace in a CPU with VTIME active: */
 		VTIME_USER,
-		/* Task runs in kernelspace in a CPU with VTIME active */
+		/* Task runs in kernelspace in a CPU with VTIME active: */
 		VTIME_SYS,
 	} vtime_snap_whence;
 #endif
 
 #ifdef CONFIG_NO_HZ_FULL
 	atomic_t tick_dep_mask;
 #endif
-	unsigned long nvcsw, nivcsw;	/* context switch counts */
-	u64 start_time;			/* monotonic time in nsec */
-	u64 real_start_time;		/* boot based time in nsec */
-/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
-	unsigned long min_flt, maj_flt;
+	/* Context switch counts: */
+	unsigned long nvcsw;
+	unsigned long nivcsw;
+
+	/* Monotonic time in nsecs: */
+	u64 start_time;
+
+	/* Boot based time in nsecs: */
+	u64 real_start_time;
+
+	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
+	unsigned long min_flt;
+	unsigned long maj_flt;
 
 #ifdef CONFIG_POSIX_TIMERS
 	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 #endif
 
-/* process credentials */
-	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
-	const struct cred __rcu *real_cred; /* objective and real subjective task
-					     * credentials (COW) */
-	const struct cred __rcu *cred;	/* effective (overridable) subjective task
-					     * credentials (COW) */
-	char comm[TASK_COMM_LEN]; /* executable name excluding path
-				     - access with [gs]et_task_comm (which lock
-				       it with task_lock())
-				     - initialized normally by setup_new_exec */
-/* file system info */
-	struct nameidata *nameidata;
+	/* Process credentials: */
+
+	/* Tracer's credentials at attach: */
+	const struct cred __rcu *ptracer_cred;
+
+	/* Objective and real subjective task credentials (COW): */
+	const struct cred __rcu *real_cred;
+
+	/* Effective (overridable) subjective task credentials (COW): */
+	const struct cred __rcu *cred;
+
+	/*
+	 * executable name, excluding path.
+	 *
+	 * - normally initialized setup_new_exec()
+	 * - access it with [gs]et_task_comm()
+	 * - lock it with task_lock()
+	 */
+	char comm[TASK_COMM_LEN];
+
+	struct nameidata *nameidata;
+
 #ifdef CONFIG_SYSVIPC
-/* ipc stuff */
-	struct sysv_sem sysvsem;
-	struct sysv_shm sysvshm;
+	struct sysv_sem sysvsem;
+	struct sysv_shm sysvshm;
 #endif
 #ifdef CONFIG_DETECT_HUNG_TASK
-/* hung task detection */
 	unsigned long last_switch_count;
 #endif
-/* filesystem information */
+	/* Filesystem information: */
 	struct fs_struct *fs;
-/* open file information */
+
+	/* Open file information: */
 	struct files_struct *files;
-/* namespaces */
+
+	/* Namespaces: */
 	struct nsproxy *nsproxy;
-/* signal handlers */
+
+	/* Signal handlers: */
 	struct signal_struct *signal;
 	struct sighand_struct *sighand;
-
-	sigset_t blocked, real_blocked;
-	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
+	sigset_t blocked;
+	sigset_t real_blocked;
+	/* Restored if set_restore_sigmask() was used: */
+	sigset_t saved_sigmask;
 	struct sigpending pending;
-
 	unsigned long sas_ss_sp;
 	size_t sas_ss_size;
-	unsigned sas_ss_flags;
+	unsigned int sas_ss_flags;
 
 	struct callback_head *task_works;
 
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
 	kuid_t loginuid;
 	unsigned int sessionid;
 #endif
 	struct seccomp seccomp;
 
-/* Thread group tracking */
+	/* Thread group tracking: */
 	u32 parent_exec_id;
 	u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
- * mempolicy */
+
+	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
 	spinlock_t alloc_lock;
 
 	/* Protection of the PI data structures: */
 	raw_spinlock_t pi_lock;
 
 	struct wake_q_node wake_q;
 
 #ifdef CONFIG_RT_MUTEXES
-	/* PI waiters blocked on a rt_mutex held by this task */
+	/* PI waiters blocked on a rt_mutex held by this task: */
 	struct rb_root pi_waiters;
 	struct rb_node *pi_waiters_leftmost;
-	/* Deadlock detection and priority inheritance handling */
+	/* Deadlock detection and priority inheritance handling: */
 	struct rt_mutex_waiter *pi_blocked_on;
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
-	/* mutex deadlock detection */
+	/* Mutex deadlock detection: */
 	struct mutex_waiter *blocked_on;
 #endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	unsigned long hardirq_enable_ip;
 	unsigned long hardirq_disable_ip;
 	unsigned int hardirq_enable_event;
 	unsigned int hardirq_disable_event;
 	int hardirqs_enabled;
 	int hardirq_context;
 	unsigned long softirq_disable_ip;
 	unsigned long softirq_enable_ip;
 	unsigned int softirq_disable_event;
 	unsigned int softirq_enable_event;
 	int softirqs_enabled;
 	int softirq_context;
 #endif
+
 #ifdef CONFIG_LOCKDEP
 # define MAX_LOCK_DEPTH 48UL
 	u64 curr_chain_key;
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	gfp_t lockdep_reclaim_gfp;
 #endif
+
 #ifdef CONFIG_UBSAN
 	unsigned int in_ubsan;
 #endif
 
-/* journalling filesystem info */
+	/* Journalling filesystem info: */
 	void *journal_info;
 
-/* stacked block device info */
+	/* Stacked block device info: */
 	struct bio_list *bio_list;
 
 #ifdef CONFIG_BLOCK
-/* stack plugging */
+	/* Stack plugging: */
 	struct blk_plug *plug;
 #endif
 
-/* VM state */
+	/* VM state: */
 	struct reclaim_state *reclaim_state;
+
+	struct backing_dev_info *backing_dev_info;
 
-	struct backing_dev_info *backing_dev_info;
+	struct io_context *io_context;
 
-	struct io_context *io_context;
+	/* Ptrace state: */
+	unsigned long ptrace_message;
+	siginfo_t *last_siginfo;
 
-	unsigned long ptrace_message;
-	siginfo_t *last_siginfo; /* For ptrace use. */
-	struct task_io_accounting ioac;
-#if defined(CONFIG_TASK_XACCT)
-	u64 acct_rss_mem1;	/* accumulated rss usage */
-	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	u64 acct_timexpd;	/* stime + utime since last update */
+	struct task_io_accounting ioac;
+#ifdef CONFIG_TASK_XACCT
+	/* Accumulated RSS usage: */
+	u64 acct_rss_mem1;
+	/* Accumulated virtual memory usage: */
+	u64 acct_vm_mem1;
+	/* stime + utime since last update: */
+	u64 acct_timexpd;
 #endif
 #ifdef CONFIG_CPUSETS
-	nodemask_t mems_allowed;	/* Protected by alloc_lock */
-	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
-	int cpuset_mem_spread_rotor;
-	int cpuset_slab_spread_rotor;
+	/* Protected by ->alloc_lock: */
+	nodemask_t mems_allowed;
+	/* Seqence number to catch updates: */
+	seqcount_t mems_allowed_seq;
+	int cpuset_mem_spread_rotor;
+	int cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
-	/* Control Group info protected by css_set_lock */
+	/* Control Group info protected by css_set_lock: */
 	struct css_set __rcu *cgroups;
-	/* cg_list protected by css_set_lock and tsk->alloc_lock */
+	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
 	struct list_head cg_list;
 #endif
 #ifdef CONFIG_INTEL_RDT_A
 	int closid;
 #endif
 #ifdef CONFIG_FUTEX
 	struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
 	struct mutex perf_event_mutex;
 	struct list_head perf_event_list;
 #endif
 #ifdef CONFIG_DEBUG_PREEMPT
 	unsigned long preempt_disable_ip;
 #endif
 #ifdef CONFIG_NUMA
-	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
-	short il_next;
-	short pref_node_fork;
+	/* Protected by alloc_lock: */
+	struct mempolicy *mempolicy;
+	short il_next;
+	short pref_node_fork;
 #endif
 #ifdef CONFIG_NUMA_BALANCING
 	int numa_scan_seq;
 	unsigned int numa_scan_period;
 	unsigned int numa_scan_period_max;
 	int numa_preferred_nid;
 	unsigned long numa_migrate_retry;
-	u64 node_stamp;			/* migration stamp */
-	u64 last_task_numa_placement;
-	u64 last_sum_exec_runtime;
-	struct callback_head numa_work;
-
-	struct list_head numa_entry;
-	struct numa_group *numa_group;
+	/* Migration stamp: */
+	u64 node_stamp;
+	u64 last_task_numa_placement;
+	u64 last_sum_exec_runtime;
+	struct callback_head numa_work;
+
+	struct list_head numa_entry;
+	struct numa_group *numa_group;
 
 	/*
 	 * numa_faults is an array split into four regions:
@@ -840,8 +912,8 @@ struct task_struct {
 	 * during the current scan window. When the scan completes, the counts
 	 * in faults_memory and faults_cpu decay and these values are copied.
 	 */
-	unsigned long *numa_faults;
-	unsigned long total_numa_faults;
+	unsigned long			*numa_faults;
+	unsigned long			total_numa_faults;
 
 	/*
 	 * numa_faults_locality tracks if faults recorded during the last
@@ -849,119 +921,132 @@ struct task_struct {
 	 * period is adapted based on the locality of the faults with different
 	 * weights depending on whether they were shared or private faults
 	 */
 	unsigned long numa_faults_locality[3];
 
 	unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
 	struct tlbflush_unmap_batch tlb_ubc;
 
 	struct rcu_head rcu;
 
-	/*
-	 * cache last used pipe for splice
-	 */
-	struct pipe_inode_info *splice_pipe;
+	/* Cache last used pipe for splice(): */
+	struct pipe_inode_info *splice_pipe;
 
 	struct page_frag task_frag;
 
 #ifdef CONFIG_TASK_DELAY_ACCT
 	struct task_delay_info *delays;
 #endif
 
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
 	/*
-	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
-	 * balance_dirty_pages() for some dirty throttling pause
+	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
+	 * balance_dirty_pages() for a dirty throttling pause:
 	 */
 	int nr_dirtied;
 	int nr_dirtied_pause;
-	unsigned long dirty_paused_when; /* start of a write-and-pause period */
+	/* Start of a write-and-pause period: */
+	unsigned long dirty_paused_when;
 
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
 #endif
 	/*
-	 * time slack values; these are used to round up poll() and
+	 * Time slack values; these are used to round up poll() and
 	 * select() etc timeout values. These are in nanoseconds.
 	 */
 	u64 timer_slack_ns;
 	u64 default_timer_slack_ns;
 
 #ifdef CONFIG_KASAN
 	unsigned int kasan_depth;
 #endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* Index of current stored address in ret_stack */
+	/* Index of current stored address in ret_stack: */
 	int curr_ret_stack;
-	/* Stack of return addresses for return function tracing */
-	struct ftrace_ret_stack *ret_stack;
-	/* time stamp for last schedule */
-	unsigned long long ftrace_timestamp;
+
+	/* Stack of return addresses for return function tracing: */
+	struct ftrace_ret_stack *ret_stack;
+
+	/* Timestamp for last schedule: */
+	unsigned long long ftrace_timestamp;
+
 	/*
 	 * Number of functions that haven't been traced
-	 * because of depth overrun.
+	 * because of depth overrun:
 	 */
 	atomic_t trace_overrun;
-	/* Pause for the tracing */
-	atomic_t tracing_graph_pause;
+
+	/* Pause tracing: */
+	atomic_t tracing_graph_pause;
 #endif
+
 #ifdef CONFIG_TRACING
-	/* state flags for use by tracers */
+	/* State flags for use by tracers: */
 	unsigned long trace;
-	/* bitmask and counter of trace recursion */
-	unsigned long trace_recursion;
+
+	/* Bitmask and counter of trace recursion: */
+	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
+
 #ifdef CONFIG_KCOV
-	/* Coverage collection mode enabled for this task (0 if disabled). */
+	/* Coverage collection mode enabled for this task (0 if disabled): */
 	enum kcov_mode kcov_mode;
-	/* Size of the kcov_area. */
-	unsigned kcov_size;
-	/* Buffer for coverage collection. */
-	void *kcov_area;
-	/* kcov desciptor wired with this task or NULL. */
-	struct kcov *kcov;
+
+	/* Size of the kcov_area: */
+	unsigned int kcov_size;
+
+	/* Buffer for coverage collection: */
+	void *kcov_area;
+
+	/* KCOV descriptor wired with this task or NULL: */
+	struct kcov *kcov;
 #endif
+
 #ifdef CONFIG_MEMCG
 	struct mem_cgroup *memcg_in_oom;
 	gfp_t memcg_oom_gfp_mask;
 	int memcg_oom_order;
 
-	/* number of pages to reclaim on returning to userland */
+	/* Number of pages to reclaim on returning to userland: */
 	unsigned int memcg_nr_pages_over_high;
 #endif
+
 #ifdef CONFIG_UPROBES
 	struct uprobe_task *utask;
 #endif
 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
 	unsigned int sequential_io;
 	unsigned int sequential_io_avg;
 #endif
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long task_state_change;
 #endif
 	int pagefault_disabled;
 #ifdef CONFIG_MMU
 	struct task_struct *oom_reaper_list;
 #endif
 #ifdef CONFIG_VMAP_STACK
 	struct vm_struct *stack_vm_area;
 #endif
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-	/* A live task holds one reference. */
+	/* A live task holds one reference: */
 	atomic_t stack_refcount;
 #endif
-/* CPU-specific state of this task */
+	/* CPU-specific state of this task: */
 	struct thread_struct thread;
-/*
- * WARNING: on x86, 'thread_struct' contains a variable-sized
- * structure. It *MUST* be at the end of 'task_struct'.
- *
- * Do not put anything below here!
- */
+
+	/*
+	 * WARNING: on x86, 'thread_struct' contains a variable-sized
+	 * structure. It *MUST* be at the end of 'task_struct'.
+	 *
+	 * Do not put anything below here!
+	 */
 };
 
 static inline struct pid *task_pid(struct task_struct *task)
@@ -975,7 +1060,7 @@ static inline struct pid *task_tgid(struct task_struct *task)
 }
 
 /*
- * Without tasklist or rcu lock it is not safe to dereference
+ * Without tasklist or RCU lock it is not safe to dereference
  * the result of task_pgrp/task_session even if task == current,
  * we can race with another thread doing sys_setsid/sys_setpgid.
  */
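(Aside: the rule in the comment above means task_pgrp()/task_session()
results must be consumed under the RCU read lock. An illustrative pattern --
'tsk' is a hypothetical task pointer:

	pid_t pgrp_nr;

	rcu_read_lock();
	pgrp_nr = pid_nr(task_pgrp(tsk));
	rcu_read_unlock();
)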
@@ -1002,16 +1087,14 @@ static inline struct pid *task_session(struct task_struct *task)
  *
  * see also pid_nr() etc in include/linux/pid.h
  */
-pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
-			struct pid_namespace *ns);
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
 
 static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
 	return tsk->pid;
 }
 
-static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
-			struct pid_namespace *ns)
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 {
 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
 }
@@ -1027,15 +1110,28 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
 	return tsk->tgid;
 }
 
-pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
 
 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 {
 	return pid_vnr(task_tgid(tsk));
 }
 
+/**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+ *
+ * Test if a process is not yet dead (at most zombie state)
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
+ */
+static inline int pid_alive(const struct task_struct *p)
+{
+	return p->pids[PIDTYPE_PID].pid != NULL;
+}
 
-static inline int pid_alive(const struct task_struct *p);
 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
 {
 	pid_t pid = 0;
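(Aside: pid_alive(), moved above in this patch, is the liveness check that
makes an RCU dereference of task pointers safe, exactly as the body of
task_ppid_nr_ns() continued here does. Condensed, with 'p' and 'ns' as
hypothetical arguments:

	pid_t ppid = 0;

	rcu_read_lock();
	if (pid_alive(p))
		ppid = task_tgid_nr_ns(rcu_dereference(p->real_parent), ns);
	rcu_read_unlock();
)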
@@ -1053,8 +1149,7 @@ static inline pid_t task_ppid_nr(const struct task_struct *tsk)
 	return task_ppid_nr_ns(tsk, &init_pid_ns);
 }
 
-static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
-			struct pid_namespace *ns)
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 {
 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
 }
@@ -1065,8 +1160,7 @@ static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
 }
 
 
-static inline pid_t task_session_nr_ns(struct task_struct *tsk,
-			struct pid_namespace *ns)
+static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 {
 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
 }
@@ -1076,28 +1170,13 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
 }
 
-/* obsolete, do not use */
+/* Obsolete, do not use: */
 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
 {
 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
 }
 
 /**
- * pid_alive - check that a task structure is not stale
- * @p: Task structure to be checked.
- *
- * Test if a process is not yet dead (at most zombie state)
- * If pid_alive fails, then pointers within the task structure
- * can be stale and must not be dereferenced.
- *
- * Return: 1 if the process is alive. 0 otherwise.
- */
-static inline int pid_alive(const struct task_struct *p)
-{
-	return p->pids[PIDTYPE_PID].pid != NULL;
-}
-
-/**
  * is_global_init - check if a task structure is init. Since init
  * is free to have sub-threads we need to check tgid.
  * @tsk: Task structure to be checked.
@@ -1116,34 +1195,34 @@ extern struct pid *cad_pid;
1116/* 1195/*
1117 * Per process flags 1196 * Per process flags
1118 */ 1197 */
1119#define PF_IDLE 0x00000002 /* I am an IDLE thread */ 1198#define PF_IDLE 0x00000002 /* I am an IDLE thread */
1120#define PF_EXITING 0x00000004 /* getting shut down */ 1199#define PF_EXITING 0x00000004 /* Getting shut down */
1121#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ 1200#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
1122#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ 1201#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1123#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 1202#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1124#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ 1203#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
1125#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ 1204#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
1126#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ 1205#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1127#define PF_DUMPCORE 0x00000200 /* dumped core */ 1206#define PF_DUMPCORE 0x00000200 /* Dumped core */
1128#define PF_SIGNALED 0x00000400 /* killed by a signal */ 1207#define PF_SIGNALED 0x00000400 /* Killed by a signal */
1129#define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1208#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1130#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ 1209#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
1131#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 1210#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
1132#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ 1211#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
1133#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 1212#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
1134#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1213#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
1135#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1214#define PF_FSTRANS 0x00020000 /* Inside a filesystem transaction */
1136#define PF_KSWAPD 0x00040000 /* I am kswapd */ 1215#define PF_KSWAPD 0x00040000 /* I am kswapd */
1137#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ 1216#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
1138#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 1217#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1139#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1218#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1140#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1219#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
1141#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1220#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1142#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ 1221#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
1143#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1222#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1144#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1223#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1145#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 1224#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1146#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ 1225#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
1147 1226
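A minimal sketch of how these PF_* bits are consumed (the example_* helper is hypothetical; only PF_KSWAPD and task_struct come from this header). Per the rule spelled out in the comment below, other tasks may read ->flags, but only the task itself writes them:

static inline bool example_is_kswapd(struct task_struct *p)
{
	/* PF_* flags are plain (non-atomic) bits in p->flags: */
	return p->flags & PF_KSWAPD;
}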
1148/* 1227/*
1149 * Only the _current_ task can read/write to tsk->flags, but other 1228 * Only the _current_ task can read/write to tsk->flags, but other
@@ -1156,33 +1235,38 @@ extern struct pid *cad_pid;
1156 * child is not running and in turn not changing child->flags 1235 * child is not running and in turn not changing child->flags
1157 * at the same time the parent does it. 1236 * at the same time the parent does it.
1158 */ 1237 */
1159#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 1238#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1160#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 1239#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1161#define clear_used_math() clear_stopped_child_used_math(current) 1240#define clear_used_math() clear_stopped_child_used_math(current)
1162#define set_used_math() set_stopped_child_used_math(current) 1241#define set_used_math() set_stopped_child_used_math(current)
1242
1163#define conditional_stopped_child_used_math(condition, child) \ 1243#define conditional_stopped_child_used_math(condition, child) \
1164 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 1244 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1165#define conditional_used_math(condition) \ 1245
1166 conditional_stopped_child_used_math(condition, current) 1246#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1247
1167#define copy_to_stopped_child_used_math(child) \ 1248#define copy_to_stopped_child_used_math(child) \
1168 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 1249 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1250
1169/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 1251/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1170#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1252#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1171#define used_math() tsk_used_math(current) 1253#define used_math() tsk_used_math(current)
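Combining the ownership rule above with these helpers, a parent that owns a stopped child can safely query the propagated FPU state. A sketch, with a hypothetical example_* name:

static bool example_child_has_fpu(struct task_struct *stopped_child)
{
	/* Race-free only because the child is stopped: */
	copy_to_stopped_child_used_math(stopped_child);

	/* tsk_used_math() yields 0 or PF_USED_MATH (0x2000), never 1: */
	return !!tsk_used_math(stopped_child);
}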
1172 1254
1173/* Per-process atomic flags. */ 1255/* Per-process atomic flags. */
1174#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ 1256#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
1175#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 1257#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
1176#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1258#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1177#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ 1259#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
1178 1260
1179 1261
1180#define TASK_PFA_TEST(name, func) \ 1262#define TASK_PFA_TEST(name, func) \
1181 static inline bool task_##func(struct task_struct *p) \ 1263 static inline bool task_##func(struct task_struct *p) \
1182 { return test_bit(PFA_##name, &p->atomic_flags); } 1264 { return test_bit(PFA_##name, &p->atomic_flags); }
1265
1183#define TASK_PFA_SET(name, func) \ 1266#define TASK_PFA_SET(name, func) \
1184 static inline void task_set_##func(struct task_struct *p) \ 1267 static inline void task_set_##func(struct task_struct *p) \
1185 { set_bit(PFA_##name, &p->atomic_flags); } 1268 { set_bit(PFA_##name, &p->atomic_flags); }
1269
1186#define TASK_PFA_CLEAR(name, func) \ 1270#define TASK_PFA_CLEAR(name, func) \
1187 static inline void task_clear_##func(struct task_struct *p) \ 1271 static inline void task_clear_##func(struct task_struct *p) \
1188 { clear_bit(PFA_##name, &p->atomic_flags); } 1272 { clear_bit(PFA_##name, &p->atomic_flags); }
@@ -1201,30 +1285,23 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1201TASK_PFA_TEST(LMK_WAITING, lmk_waiting) 1285TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
1202TASK_PFA_SET(LMK_WAITING, lmk_waiting) 1286TASK_PFA_SET(LMK_WAITING, lmk_waiting)
1203 1287
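For reference, the three macros above stamp out trivial accessors; the TASK_PFA_TEST(LMK_WAITING, lmk_waiting) instantiation expands to essentially:

static inline bool task_lmk_waiting(struct task_struct *p)
{
	return test_bit(PFA_LMK_WAITING, &p->atomic_flags);
}

Unlike ->flags, ->atomic_flags is manipulated through the atomic bitop API (test_bit()/set_bit()/clear_bit()), which is why these bits may be updated by tasks other than the owner.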
1204static inline void tsk_restore_flags(struct task_struct *task, 1288static inline void
1205 unsigned long orig_flags, unsigned long flags) 1289tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags)
1206{ 1290{
1207 task->flags &= ~flags; 1291 task->flags &= ~flags;
1208 task->flags |= orig_flags & flags; 1292 task->flags |= orig_flags & flags;
1209} 1293}
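tsk_restore_flags() restores only the bits selected by the mask argument from orig_flags. A sketch of the usual save/modify/restore pattern around PF_MEMALLOC (hypothetical caller; it mirrors how memory-reclaim-sensitive paths use this helper):

static void example_noreclaim_section(void)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;
	/* ... allocations here will not recurse into reclaim ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}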
1210 1294
1211extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, 1295extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1212 const struct cpumask *trial); 1296extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1213extern int task_can_attach(struct task_struct *p,
1214 const struct cpumask *cs_cpus_allowed);
1215#ifdef CONFIG_SMP 1297#ifdef CONFIG_SMP
1216extern void do_set_cpus_allowed(struct task_struct *p, 1298extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1217 const struct cpumask *new_mask); 1299extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1218
1219extern int set_cpus_allowed_ptr(struct task_struct *p,
1220 const struct cpumask *new_mask);
1221#else 1300#else
1222static inline void do_set_cpus_allowed(struct task_struct *p, 1301static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1223 const struct cpumask *new_mask)
1224{ 1302{
1225} 1303}
1226static inline int set_cpus_allowed_ptr(struct task_struct *p, 1304static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1227 const struct cpumask *new_mask)
1228{ 1305{
1229 if (!cpumask_test_cpu(0, new_mask)) 1306 if (!cpumask_test_cpu(0, new_mask))
1230 return -EINVAL; 1307 return -EINVAL;
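On !SMP builds the stub above accepts only affinity masks that include CPU 0, the sole CPU, so code that pins to the boot CPU behaves the same on both configurations. A sketch with a hypothetical helper name:

static int example_pin_to_boot_cpu(struct task_struct *p)
{
	return set_cpus_allowed_ptr(p, cpumask_of(0));
}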
@@ -1239,6 +1316,7 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
1239extern int yield_to(struct task_struct *p, bool preempt); 1316extern int yield_to(struct task_struct *p, bool preempt);
1240extern void set_user_nice(struct task_struct *p, long nice); 1317extern void set_user_nice(struct task_struct *p, long nice);
1241extern int task_prio(const struct task_struct *p); 1318extern int task_prio(const struct task_struct *p);
1319
1242/** 1320/**
1243 * task_nice - return the nice value of a given task. 1321 * task_nice - return the nice value of a given task.
1244 * @p: the task in question. 1322 * @p: the task in question.
@@ -1249,16 +1327,15 @@ static inline int task_nice(const struct task_struct *p)
1249{ 1327{
1250 return PRIO_TO_NICE((p)->static_prio); 1328 return PRIO_TO_NICE((p)->static_prio);
1251} 1329}
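Assuming the standard priority layout (MAX_RT_PRIO == 100, DEFAULT_PRIO == 120), PRIO_TO_NICE() maps static_prio 100..139 onto the nice range -20..19:

/* task_nice() under the standard priority layout:        */
/*   static_prio 100 -> nice -20  (highest CFS priority)  */
/*   static_prio 120 -> nice   0  (the default)           */
/*   static_prio 139 -> nice  19  (lowest CFS priority)   */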
1330
1252extern int can_nice(const struct task_struct *p, const int nice); 1331extern int can_nice(const struct task_struct *p, const int nice);
1253extern int task_curr(const struct task_struct *p); 1332extern int task_curr(const struct task_struct *p);
1254extern int idle_cpu(int cpu); 1333extern int idle_cpu(int cpu);
1255extern int sched_setscheduler(struct task_struct *, int, 1334extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1256 const struct sched_param *); 1335extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1257extern int sched_setscheduler_nocheck(struct task_struct *, int, 1336extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1258 const struct sched_param *);
1259extern int sched_setattr(struct task_struct *,
1260 const struct sched_attr *);
1261extern struct task_struct *idle_task(int cpu); 1337extern struct task_struct *idle_task(int cpu);
1338
1262/** 1339/**
1263 * is_idle_task - is the specified task an idle task? 1340 * is_idle_task - is the specified task an idle task?
1264 * @p: the task in question. 1341 * @p: the task in question.
@@ -1269,6 +1346,7 @@ static inline bool is_idle_task(const struct task_struct *p)
1269{ 1346{
1270 return !!(p->flags & PF_IDLE); 1347 return !!(p->flags & PF_IDLE);
1271} 1348}
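PF_IDLE marks the per-CPU idle threads, and a typical consumer simply filters them out. A sketch (hypothetical helper):

static bool example_should_account(struct task_struct *p)
{
	/* Idle threads are accounted separately: */
	return !is_idle_task(p);
}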
1349
1272extern struct task_struct *curr_task(int cpu); 1350extern struct task_struct *curr_task(int cpu);
1273extern void ia64_set_curr_task(int cpu, struct task_struct *p); 1351extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1274 1352
@@ -1302,23 +1380,25 @@ static inline struct thread_info *task_thread_info(struct task_struct *task)
1302 */ 1380 */
1303 1381
1304extern struct task_struct *find_task_by_vpid(pid_t nr); 1382extern struct task_struct *find_task_by_vpid(pid_t nr);
1305extern struct task_struct *find_task_by_pid_ns(pid_t nr, 1383extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1306 struct pid_namespace *ns);
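These lookups must run under rcu_read_lock() (or with tasklist_lock held), and the result needs a reference taken before the lock is dropped if it is used afterwards. A sketch with a hypothetical helper name:

static struct task_struct *example_get_task(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);
	rcu_read_unlock();

	return p;	/* Caller drops the reference with put_task_struct(). */
}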
1307 1384
1308extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1385extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1309extern int wake_up_process(struct task_struct *tsk); 1386extern int wake_up_process(struct task_struct *tsk);
1310extern void wake_up_new_task(struct task_struct *tsk); 1387extern void wake_up_new_task(struct task_struct *tsk);
1388
1311#ifdef CONFIG_SMP 1389#ifdef CONFIG_SMP
1312 extern void kick_process(struct task_struct *tsk); 1390extern void kick_process(struct task_struct *tsk);
1313#else 1391#else
1314 static inline void kick_process(struct task_struct *tsk) { } 1392static inline void kick_process(struct task_struct *tsk) { }
1315#endif 1393#endif
1316 1394
1317extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 1395extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1396
1318static inline void set_task_comm(struct task_struct *tsk, const char *from) 1397static inline void set_task_comm(struct task_struct *tsk, const char *from)
1319{ 1398{
1320 __set_task_comm(tsk, from, false); 1399 __set_task_comm(tsk, from, false);
1321} 1400}
1401
1322extern char *get_task_comm(char *to, struct task_struct *tsk); 1402extern char *get_task_comm(char *to, struct task_struct *tsk);
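get_task_comm() copies tsk->comm under the task lock, so callers pass a TASK_COMM_LEN-sized buffer rather than peeking at ->comm directly. A sketch (hypothetical helper):

static void example_log_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("task comm: %s\n", comm);
}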
1323 1403
1324#ifdef CONFIG_SMP 1404#ifdef CONFIG_SMP
@@ -1326,15 +1406,15 @@ void scheduler_ipi(void);
1326extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 1406extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1327#else 1407#else
1328static inline void scheduler_ipi(void) { } 1408static inline void scheduler_ipi(void) { }
1329static inline unsigned long wait_task_inactive(struct task_struct *p, 1409static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1330 long match_state)
1331{ 1410{
1332 return 1; 1411 return 1;
1333} 1412}
1334#endif 1413#endif
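wait_task_inactive() blocks until the target is off its runqueue in the given state, returning 0 if the task changed state in the meantime; ptrace-style attach code checks it along these lines (a sketch, with a hypothetical caller):

static int example_ensure_traced(struct task_struct *child)
{
	if (!wait_task_inactive(child, __TASK_TRACED))
		return -ESRCH;

	return 0;
}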
1335 1414
1336/* set thread flags in other task's structures 1415/*
1337 * - see asm/thread_info.h for TIF_xxxx flags available 1416 * Set thread flags in other task's structures.
1417 * See asm/thread_info.h for TIF_xxxx flags available:
1338 */ 1418 */
1339static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 1419static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1340{ 1420{