path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	131
1 files changed, 103 insertions, 28 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b58afd97a180..dec5827c7742 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -246,7 +246,7 @@ struct mm_struct {
 
 	unsigned long saved_auxv[42]; /* for /proc/PID/auxv */
 
-	unsigned dumpable:1;
+	unsigned dumpable:2;
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
@@ -368,6 +368,11 @@ struct signal_struct {
 #endif
 };
 
+/* Context switch must be unlocked if interrupts are to be enabled */
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+# define __ARCH_WANT_UNLOCKED_CTXSW
+#endif
+
 /*
  * Bits in flags field of signal_struct.
  */
@@ -405,6 +410,10 @@ struct user_struct {
 	atomic_t processes;	/* How many processes does this user have? */
 	atomic_t files;		/* How many open files does this user have? */
 	atomic_t sigpending;	/* How many pending signals does this user have? */
+#ifdef CONFIG_INOTIFY
+	atomic_t inotify_watches; /* How many inotify watches does this user have? */
+	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
+#endif
 	/* protected by mq_lock	*/
 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
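
The two new per-user counters exist so inotify can enforce per-user limits on watches and open instances. Below is a minimal sketch of the accounting side, assuming CONFIG_INOTIFY=y and a hypothetical max_watches limit; it is not the code from fs/inotify.c.

#include <asm/atomic.h>
#include <linux/errno.h>
#include <linux/sched.h>

/* Sketch only: reject a new watch once a per-user quota is reached.
 * "max_watches" stands in for whatever sysctl the real inotify code
 * consults. */
static int example_charge_watch(struct user_struct *user, int max_watches)
{
	if (atomic_read(&user->inotify_watches) >= max_watches)
		return -ENOSPC;			/* quota exhausted */
	atomic_inc(&user->inotify_watches);	/* account the new watch */
	return 0;
}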
@@ -460,10 +469,11 @@ enum idle_type
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
-#define SD_WAKE_IDLE		8	/* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE		16	/* Wake task to waking CPU */
-#define SD_WAKE_BALANCE		32	/* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER	64	/* Domain members share cpu power */
+#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
+#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
+#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
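
All SD_* values are distinct powers of two, so a domain's policy is the OR of the flags it wants and the scheduler tests individual policies with a bitwise AND. The helpers below are hypothetical illustrations, not code from kernel/sched.c, showing both the setting and the testing side.

#include <linux/sched.h>

/* Hypothetical example: configure a domain to balance at exec/fork
 * time and to prefer affine wakeups. */
static void example_set_policy(struct sched_domain *sd)
{
	sd->flags = SD_LOAD_BALANCE | SD_BALANCE_EXEC | SD_BALANCE_FORK
		  | SD_WAKE_AFFINE | SD_WAKE_BALANCE;
}

/* Hypothetical example: does this domain want fork-time balancing? */
static int example_balances_fork(struct sched_domain *sd)
{
	return (sd->flags & SD_BALANCE_FORK) != 0;
}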
@@ -488,6 +498,11 @@ struct sched_domain {
 	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
 	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
+	unsigned int busy_idx;
+	unsigned int idle_idx;
+	unsigned int newidle_idx;
+	unsigned int wake_idx;
+	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
 
 	/* Runtime fields. */
@@ -511,10 +526,16 @@ struct sched_domain {
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
-	/* sched_balance_exec() stats */
-	unsigned long sbe_attempts;
+	/* SD_BALANCE_EXEC stats */
+	unsigned long sbe_cnt;
+	unsigned long sbe_balanced;
 	unsigned long sbe_pushed;
 
+	/* SD_BALANCE_FORK stats */
+	unsigned long sbf_cnt;
+	unsigned long sbf_balanced;
+	unsigned long sbf_pushed;
+
 	/* try_to_wake_up() stats */
 	unsigned long ttwu_wake_remote;
 	unsigned long ttwu_move_affine;
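
Each balancing path now keeps the same triple of counters: how often it ran (_cnt), how often it decided the domain was already balanced (_balanced), and how often it actually moved a task (_pushed). The sketch below only illustrates that bookkeeping for the fork path; it assumes CONFIG_SCHEDSTATS=y (the fields are conditional) and is not the accounting code from kernel/sched.c, which goes through its own schedstats helpers.

#include <linux/sched.h>

/* Placeholder sketch: record the outcome of one fork-balance decision
 * directly in the per-domain counters. */
static void example_account_fork_balance(struct sched_domain *sd,
					 int this_cpu, int new_cpu)
{
	sd->sbf_cnt++;			/* fork balance attempted */
	if (new_cpu == this_cpu)
		sd->sbf_balanced++;	/* no migration needed */
	else
		sd->sbf_pushed++;	/* child placed on another CPU */
}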
@@ -522,6 +543,8 @@ struct sched_domain {
 #endif
 };
 
+extern void partition_sched_domains(cpumask_t *partition1,
+				    cpumask_t *partition2);
 #ifdef ARCH_HAS_SCHED_DOMAIN
 /* Useful helpers that arch setup code may use. Defined in kernel/sched.c */
 extern cpumask_t cpu_isolated_map;
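
partition_sched_domains() rebuilds the scheduler domain trees so that load balancing happens only within each of the two given cpumasks; exclusive cpusets are its intended caller. A toy caller with made-up masks, purely as a usage sketch:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Toy example: split CPUs 0-1 and 2-3 into two balancing partitions.
 * Real callers derive the masks from cpuset membership. */
static void example_partition(void)
{
	cpumask_t left = CPU_MASK_NONE;
	cpumask_t right = CPU_MASK_NONE;

	cpu_set(0, left);
	cpu_set(1, left);
	cpu_set(2, right);
	cpu_set(3, right);

	/* After this call, tasks are only load-balanced within the
	 * partition that contains their CPU. */
	partition_sched_domains(&left, &right);
}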
@@ -561,9 +584,10 @@ struct group_info {
 		groups_free(group_info);	\
 } while (0)
 
-struct group_info *groups_alloc(int gidsetsize);
-void groups_free(struct group_info *group_info);
-int set_current_groups(struct group_info *group_info);
+extern struct group_info *groups_alloc(int gidsetsize);
+extern void groups_free(struct group_info *group_info);
+extern int set_current_groups(struct group_info *group_info);
+extern int groups_search(struct group_info *group_info, gid_t grp);
 /* access the groups "array" with this macro */
 #define GROUP_AT(gi, i) \
 	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
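
groups_search() now joins the other group helpers as an explicit extern. Purely to illustrate the GROUP_AT() accessor, a naive linear lookup could be written as below; the in-kernel groups_search() binary-searches the sorted group list, so treat this as an illustration of the macro rather than the real routine.

#include <linux/sched.h>

/* Illustration of GROUP_AT() indexing; not the in-kernel groups_search(). */
static int example_groups_search(struct group_info *group_info, gid_t grp)
{
	int i;

	if (!group_info)
		return 0;

	for (i = 0; i < group_info->ngroups; i++)
		if (GROUP_AT(group_info, i) == grp)
			return 1;	/* gid present */
	return 0;			/* gid not in the set */
}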
@@ -581,10 +605,15 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	int oncpu;
+#endif
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
 
+	unsigned short ioprio;
+
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time; /* sched_clock time spent running */
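
The new 16-bit ioprio field carries the task's I/O priority, which packs an I/O scheduling class together with a per-class level. The real encoding lives in include/linux/ioprio.h; the sketch below only demonstrates the packing idea, with its own names and an assumed shift value.

/* Sketch of packing a class and a level into a 16-bit ioprio-style
 * value.  EX_CLASS_SHIFT and the helpers are local to this example,
 * not the definitions from include/linux/ioprio.h. */
#define EX_CLASS_SHIFT	13

static inline unsigned short ex_ioprio_pack(int class, int level)
{
	return (class << EX_CLASS_SHIFT) | level;
}

static inline int ex_ioprio_class(unsigned short ioprio)
{
	return ioprio >> EX_CLASS_SHIFT;
}

static inline int ex_ioprio_level(unsigned short ioprio)
{
	return ioprio & ((1 << EX_CLASS_SHIFT) - 1);
}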
@@ -660,6 +689,7 @@ struct task_struct {
 	struct user_struct *user;
 #ifdef CONFIG_KEYS
 	struct key *thread_keyring;	/* keyring private to this thread */
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	int oomkilladj; /* OOM kill score adjustment (bit shift). */
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
@@ -702,8 +732,6 @@ struct task_struct {
 	spinlock_t alloc_lock;
 /* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
 	spinlock_t proc_lock;
-/* context-switch lock */
-	spinlock_t switch_lock;
 
 /* journalling filesystem info */
 	void *journal_info;
@@ -741,6 +769,7 @@ struct task_struct {
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
 #endif
+	atomic_t fs_excl;	/* holding fs exclusive resources */
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
@@ -910,7 +939,7 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p));
+extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
 extern void FASTCALL(sched_exit(task_t * p));
 
 extern int in_group_p(gid_t);
@@ -1090,7 +1119,8 @@ extern void unhash_process(struct task_struct *p);
 
 /*
  * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
- * subscriptions and synchronises with wait4(). Also used in procfs.
+ * subscriptions and synchronises with wait4(). Also used in procfs. Also
+ * pins the final release of task.io_context.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
@@ -1243,33 +1273,78 @@ extern void normalize_rt_tasks(void);
 
 #endif
 
-/* try_to_freeze
- *
- * Checks whether we need to enter the refrigerator
- * and returns 1 if we did so.
- */
 #ifdef CONFIG_PM
-extern void refrigerator(unsigned long);
+/*
+ * Check if a process has been frozen
+ */
+static inline int frozen(struct task_struct *p)
+{
+	return p->flags & PF_FROZEN;
+}
+
+/*
+ * Check if there is a request to freeze a process
+ */
+static inline int freezing(struct task_struct *p)
+{
+	return p->flags & PF_FREEZE;
+}
+
+/*
+ * Request that a process be frozen
+ * FIXME: SMP problem. We may not modify other process' flags!
+ */
+static inline void freeze(struct task_struct *p)
+{
+	p->flags |= PF_FREEZE;
+}
+
+/*
+ * Wake up a frozen process
+ */
+static inline int thaw_process(struct task_struct *p)
+{
+	if (frozen(p)) {
+		p->flags &= ~PF_FROZEN;
+		wake_up_process(p);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * freezing is complete, mark process as frozen
+ */
+static inline void frozen_process(struct task_struct *p)
+{
+	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
+}
+
+extern void refrigerator(void);
 extern int freeze_processes(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(unsigned long refrigerator_flags)
+static inline int try_to_freeze(void)
 {
-	if (unlikely(current->flags & PF_FREEZE)) {
-		refrigerator(refrigerator_flags);
+	if (freezing(current)) {
+		refrigerator();
 		return 1;
 	} else
 		return 0;
 }
 #else
-static inline void refrigerator(unsigned long flag) {}
+static inline int frozen(struct task_struct *p) { return 0; }
+static inline int freezing(struct task_struct *p) { return 0; }
+static inline void freeze(struct task_struct *p) { BUG(); }
+static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline void frozen_process(struct task_struct *p) { BUG(); }
+
+static inline void refrigerator(void) {}
 static inline int freeze_processes(void) { BUG(); return 0; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(unsigned long refrigerator_flags)
-{
-	return 0;
-}
+static inline int try_to_freeze(void) { return 0; }
+
 #endif /* CONFIG_PM */
 #endif /* __KERNEL__ */
 
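With the split interface, a freezer-aware kernel thread just calls try_to_freeze() at the top of its loop and the refrigerator handles parking it during suspend. A minimal sketch of such a loop follows; the per-iteration work and the one-second sleep are placeholders, not taken from any in-tree thread.

#include <linux/kthread.h>
#include <linux/sched.h>

/* Sketch of a freezer-aware kernel thread main loop. */
static int example_thread(void *unused)
{
	while (!kthread_should_stop()) {
		try_to_freeze();	/* park in the refrigerator during suspend */

		/* ... do one unit of work ... */

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}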