Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	121
1 files changed, 94 insertions, 27 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b58afd97a180..9530b1903160 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -246,7 +246,7 @@ struct mm_struct {
 
 	unsigned long saved_auxv[42]; /* for /proc/PID/auxv */
 
-	unsigned dumpable:1;
+	unsigned dumpable:2;
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
@@ -368,6 +368,11 @@ struct signal_struct {
 #endif
 };
 
+/* Context switch must be unlocked if interrupts are to be enabled */
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+# define __ARCH_WANT_UNLOCKED_CTXSW
+#endif
+
 /*
  * Bits in flags field of signal_struct.
  */
@@ -460,10 +465,11 @@ enum idle_type
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
-#define SD_WAKE_IDLE		8	/* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE		16	/* Wake task to waking CPU */
-#define SD_WAKE_BALANCE		32	/* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER	64	/* Domain members share cpu power */
+#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
+#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
+#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
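
(Illustration, not part of the patch: the SD_* bits above are OR-ed into the flags field of struct sched_domain and tested individually by the balancer. A minimal sketch of such a test follows; example_wants_fork_balance() is a hypothetical name, the real decisions are made in kernel/sched.c.)

/*
 * Hypothetical sketch only: check whether a domain takes part in the
 * new fork-time balancing introduced by SD_BALANCE_FORK.
 */
static inline int example_wants_fork_balance(struct sched_domain *sd)
{
	return (sd->flags & SD_LOAD_BALANCE) && (sd->flags & SD_BALANCE_FORK);
}
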
@@ -488,6 +494,11 @@ struct sched_domain {
 	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
 	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
+	unsigned int busy_idx;
+	unsigned int idle_idx;
+	unsigned int newidle_idx;
+	unsigned int wake_idx;
+	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
 
 	/* Runtime fields. */
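
(Illustration, not part of the patch: the new *_idx fields let each domain choose how heavily damped a CPU load figure to use for a given balance reason. A minimal sketch, assuming the idle_type values declared earlier in this header; example_load_idx() is a hypothetical name, the real lookup lives in kernel/sched.c.)

/*
 * Hypothetical sketch: choose a load index for one balancing pass.
 */
static inline int example_load_idx(struct sched_domain *sd, enum idle_type idle)
{
	switch (idle) {
	case NEWLY_IDLE:
		return sd->newidle_idx;
	case SCHED_IDLE:
		return sd->idle_idx;
	default:
		return sd->busy_idx;	/* NOT_IDLE and anything else */
	}
}
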
@@ -511,10 +522,16 @@ struct sched_domain {
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
-	/* sched_balance_exec() stats */
-	unsigned long sbe_attempts;
+	/* SD_BALANCE_EXEC stats */
+	unsigned long sbe_cnt;
+	unsigned long sbe_balanced;
 	unsigned long sbe_pushed;
 
+	/* SD_BALANCE_FORK stats */
+	unsigned long sbf_cnt;
+	unsigned long sbf_balanced;
+	unsigned long sbf_pushed;
+
 	/* try_to_wake_up() stats */
 	unsigned long ttwu_wake_remote;
 	unsigned long ttwu_move_affine;
@@ -522,6 +539,8 @@ struct sched_domain {
 #endif
 };
 
+extern void partition_sched_domains(cpumask_t *partition1,
+					cpumask_t *partition2);
 #ifdef ARCH_HAS_SCHED_DOMAIN
 /* Useful helpers that arch setup code may use. Defined in kernel/sched.c */
 extern cpumask_t cpu_isolated_map;
@@ -561,9 +580,10 @@ struct group_info {
 		groups_free(group_info);		\
 } while (0)
 
-struct group_info *groups_alloc(int gidsetsize);
-void groups_free(struct group_info *group_info);
-int set_current_groups(struct group_info *group_info);
+extern struct group_info *groups_alloc(int gidsetsize);
+extern void groups_free(struct group_info *group_info);
+extern int set_current_groups(struct group_info *group_info);
+extern int groups_search(struct group_info *group_info, gid_t grp);
 /* access the groups "array" with this macro */
 #define GROUP_AT(gi, i) \
     ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
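
(Illustration, not part of the patch: callers walk the supplementary group list through GROUP_AT(). The linear membership test below is a hypothetical stand-in for what the newly exported groups_search() provides; the kernel's version binary-searches the sorted array.)

/*
 * Hypothetical sketch only: prefer groups_search()/in_group_p() in real code.
 */
static inline int example_in_group(struct group_info *group_info, gid_t grp)
{
	int i;

	for (i = 0; i < group_info->ngroups; i++)
		if (GROUP_AT(group_info, i) == grp)
			return 1;
	return 0;
}
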
@@ -581,6 +601,9 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	int oncpu;
+#endif
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
@@ -660,6 +683,7 @@ struct task_struct {
 	struct user_struct *user;
 #ifdef CONFIG_KEYS
 	struct key *thread_keyring;	/* keyring private to this thread */
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	int oomkilladj;	/* OOM kill score adjustment (bit shift). */
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
@@ -702,8 +726,6 @@ struct task_struct {
 	spinlock_t alloc_lock;
 /* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
 	spinlock_t proc_lock;
-/* context-switch lock */
-	spinlock_t switch_lock;
 
 /* journalling filesystem info */
 	void *journal_info;
@@ -910,7 +932,7 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p));
+extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
 extern void FASTCALL(sched_exit(task_t * p));
 
 extern int in_group_p(gid_t);
@@ -1243,33 +1265,78 @@ extern void normalize_rt_tasks(void);
 
 #endif
 
-/* try_to_freeze
- *
- * Checks whether we need to enter the refrigerator
- * and returns 1 if we did so.
- */
 #ifdef CONFIG_PM
-extern void refrigerator(unsigned long);
+/*
+ * Check if a process has been frozen
+ */
+static inline int frozen(struct task_struct *p)
+{
+	return p->flags & PF_FROZEN;
+}
+
+/*
+ * Check if there is a request to freeze a process
+ */
+static inline int freezing(struct task_struct *p)
+{
+	return p->flags & PF_FREEZE;
+}
+
+/*
+ * Request that a process be frozen
+ * FIXME: SMP problem. We may not modify other process' flags!
+ */
+static inline void freeze(struct task_struct *p)
+{
+	p->flags |= PF_FREEZE;
+}
+
+/*
+ * Wake up a frozen process
+ */
+static inline int thaw_process(struct task_struct *p)
+{
+	if (frozen(p)) {
+		p->flags &= ~PF_FROZEN;
+		wake_up_process(p);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * freezing is complete, mark process as frozen
+ */
+static inline void frozen_process(struct task_struct *p)
+{
+	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
+}
+
+extern void refrigerator(void);
 extern int freeze_processes(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(unsigned long refrigerator_flags)
+static inline int try_to_freeze(void)
 {
-	if (unlikely(current->flags & PF_FREEZE)) {
-		refrigerator(refrigerator_flags);
+	if (freezing(current)) {
+		refrigerator();
 		return 1;
 	} else
 		return 0;
 }
 #else
-static inline void refrigerator(unsigned long flag) {}
+static inline int frozen(struct task_struct *p) { return 0; }
+static inline int freezing(struct task_struct *p) { return 0; }
+static inline void freeze(struct task_struct *p) { BUG(); }
+static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline void frozen_process(struct task_struct *p) { BUG(); }
+
+static inline void refrigerator(void) {}
 static inline int freeze_processes(void) { BUG(); return 0; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(unsigned long refrigerator_flags)
-{
-	return 0;
-}
+static inline int try_to_freeze(void) { return 0; }
+
 #endif /* CONFIG_PM */
 #endif /* __KERNEL__ */
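
(Illustration, not part of the patch: with try_to_freeze() now taking no arguments, a freezable kernel thread simply polls it in its main loop. A minimal sketch of that pattern, using a hypothetical thread function name.)

/*
 * Hypothetical sketch of a kernel thread cooperating with the freezer.
 */
static int example_thread(void *unused)
{
	while (!kthread_should_stop()) {
		try_to_freeze();	/* park in the refrigerator if asked */
		/* ... do one unit of work ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}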
1275 1342