Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 138 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 105 insertions(+), 33 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4dbb109022f3..ff48815bd3a2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -201,8 +201,8 @@ extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                           unsigned long len, unsigned long pgoff,
                           unsigned long flags);
-extern void arch_unmap_area(struct vm_area_struct *area);
-extern void arch_unmap_area_topdown(struct vm_area_struct *area);
+extern void arch_unmap_area(struct mm_struct *, unsigned long);
+extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
 #define set_mm_counter(mm, member, value) (mm)->_##member = (value)
 #define get_mm_counter(mm, member) ((mm)->_##member)
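
Note: both arch_unmap_area() variants now take the mm and the unmap start address rather than a vma, since on the unmap path the vma can already be gone by the time the hook runs; all the hook has to do is invalidate the per-mm free-area cache. A minimal sketch of the bottom-up variant under the new signature, consistent with the mm/mmap.c pattern but not a verbatim copy:

    void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
    {
            /*
             * A hole just opened below the cached search position:
             * restart the next scan from the freed address and
             * forget the cached hole size.
             */
            if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
                    mm->free_area_cache = addr;
                    mm->cached_hole_size = ~0UL;
            }
    }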
@@ -218,9 +218,10 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
-	void (*unmap_area) (struct vm_area_struct *area);
+	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 	unsigned long mmap_base;	/* base of mmap area */
-	unsigned long free_area_cache;	/* first hole */
+	unsigned long cached_hole_size;	/* if non-zero, the largest hole below free_area_cache */
+	unsigned long free_area_cache;	/* first hole of size cached_hole_size or larger */
 	pgd_t * pgd;
 	atomic_t mm_users;	/* How many users with user space? */
 	atomic_t mm_count;	/* How many references to "struct mm_struct" (users count as 1) */
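
Note: cached_hole_size is what makes the first-fit scan cheap to restart. It records the largest hole known to sit below free_area_cache, so a request larger than it can safely resume from the cache, while a smaller request must rescan from the bottom. A condensed sketch of a search loop maintaining both fields (simplified from the arch_get_unmapped_area() pattern; bounds checks and failure handling omitted):

    unsigned long
    arch_get_unmapped_area(struct file *filp, unsigned long addr,
                           unsigned long len, unsigned long pgoff,
                           unsigned long flags)
    {
            struct mm_struct *mm = current->mm;
            struct vm_area_struct *vma;

            if (len > mm->cached_hole_size) {
                    /* no hole below the cache fits: resume from the cache */
                    addr = mm->free_area_cache;
            } else {
                    /* a hole below the cache may fit: rescan, re-measure */
                    addr = TASK_UNMAPPED_BASE;
                    mm->cached_hole_size = 0;
            }

            for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                    if (!vma || addr + len <= vma->vm_start) {
                            mm->free_area_cache = addr + len;
                            return addr;
                    }
                    /* remember the largest hole we skip over */
                    if (addr + mm->cached_hole_size < vma->vm_start)
                            mm->cached_hole_size = vma->vm_start - addr;
                    addr = vma->vm_end;
            }
    }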
@@ -245,7 +246,7 @@ struct mm_struct {
 
 	unsigned long saved_auxv[42]; /* for /proc/PID/auxv */
 
-	unsigned dumpable:1;
+	unsigned dumpable:2;
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
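
Note: widening dumpable from one bit to two makes room for a third state beyond the old dump/don't-dump boolean; the extra value is intended for tasks whose credentials changed (set-uid and the like), whose cores should only be written in a root-readable mode. Illustrative encoding of the intent, not a normative table:

    /*
     * mm->dumpable as a 2-bit field:
     *   0  never dump core
     *   1  dump core normally
     *   2  dump, but owned by root and not world-readable, for tasks
     *      that gained or dropped privileges (cf. the suid_dumpable
     *      sysctl introduced alongside this change)
     */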
@@ -367,6 +368,11 @@ struct signal_struct {
 #endif
 };
 
+/* Context switch must be unlocked if interrupts are to be enabled */
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+# define __ARCH_WANT_UNLOCKED_CTXSW
+#endif
+
 /*
  * Bits in flags field of signal_struct.
 */
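
Note: when an architecture defines __ARCH_WANT_INTERRUPTS_ON_CTXSW, the runqueue lock cannot be held across switch_to(), so "is this task on a CPU right now" can no longer be read off rq->curr; the per-task oncpu flag added further down in this diff carries that state instead. A simplified sketch of the two variants (the real kernel/sched.c helpers also handle the lock hand-off):

    #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
    static inline int task_running(runqueue_t *rq, task_t *p)
    {
            return p->oncpu;        /* set at switch-in, cleared after switch-out */
    }
    #else
    static inline int task_running(runqueue_t *rq, task_t *p)
    {
            return rq->curr == p;   /* valid while rq->lock spans the switch */
    }
    #endif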
@@ -459,10 +465,11 @@ enum idle_type
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
-#define SD_WAKE_IDLE		8	/* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE		16	/* Wake task to waking CPU */
-#define SD_WAKE_BALANCE		32	/* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER	64	/* Domain members share cpu power */
+#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
+#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
+#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
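
Note: SD_BALANCE_FORK takes over bit 3, pushing the wakeup and CPU-power bits up one position each, so anything outside this header that spelled these flags as raw numbers must be rebuilt against the new values. Composition is unchanged; a domain template simply ORs in the bits it wants, as in this illustrative initializer fragment (the template itself is hypothetical):

    /* fragment of an SD_*_INIT-style domain template */
    .flags = SD_LOAD_BALANCE
           | SD_BALANCE_NEWIDLE
           | SD_BALANCE_EXEC
           | SD_BALANCE_FORK       /* new: spread tasks at fork/clone */
           | SD_WAKE_AFFINE,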
@@ -487,6 +494,11 @@ struct sched_domain {
 	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
 	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
+	unsigned int busy_idx;
+	unsigned int idle_idx;
+	unsigned int newidle_idx;
+	unsigned int wake_idx;
+	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
 
 	/* Runtime fields. */
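
Note: the five *_idx fields let each balancing path choose how smoothed a CPU-load estimate it consults: index 0 means the instantaneous load, higher indexes mean longer-decaying averages, so idle balancing can react quickly while busy balancing stays conservative. A sketch of the consumer side, assuming the runqueue keeps a small cpu_load[] array of decaying averages (illustrative, not the exact kernel/sched.c body):

    static unsigned long source_load(int cpu, int type)
    {
            runqueue_t *rq = cpu_rq(cpu);
            unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;

            if (type == 0)
                    return load_now;        /* raw, most reactive */
            /* use the decayed average, but never overstate a falling load */
            return min(rq->cpu_load[type - 1], load_now);
    }

A caller doing newly-idle balancing would pass sd->newidle_idx as type, wake balancing sd->wake_idx, fork/exec placement sd->forkexec_idx, and so on.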
@@ -510,10 +522,16 @@ struct sched_domain {
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
-	/* sched_balance_exec() stats */
-	unsigned long sbe_attempts;
+	/* SD_BALANCE_EXEC stats */
+	unsigned long sbe_cnt;
+	unsigned long sbe_balanced;
 	unsigned long sbe_pushed;
 
+	/* SD_BALANCE_FORK stats */
+	unsigned long sbf_cnt;
+	unsigned long sbf_balanced;
+	unsigned long sbf_pushed;
+
 	/* try_to_wake_up() stats */
 	unsigned long ttwu_wake_remote;
 	unsigned long ttwu_move_affine;
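
Note: each balancing decision point now reports the same triple: how often it ran (cnt), how often it decided to stay put (balanced), and how often it migrated the task (pushed). A sketch of the usual schedstat accounting pattern for the new fork counters; exact placement in kernel/sched.c is abbreviated here:

    schedstat_inc(sd, sbf_cnt);                     /* fork balance attempted */
    if (new_cpu == this_cpu)
            schedstat_inc(sd, sbf_balanced);        /* task left where it was */
    else
            schedstat_inc(sd, sbf_pushed);          /* task placed elsewhere */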
@@ -521,6 +539,8 @@ struct sched_domain {
 #endif
 };
 
+extern void partition_sched_domains(cpumask_t *partition1,
+				    cpumask_t *partition2);
 #ifdef ARCH_HAS_SCHED_DOMAIN
 /* Useful helpers that arch setup code may use. Defined in kernel/sched.c */
 extern cpumask_t cpu_isolated_map;
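
Note: partition_sched_domains() lets a resource manager split the machine into two sets of CPUs that are load-balanced internally but never against each other. A hypothetical caller (my_exclusive_cpus is an assumed mask, not a kernel symbol):

    cpumask_t isolated, rest;

    cpus_and(isolated, cpu_online_map, my_exclusive_cpus);
    cpus_andnot(rest, cpu_online_map, my_exclusive_cpus);
    partition_sched_domains(&isolated, &rest);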
@@ -560,9 +580,10 @@ struct group_info {
 		groups_free(group_info);	\
 } while (0)
 
-struct group_info *groups_alloc(int gidsetsize);
-void groups_free(struct group_info *group_info);
-int set_current_groups(struct group_info *group_info);
+extern struct group_info *groups_alloc(int gidsetsize);
+extern void groups_free(struct group_info *group_info);
+extern int set_current_groups(struct group_info *group_info);
+extern int groups_search(struct group_info *group_info, gid_t grp);
 /* access the groups "array" with this macro */
 #define GROUP_AT(gi, i) \
 	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
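
Note: exporting groups_search() spares callers from open-coding a walk over the two-level blocks[] array behind GROUP_AT(). A sketch of the classic consumer, shown under a hypothetical name (the in-tree equivalent lives in kernel/sys.c as in_group_p()):

    static int task_in_group(gid_t grp)
    {
            if (grp == current->fsgid)
                    return 1;       /* fast path: the primary fs gid */
            return groups_search(current->group_info, grp);
    }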
@@ -580,10 +601,15 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	int oncpu;
+#endif
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
 
+	unsigned short ioprio;
+
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time; /* sched_clock time spent running */
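
Note: the new ioprio field packs an I/O scheduling class together with a per-class priority level, the same encoding the ioprio_set()/ioprio_get() syscall pair uses. A sketch of the packing, paraphrasing the linux/ioprio.h macros rather than quoting them:

    #define IOPRIO_CLASS_SHIFT	13
    #define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))
    #define IOPRIO_PRIO_CLASS(ioprio)	((ioprio) >> IOPRIO_CLASS_SHIFT)
    #define IOPRIO_PRIO_DATA(ioprio)	((ioprio) & ((1 << IOPRIO_CLASS_SHIFT) - 1))

    /* e.g. best-effort class at level 4 */
    p->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);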
@@ -659,6 +685,7 @@ struct task_struct {
 	struct user_struct *user;
 #ifdef CONFIG_KEYS
 	struct key *thread_keyring;	/* keyring private to this thread */
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	int oomkilladj;		/* OOM kill score adjustment (bit shift). */
 	char comm[TASK_COMM_LEN];	/* executable name excluding path
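
Note: jit_keyring selects which keyring request_key() should attach just-instantiated keys to for this thread; it holds one of the KEY_REQKEY_DEFL_* selectors, settable from userspace via keyctl(KEYCTL_SET_REQKEY_KEYRING, ...). Illustrative dispatch only, not the security/keys code itself:

    switch (current->jit_keyring) {
    case KEY_REQKEY_DEFL_THREAD_KEYRING:
            /* link the new key into the per-thread keyring */
            break;
    case KEY_REQKEY_DEFL_PROCESS_KEYRING:
            /* link it into the per-process keyring */
            break;
    default:
            /* fall back to the session keyring */
            break;
    }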
@@ -701,8 +728,6 @@ struct task_struct {
 	spinlock_t alloc_lock;
 /* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
 	spinlock_t proc_lock;
-/* context-switch lock */
-	spinlock_t switch_lock;
 
 /* journalling filesystem info */
 	void *journal_info;
@@ -740,6 +765,7 @@ struct task_struct {
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
 #endif
+	atomic_t fs_excl;	/* holding fs exclusive resources */
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
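
Note: fs_excl counts filesystem-exclusive resources the task currently holds (a locked superblock, for instance) so the I/O scheduler can tell the task is in a critical section other tasks may be waiting on. The companion helpers follow the usual get/put pattern; this is a sketch of their likely shape in linux/fs.h:

    #define get_fs_excl()	atomic_inc(&current->fs_excl)
    #define put_fs_excl()	atomic_dec(&current->fs_excl)
    #define has_fs_excl()	atomic_read(&current->fs_excl)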
@@ -909,7 +935,7 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p));
+extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
 extern void FASTCALL(sched_exit(task_t * p));
 
 extern int in_group_p(gid_t);
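
Note: sched_fork() now sees the clone flags, which pairs with the SD_BALANCE_FORK domain flag above: the scheduler can pick the child's CPU at fork time instead of always inheriting the parent's. The caller simply forwards the flags; the body might then do something like this sketch (sched_balance_self() is assumed here as the placement helper):

    /* caller side, in copy_process() */
    sched_fork(p, clone_flags);

    /* inside sched_fork(), SMP case (illustrative) */
    cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
    set_task_cpu(p, cpu);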
@@ -1089,7 +1115,8 @@ extern void unhash_process(struct task_struct *p);
 
 /*
  * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
- * subscriptions and synchronises with wait4().  Also used in procfs.
+ * subscriptions and synchronises with wait4().  Also used in procfs.  Also
+ * pins the final release of task.io_context.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
@@ -1242,33 +1269,78 @@ extern void normalize_rt_tasks(void);
 
 #endif
 
-/* try_to_freeze
- *
- * Checks whether we need to enter the refrigerator
- * and returns 1 if we did so.
- */
 #ifdef CONFIG_PM
-extern void refrigerator(unsigned long);
+/*
+ * Check if a process has been frozen
+ */
+static inline int frozen(struct task_struct *p)
+{
+	return p->flags & PF_FROZEN;
+}
+
+/*
+ * Check if there is a request to freeze a process
+ */
+static inline int freezing(struct task_struct *p)
+{
+	return p->flags & PF_FREEZE;
+}
+
+/*
+ * Request that a process be frozen
+ * FIXME: SMP problem. We may not modify other process' flags!
+ */
+static inline void freeze(struct task_struct *p)
+{
+	p->flags |= PF_FREEZE;
+}
+
+/*
+ * Wake up a frozen process
+ */
+static inline int thaw_process(struct task_struct *p)
+{
+	if (frozen(p)) {
+		p->flags &= ~PF_FROZEN;
+		wake_up_process(p);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * freezing is complete, mark process as frozen
+ */
+static inline void frozen_process(struct task_struct *p)
+{
+	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
+}
+
+extern void refrigerator(void);
 extern int freeze_processes(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(unsigned long refrigerator_flags)
+static inline int try_to_freeze(void)
 {
-	if (unlikely(current->flags & PF_FREEZE)) {
-		refrigerator(refrigerator_flags);
+	if (freezing(current)) {
+		refrigerator();
 		return 1;
 	} else
 		return 0;
 }
 #else
-static inline void refrigerator(unsigned long flag) {}
+static inline int frozen(struct task_struct *p) { return 0; }
+static inline int freezing(struct task_struct *p) { return 0; }
+static inline void freeze(struct task_struct *p) { BUG(); }
+static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline void frozen_process(struct task_struct *p) { BUG(); }
+
+static inline void refrigerator(void) {}
 static inline int freeze_processes(void) { BUG(); return 0; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(unsigned long refrigerator_flags)
-{
-	return 0;
-}
+static inline int try_to_freeze(void) { return 0; }
+
 #endif /* CONFIG_PM */
 #endif /* __KERNEL__ */
 
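Note: taken together, the freezer becomes a small predicate-based API: PF_FREEZE marks the request, PF_FROZEN the acknowledged state, and try_to_freeze() drops its never-used flags argument. The typical consumer is a kernel thread polling in its main loop; the thread below is hypothetical, but the calls are the reworked API:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int example_thread(void *unused)
    {
            while (!kthread_should_stop()) {
                    try_to_freeze();        /* park in the refrigerator if requested */

                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule();
                    /* ... handle the work that woke us ... */
            }
            return 0;
    }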