path: root/include/linux/sched.h
author    Takashi Iwai <tiwai@suse.de>  2008-10-31 12:13:10 -0400
committer Takashi Iwai <tiwai@suse.de>  2008-10-31 12:13:10 -0400
commit    7b3b6e42032e94a6132a85642e95106f5346650e (patch)
tree      8b2262291341d8a9f9b1e7e3c63a3289bb6c6de6 /include/linux/sched.h
parent    04172c0b9ea5861e5cba7909da5297b3aedac9e1 (diff)
parent    0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into topic/asoc
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  124
1 file changed, 98 insertions(+), 26 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d9120c5ad15..8478f334d732 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -287,7 +287,6 @@ extern void trap_init(void);
 extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
-extern void hrtick_resched(void);
 
 extern void sched_show_task(struct task_struct *p);
 
@@ -352,7 +351,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -363,7 +362,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
@@ -374,7 +373,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
 
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 #define get_mm_rss(mm) \
 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
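Note: the three hunks above only rename the guard — the old NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS comparison becomes the USE_SPLIT_PTLOCKS helper — with the semantics unchanged: when page-table locks are split, the mm counters are no longer serialized by mm->page_table_lock and must be updated atomically. A minimal userspace sketch of the two variants (struct mm_demo and the USE_SPLIT_PTLOCKS_DEMO switch are stand-ins for illustration, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define USE_SPLIT_PTLOCKS_DEMO 1    /* flip to 0 for the locked variant */

    #if USE_SPLIT_PTLOCKS_DEMO
    /* No common lock: counters must be updated atomically. */
    struct mm_demo { atomic_long _file_rss, _anon_rss; };
    #define demo_get_mm_counter(mm, member) atomic_load(&(mm)->_##member)
    #define demo_inc_mm_counter(mm, member) atomic_fetch_add(&(mm)->_##member, 1)
    #else
    /* page_table_lock (not modeled here) protects plain increments. */
    struct mm_demo { long _file_rss, _anon_rss; };
    #define demo_get_mm_counter(mm, member) ((mm)->_##member)
    #define demo_inc_mm_counter(mm, member) ((mm)->_##member++)
    #endif

    #define demo_get_mm_rss(mm) \
        (demo_get_mm_counter(mm, file_rss) + demo_get_mm_counter(mm, anon_rss))

    int main(void)
    {
        struct mm_demo mm = { 0 };
        demo_inc_mm_counter(&mm, file_rss);
        demo_inc_mm_counter(&mm, anon_rss);
        printf("rss = %ld\n", (long)demo_get_mm_rss(&mm));  /* prints 2 */
        return 0;
    }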
@@ -403,12 +402,21 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_MAPPED_PRIVATE	4
 #define MMF_DUMP_MAPPED_SHARED	5
 #define MMF_DUMP_ELF_HEADERS	6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED  8
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	5
+#define MMF_DUMP_FILTER_BITS	7
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
-	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
+	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
+	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF	0
+#endif
 
 struct sighand_struct {
 	atomic_t		count;
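Note: the coredump-filter hunk above adds two hugetlb dump bits (7 and 8) and widens the filter from 5 to 7 bits; with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS set, the ELF-headers bit also joins the default. A worked userspace check of the resulting masks, with the constants copied from the hunk (MMF_DUMPABLE_BITS is taken to be 2, its value elsewhere in this header — an assumption, not shown in this diff):

    #include <assert.h>
    #include <stdio.h>

    #define MMF_DUMPABLE_BITS        2  /* assumed from the rest of sched.h */
    #define MMF_DUMP_ANON_PRIVATE    2
    #define MMF_DUMP_ANON_SHARED     3
    #define MMF_DUMP_ELF_HEADERS     6
    #define MMF_DUMP_HUGETLB_PRIVATE 7

    #define MMF_DUMP_FILTER_SHIFT    MMF_DUMPABLE_BITS
    #define MMF_DUMP_FILTER_BITS     7
    #define MMF_DUMP_FILTER_MASK \
        (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
    #define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) /* CONFIG on */
    #define MMF_DUMP_FILTER_DEFAULT \
        ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
         (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

    int main(void)
    {
        assert(MMF_DUMP_FILTER_MASK == 0x1fc);  /* 7 bits, shifted past bits 0-1 */
        /* every default bit lies inside the widened filter mask */
        assert((MMF_DUMP_FILTER_DEFAULT & ~MMF_DUMP_FILTER_MASK) == 0);
        printf("default filter = %#x\n", MMF_DUMP_FILTER_DEFAULT);  /* 0xcc */
        return 0;
    }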
@@ -425,6 +433,39 @@ struct pacct_struct {
 	unsigned long		ac_minflt, ac_majflt;
 };
 
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:		time spent in user mode, in &cputime_t units
+ * @stime:		time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp	stime
+#define virt_exp	utime
+#define sched_exp	sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:		thread group interval timers; substructure for
+ *			uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+	struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
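Note: struct task_cputime is the core of this merge's thread-group CPU accounting rework — it bundles the three per-clock counts so callers can carry them as one value, and thread_group_cputime.totals points at one such bundle (per-cpu on SMP). A hedged userspace sketch of the kind of summing this grouping enables; cputime_t is approximated by unsigned long, and the per-CPU loop is illustrative, not the kernel's actual per-cpu iteration:

    #include <stdio.h>

    typedef unsigned long cputime_t;    /* stand-in for the kernel type */

    struct task_cputime {
        cputime_t utime;                        /* user-mode time */
        cputime_t stime;                        /* kernel-mode time */
        unsigned long long sum_exec_runtime;    /* ns on the CPU */
    };

    /* Fold one per-CPU sample into a running group total. */
    static void task_cputime_add(struct task_cputime *total,
                                 const struct task_cputime *sample)
    {
        total->utime            += sample->utime;
        total->stime            += sample->stime;
        total->sum_exec_runtime += sample->sum_exec_runtime;
    }

    /* Group total = sum over all CPUs' slots (illustrative loop). */
    static struct task_cputime
    thread_group_total(const struct task_cputime *percpu, int ncpus)
    {
        struct task_cputime total = { 0, 0, 0 };
        for (int cpu = 0; cpu < ncpus; cpu++)
            task_cputime_add(&total, &percpu[cpu]);
        return total;
    }

    int main(void)
    {
        struct task_cputime samples[2] = { { 1, 2, 300 }, { 4, 5, 600 } };
        struct task_cputime t = thread_group_total(samples, 2);
        printf("u=%lu s=%lu ns=%llu\n", t.utime, t.stime, t.sum_exec_runtime);
        return 0;
    }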
@@ -451,8 +492,8 @@ struct signal_struct {
 	 * - everyone except group_exit_task is stopped during signal delivery
 	 *   of fatal signals, group_exit_task processes the signal.
 	 */
-	struct task_struct	*group_exit_task;
 	int			notify_count;
+	struct task_struct	*group_exit_task;
 
 	/* thread group stop support, overloads group_exit_code too */
 	int			group_stop_count;
@@ -470,6 +511,17 @@ struct signal_struct {
 	cputime_t it_prof_expires, it_virt_expires;
 	cputime_t it_prof_incr, it_virt_incr;
 
+	/*
+	 * Thread group totals for process CPU clocks.
+	 * See thread_group_cputime(), et al, for details.
+	 */
+	struct thread_group_cputime cputime;
+
+	/* Earliest-expiration cache. */
+	struct task_cputime cputime_expires;
+
+	struct list_head cpu_timers[3];
+
 	/* job control IDs */
 
 	/*
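Note: the new signal_struct fields give the group one shared totals pointer ('cputime') plus an earliest-expiration cache ('cputime_expires'), reusing the prof_exp/virt_exp/sched_exp aliases defined on task_cputime above. A short sketch of the caching idea — a timer check compares current counts against one cached value per clock instead of walking every armed timer; update_expiry_cache() is a hypothetical helper, not a kernel function:

    typedef unsigned long cputime_t;

    struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
    };

    /* Alternate field names when used to cache expirations (as in the hunk). */
    #define prof_exp    stime
    #define virt_exp    utime
    #define sched_exp   sum_exec_runtime

    /* Keep only the earliest pending expiration for the profiling clock;
     * 0 means "no timer armed". (Hypothetical helper.) */
    static void update_expiry_cache(struct task_cputime *cache, cputime_t new_prof)
    {
        if (cache->prof_exp == 0 || new_prof < cache->prof_exp)
            cache->prof_exp = new_prof;
    }

    int main(void)
    {
        struct task_cputime cache = { 0, 0, 0 };
        update_expiry_cache(&cache, 500);   /* arms the prof clock cache */
        update_expiry_cache(&cache, 200);   /* earlier expiration wins */
        return cache.prof_exp == 200 ? 0 : 1;
    }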
@@ -500,7 +552,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t utime, stime, cutime, cstime;
+	cputime_t cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -509,14 +561,6 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
-	 * Cumulative ns of scheduled CPU time for dead threads in the
-	 * group, not including a zombie group leader.  (This only differs
-	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
-	 * other than jiffies.)
-	 */
-	unsigned long long sum_sched_runtime;
-
-	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -527,8 +571,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	struct list_head cpu_timers[3];
-
 	/* keep the process-shared keyrings here so that they do the right
 	 * thing in threads created with CLONE_THREAD */
 #ifdef CONFIG_KEYS
@@ -638,10 +680,6 @@ struct sched_info {
 };
 #endif	/* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
-#ifdef CONFIG_SCHEDSTATS
-extern const struct file_operations proc_schedstat_operations;
-#endif /* CONFIG_SCHEDSTATS */
-
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
 	spinlock_t	lock;
@@ -824,6 +862,9 @@ struct sched_domain {
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+	char *name;
+#endif
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
@@ -897,7 +938,7 @@ struct sched_class {
 	void (*yield_task) (struct rq *rq);
 	int  (*select_task_rq)(struct task_struct *p, int sync);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
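Note: check_preempt_curr() now takes the same 'sync' argument as select_task_rq(), so every scheduling class has to be updated to the new signature; in the sync-wakeup convention the flag hints that the waker is about to sleep. A minimal sketch of what the signature change means for an implementation (demo-only types; the real fair-class code lives in kernel/sched_fair.c):

    /* Demo-only stand-ins for the kernel types. */
    struct rq;
    struct task_struct;

    struct sched_class_demo {
        /* old: void (*check_preempt_curr)(struct rq *, struct task_struct *); */
        void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int sync);
    };

    /* A class implementation must now accept (and may use) the sync hint. */
    static void demo_check_preempt(struct rq *rq, struct task_struct *p, int sync)
    {
        (void)rq; (void)p; (void)sync;  /* preemption decision logic elided */
    }

    static const struct sched_class_demo demo_class = {
        .check_preempt_curr = demo_check_preempt,
    };

    int main(void)
    {
        demo_class.check_preempt_curr(0, 0, 1); /* sync wakeup hint set */
        return 0;
    }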
@@ -1010,8 +1051,8 @@ struct sched_entity {
 
 struct sched_rt_entity {
 	struct list_head run_list;
-	unsigned int time_slice;
 	unsigned long timeout;
+	unsigned int time_slice;
 	int nr_cpus_allowed;
 
 	struct sched_rt_entity *back;
@@ -1134,8 +1175,7 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
-	cputime_t it_prof_expires, it_virt_expires;
-	unsigned long long it_sched_expires;
+	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
 /* process credentials */
@@ -1301,6 +1341,12 @@ struct task_struct {
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
 #endif
+	/*
+	 * time slack values; these are used to round up poll() and
+	 * select() etc timeout values. These are in nanoseconds.
+	 */
+	unsigned long timer_slack_ns;
+	unsigned long default_timer_slack_ns;
 };
 
 /*
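Note: timer_slack_ns / default_timer_slack_ns come from the timer-slack feature merged in this window: select()/poll() and other hrtimer-based timeouts may fire up to 'slack' nanoseconds late so that nearby expirations can be coalesced, saving wakeups. Userspace controls the per-task value with prctl(); a minimal example (PR_SET_TIMERSLACK/PR_GET_TIMERSLACK are the constants added alongside this field — on kernels predating this merge the call fails):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TIMERSLACK   /* added together with this kernel feature */
    #define PR_SET_TIMERSLACK 29
    #define PR_GET_TIMERSLACK 30
    #endif

    int main(void)
    {
        /* Allow our timeouts to be rounded up by as much as 1 ms. */
        if (prctl(PR_SET_TIMERSLACK, 1000000UL, 0, 0, 0) != 0)
            perror("PR_SET_TIMERSLACK");

        long slack = prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0);
        printf("timer slack: %ld ns\n", slack);
        return 0;
    }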
@@ -1585,6 +1631,7 @@ extern unsigned long long cpu_clock(int cpu);
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1619,6 +1666,7 @@ extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -2082,6 +2130,30 @@ static inline int spin_needbreak(spinlock_t *lock)
 }
 
 /*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+	sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+	if (curr->signal->cputime.totals)
+		return 0;
+	return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+	free_percpu(sig->cputime.totals);
+}
+
+/*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
  * This is required every time the blocked sigset_t changes.
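Note: the inlines in the last hunk define the lifecycle of the shared totals: a new process starts with cputime.totals NULL (init), the first CLONE_THREAD clone triggers the allocation (thread_group_cputime_clone_thread), and free_percpu() releases it with the signal_struct — so single-threaded processes never pay for the per-cpu buffer. A rough userspace analogue of that lazy-allocation pattern (plain calloc/free standing in for alloc_percpu/free_percpu; demo types only):

    #include <stdlib.h>

    struct task_cputime_demo { unsigned long utime, stime; };
    struct signal_demo { struct task_cputime_demo *totals; };

    /* Single-threaded processes never pay for the shared buffer... */
    static void group_cputime_init(struct signal_demo *sig)
    {
        sig->totals = NULL;
    }

    /* ...it is allocated lazily when a second thread appears... */
    static int group_cputime_clone_thread(struct signal_demo *sig)
    {
        if (sig->totals)
            return 0;
        sig->totals = calloc(1, sizeof(*sig->totals)); /* kernel: alloc_percpu */
        return sig->totals ? 0 : -1;
    }

    /* ...and is released together with the signal struct. */
    static void group_cputime_free(struct signal_demo *sig)
    {
        free(sig->totals);  /* kernel: free_percpu */
    }

    int main(void)
    {
        struct signal_demo sig;
        group_cputime_init(&sig);
        if (group_cputime_clone_thread(&sig) == 0)
            group_cputime_free(&sig);
        return 0;
    }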