author     Arjan van de Ven <arjan@linux.intel.com>    2008-10-17 12:20:26 -0400
committer  Arjan van de Ven <arjan@linux.intel.com>    2008-10-17 12:20:26 -0400
commit     651dab4264e4ba0e563f5ff56f748127246e9065 (patch)
tree       016630974bdcb00fe529b673f96d389e0fd6dc94 /include/linux/sched.h
parent     40b8606253552109815786e5d4b0de98782d31f5 (diff)
parent     2e532d68a2b3e2aa6b19731501222069735c741c (diff)
Merge commit 'linus/master' into merge-linus
Conflicts:
        arch/x86/kvm/i8254.c
Diffstat (limited to 'include/linux/sched.h')

-rw-r--r--  include/linux/sched.h | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dcc03fd5a7f3..de53c109fd04 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -352,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -363,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
@@ -374,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
 
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 #define get_mm_rss(mm) \
         (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
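
Note: USE_SPLIT_PTLOCKS replaces the open-coded NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS test so the split-page-table-lock decision is spelled out in exactly one place. A minimal sketch, assuming the macro is defined roughly as in include/linux/mm_types.h of this era:

    /* Sketch only; the authoritative definition lives in mm_types.h. */
    #define USE_SPLIT_PTLOCKS  (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

    /* Callers are unchanged either way, e.g. in a fault path: */
    inc_mm_counter(mm, anon_rss);

With split ptlocks, several CPUs can update the counters without holding page_table_lock, hence the atomic_long variants above; otherwise page_table_lock serializes updates and plain increments suffice.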
@@ -451,8 +451,8 @@ struct signal_struct {
          * - everyone except group_exit_task is stopped during signal delivery
          *   of fatal signals, group_exit_task processes the signal.
          */
-        struct task_struct      *group_exit_task;
         int                     notify_count;
+        struct task_struct      *group_exit_task;
 
         /* thread group stop support, overloads group_exit_code too */
         int                     group_stop_count;
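
Note: the commit does not say why group_exit_task and notify_count swap places. One plausible reading is packing: on 64-bit builds notify_count can now share an 8-byte slot with the int that precedes it instead of padding against the pointer. A hypothetical userspace probe (cut-down stand-ins, not the kernel's real signal_struct) shows how to verify such an effect:

    #include <stdio.h>
    #include <stddef.h>

    struct task_struct;  /* opaque, as in the kernel header */

    struct old_layout {
            int group_exit_code;
            struct task_struct *group_exit_task;  /* pointer splits the ints */
            int notify_count;
    };

    struct new_layout {
            int group_exit_code;
            int notify_count;                     /* two ints pack together */
            struct task_struct *group_exit_task;
    };

    int main(void)
    {
            printf("old: %zu bytes\n", sizeof(struct old_layout));  /* 24 on LP64 */
            printf("new: %zu bytes\n", sizeof(struct new_layout));  /* 16 on LP64 */
            return 0;
    }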
@@ -824,6 +824,9 @@ struct sched_domain {
         unsigned int ttwu_move_affine;
         unsigned int ttwu_move_balance;
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+        char *name;
+#endif
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
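
Note: the new name field gives every scheduling domain a human-readable label when CONFIG_SCHED_DEBUG is enabled. A hedged sketch of a consumer; sd_name() below is a hypothetical helper, not a kernel function:

    /* Hypothetical helper for debug printouts over a domain hierarchy. */
    static const char *sd_name(struct sched_domain *sd)
    {
    #ifdef CONFIG_SCHED_DEBUG
            return sd->name ? sd->name : "(unnamed)";
    #else
            return "(debug off)";   /* the field does not exist in this config */
    #endif
    }

Guarding the field itself with CONFIG_SCHED_DEBUG keeps sched_domain no larger in production configs.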
@@ -897,7 +900,7 @@ struct sched_class {
         void (*yield_task) (struct rq *rq);
         int  (*select_task_rq)(struct task_struct *p, int sync);
 
-        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
         struct task_struct * (*pick_next_task) (struct rq *rq);
         void (*put_prev_task) (struct rq *rq, struct task_struct *p);
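
Note: check_preempt_curr() gains the same int sync flag that select_task_rq() already takes, forwarding the wake-up's "synchronous" hint (the waker is about to sleep) into each class's preemption decision. A simplified sketch of a class hook with the new signature; the body is illustrative, not the real CFS logic:

    /* Sketch: a sched_class implementation after the signature change. */
    static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
    {
            /*
             * A synchronous wakeup hints that rq->curr will sleep shortly,
             * so preempting it in favour of p is cheaper than usual.
             */
            if (sync) {
                    resched_task(rq->curr);
                    return;
            }
            /* ... otherwise fall back to the normal preemption test ... */
    }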
@@ -1010,8 +1013,8 @@ struct sched_entity {
 
 struct sched_rt_entity {
         struct list_head run_list;
-        unsigned int time_slice;
         unsigned long timeout;
+        unsigned int time_slice;
         int nr_cpus_allowed;
 
         struct sched_rt_entity *back;
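
Note: swapping time_slice and timeout only changes layout, plausibly so the two 32-bit fields (time_slice and nr_cpus_allowed) sit next to each other on 64-bit builds. time_slice itself holds the remaining SCHED_RR quantum. A condensed sketch of how the tick path consumes it; the names follow kernel/sched_rt.c of this era, but the body is simplified and the exact requeue helper signature varied across releases:

    /* Condensed sketch of round-robin time-slice accounting. */
    static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
    {
            if (p->policy != SCHED_RR)
                    return;                         /* FIFO tasks have no quantum */

            if (--p->rt.time_slice)
                    return;                         /* quantum not yet used up */

            p->rt.time_slice = DEF_TIMESLICE;       /* refill the quantum ... */
            requeue_task_rt(rq, p);                 /* ... and go to the queue tail */
            set_tsk_need_resched(p);
    }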