author     Len Brown <len.brown@intel.com>    2006-06-29 19:57:46 -0400
committer  Len Brown <len.brown@intel.com>    2006-06-29 19:57:46 -0400
commit     d120cfb544ed6161b9d32fb6c4648c471807ee6b
tree       7757ad0198d8df76ff5c60f939a687687c41da00  /include/linux/sched.h
parent     9dce0e950dbfab4148f35ac6f297d8638cdc63c4
parent     bf7e8511088963078484132636839b59e25cf14f
merge linus into release branch
Conflicts:
drivers/acpi/acpi_memhotplug.c
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  59
1 file changed, 54 insertions, 5 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 122a25c1b997..821f0481ebe1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -73,6 +73,7 @@ struct sched_param {
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
+#include <linux/rtmutex.h>
 
 #include <linux/time.h>
 #include <linux/param.h>
@@ -83,6 +84,7 @@ struct sched_param {
 #include <asm/processor.h>
 
 struct exec_domain;
+struct futex_pi_state;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -123,6 +125,7 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern unsigned long weighted_cpuload(const int cpu);
 
 
 /*
@@ -494,8 +497,11 @@ struct signal_struct {
 
 #define MAX_PRIO	(MAX_RT_PRIO + 40)
 
-#define rt_task(p)	(unlikely((p)->prio < MAX_RT_PRIO))
+#define rt_prio(prio)	unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p)	rt_prio((p)->prio)
 #define batch_task(p)	(unlikely((p)->policy == SCHED_BATCH))
+#define has_rt_policy(p) \
+	unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
 
 /*
  * Some day this will be a full-fledged user tracking system..
@@ -558,9 +564,9 @@ enum idle_type
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
 
+#ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
@@ -569,6 +575,11 @@ enum idle_type
 #define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
 #define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
 #define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
+
+#define BALANCE_FOR_POWER	((sched_mc_power_savings || sched_smt_power_savings) \
+				 ? SD_POWERSAVINGS_BALANCE : 0)
+
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
@@ -638,7 +649,7 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(cpumask_t *partition1,
+extern int partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
 
 /*
@@ -713,10 +724,13 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#ifdef CONFIG_SMP
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-	int prio, static_prio;
+#endif
+	int load_weight;	/* for niceness load balancing purposes */
+	int prio, static_prio, normal_prio;
 	struct list_head run_list;
 	prio_array_t *array;
 
@@ -843,6 +857,20 @@ struct task_struct {
 	/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
 	spinlock_t alloc_lock;
 
+	/* Protection of the PI data structures: */
+	spinlock_t pi_lock;
+
+#ifdef CONFIG_RT_MUTEXES
+	/* PI waiters blocked on a rt_mutex held by this task */
+	struct plist_head pi_waiters;
+	/* Deadlock detection and priority inheritance handling */
+	struct rt_mutex_waiter *pi_blocked_on;
+# ifdef CONFIG_DEBUG_RT_MUTEXES
+	spinlock_t held_list_lock;
+	struct list_head held_list_head;
+# endif
+#endif
+
 #ifdef CONFIG_DEBUG_MUTEXES
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
@@ -888,6 +916,8 @@ struct task_struct {
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
+	struct list_head pi_state_list;
+	struct futex_pi_state *pi_state_cache;
 
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
@@ -955,6 +985,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
+#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -1009,6 +1040,19 @@ static inline void idle_task_exit(void) {}
 #endif
 
 extern void sched_idle_next(void);
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(task_t *p);
+extern void rt_mutex_setprio(task_t *p, int prio);
+extern void rt_mutex_adjust_pi(task_t *p);
+#else
+static inline int rt_mutex_getprio(task_t *p)
+{
+	return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p)		do { } while (0)
+#endif
+
 extern void set_user_nice(task_t *p, long nice);
 extern int task_prio(const task_t *p);
 extern int task_nice(const task_t *p);
@@ -1408,6 +1452,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
+#include <linux/sysdev.h>
+extern int sched_mc_power_savings, sched_smt_power_savings;
+extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
+extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+
 extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_PM
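
For orientation, the sketch below mirrors the semantics of the new priority-classification helpers this merge adds to sched.h (rt_prio(), rt_task(), has_rt_policy()). It is a minimal user-space illustration, not kernel code: the policy constants and MAX_RT_PRIO value are copied from kernel headers of this era, the fake_task struct is a hypothetical stand-in for the relevant task_struct fields, and the unlikely() branch hints are dropped.

```c
#include <stdio.h>

/* Scheduling policy numbers as defined in kernels of this era. */
#define SCHED_NORMAL	0
#define SCHED_FIFO	1
#define SCHED_RR	2
#define SCHED_BATCH	3

#define MAX_RT_PRIO	100	/* MAX_USER_RT_PRIO in the kernel headers */

/* Hypothetical stand-in for the task_struct fields the macros inspect. */
struct fake_task {
	int prio;		/* effective priority; < MAX_RT_PRIO means RT */
	unsigned int policy;	/* scheduling policy */
};

/* Same logic as the new sched.h helpers, minus the unlikely() hints. */
#define rt_prio(prio)		((prio) < MAX_RT_PRIO)
#define rt_task(p)		rt_prio((p)->prio)
#define has_rt_policy(p) \
	((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)

int main(void)
{
	struct fake_task fifo  = { .prio = 10,  .policy = SCHED_FIFO  };
	struct fake_task batch = { .prio = 120, .policy = SCHED_BATCH };

	printf("fifo:  rt_task=%d has_rt_policy=%d\n",
	       rt_task(&fifo), has_rt_policy(&fifo));	/* 1 1 */
	printf("batch: rt_task=%d has_rt_policy=%d\n",
	       rt_task(&batch), has_rt_policy(&batch));	/* 0 0 */
	return 0;
}
```

Note that rt_task() classifies by effective priority while has_rt_policy() classifies by policy; the distinction matters once priority inheritance (rt_mutex_setprio() above) can temporarily boost a SCHED_NORMAL task into the real-time priority range.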
