author      Ingo Molnar <mingo@elte.hu>    2009-01-10 20:42:53 -0500
committer   Ingo Molnar <mingo@elte.hu>    2009-01-10 20:42:53 -0500
commit      506c10f26c481b7f8ef27c1c79290f68989b2e9e (patch)
tree        03de82e812f00957aa6276dac2fe51c3358e88d7 /include/linux/sched.h
parent      e1df957670aef74ffd9a4ad93e6d2c90bf6b4845 (diff)
parent      c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into perfcounters/core
Conflicts:
include/linux/kernel_stat.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h   103
1 file changed, 78 insertions(+), 25 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fc2c6f3477e7..f134a0f7080a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -251,7 +251,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
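The old cpumask_t is a fixed NR_CPUS-bit struct; cpumask_var_t becomes a pointer when CONFIG_CPUMASK_OFFSTACK=y, so the mask must be allocated before first use. A minimal sketch of that allocation pattern, assuming it runs from some early init path (the function name here is hypothetical; the real allocation happens in the scheduler's init code):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>

/* Hypothetical init sketch: with CONFIG_CPUMASK_OFFSTACK=y this
 * allocates the bitmap; otherwise cpumask_var_t is a plain array
 * and alloc_cpumask_var() is a no-op that returns true. */
static int example_nohz_mask_init(void)
{
	if (!alloc_cpumask_var(&nohz_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(nohz_cpu_mask);
	return 0;
}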
@@ -285,7 +285,6 @@ long io_schedule_timeout(long timeout);
 
 extern void cpu_init (void);
 extern void trap_init(void);
-extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
@@ -388,6 +387,9 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 		(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
+#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
+#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
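The new helpers return the peak counters by taking max() against the live values, since hiwater_rss/hiwater_vm are only folded back at discrete update points. A hypothetical sketch of a reader using the accessor:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical sketch: report peak RSS in kB, the way a
 * /proc/<pid>/status-style printer might; the max() with the live
 * counter covers the window since hiwater_rss was last updated. */
static unsigned long example_peak_rss_kb(struct mm_struct *mm)
{
	return get_mm_hiwater_rss(mm) << (PAGE_SHIFT - 10);
}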
@@ -759,20 +761,51 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER	\
-	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
+	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
+					 * first for long running threads
+					 */
+	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
+					 * cpu package for power savings
+					 */
+	MAX_POWERSAVINGS_BALANCE_LEVELS
+};
 
-#define BALANCE_FOR_PKG_POWER	\
-	((sched_mc_power_savings || sched_smt_power_savings) ?	\
-	 SD_POWERSAVINGS_BALANCE : 0)
+extern int sched_mc_power_savings, sched_smt_power_savings;
 
-#define test_sd_parent(sd, flag)	((sd->parent &&		\
-					 (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_mc_power(void)
+{
+	if (sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
 
+	return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_BALANCE_NEWIDLE;
+
+	return 0;
+}
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
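The BALANCE_FOR_{MC,PKG}_POWER macros become typed inline helpers, and sd_power_saving_flags() adds SD_BALANCE_NEWIDLE so a newly idle CPU aggressively pulls work, letting whole packages go idle. A hypothetical sketch of how a topology-level flags initializer could consume them (the function name and the particular mix of SD_* defaults are illustrative, not taken from this patch):

#include <linux/sched.h>

/* Hypothetical sketch: compose a multi-core domain's flags from
 * static defaults plus the runtime power-savings bits. With both
 * sched_mc_power_savings and sched_smt_power_savings off, this
 * degenerates to just the default flags. */
static inline int example_mc_domain_flags(void)
{
	return SD_LOAD_BALANCE | SD_BALANCE_EXEC | SD_BALANCE_FORK
		| sd_balance_for_mc_power()
		| sd_power_saving_flags();
}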
@@ -785,8 +818,15 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
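struct sched_group now ends in a flexible array member sized at allocation time instead of embedding a full cpumask_t, with sched_group_cpus() as the accessor. A hypothetical iteration sketch over the new layout:

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* Hypothetical sketch: walk every CPU of every group in a domain via
 * the new accessor; sg->cpumask as a direct field no longer exists. */
static void example_walk_groups(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;
	int cpu;

	do {
		for_each_cpu(cpu, sched_group_cpus(sg))
			pr_debug("group cpu %d\n", cpu);
		sg = sg->next;		/* groups form a circular list */
	} while (sg != sd->groups);
}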
@@ -810,7 +850,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -865,18 +904,34 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 				    struct sched_domain_attr *dattr_new);
-extern int arch_reinit_sched_domains(void);
+
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+	if (sd->parent && (sd->parent->flags & flag))
+		return 1;
+
+	return 0;
+}
 
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			struct sched_domain_attr *dattr_new)
 {
 }
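struct sched_domain gets the same treatment: the span moves to a trailing flexible array reached through sched_domain_span(), and the old test_sd_parent() macro becomes a typed inline. A hypothetical caller combining the two:

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* Hypothetical sketch: size a domain via the new accessor and probe
 * a parent flag through the new inline helper. */
static int example_domain_weight(struct sched_domain *sd)
{
	int cpus = cpumask_weight(sched_domain_span(sd));

	if (test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
		pr_debug("parent balances for power across %d cpus\n", cpus);

	return cpus;
}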
@@ -927,7 +982,7 @@ struct sched_class {
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const cpumask_t *newmask);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1582,12 +1637,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-				const cpumask_t *new_mask);
+				const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-				       const cpumask_t *new_mask)
+				       const struct cpumask *new_mask)
 {
-	if (!cpu_isset(0, *new_mask))
+	if (!cpumask_test_cpu(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
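set_cpus_allowed_ptr() now takes const struct cpumask *, and the UP stub switches from cpu_isset() to cpumask_test_cpu(), which already works on a pointer. A hypothetical caller pinning a task without putting an NR_CPUS-sized mask on the stack:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/sched.h>

/* Hypothetical sketch: cpumask_of(cpu) yields a const struct cpumask *
 * backed by a shared constant table, so no cpumask_t lands on the stack. */
static int example_pin_task(struct task_struct *p, int cpu)
{
	if (!cpumask_test_cpu(cpu, cpu_online_mask))
		return -EINVAL;

	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}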
@@ -1654,16 +1709,16 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -2198,10 +2253,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 