path: root/include/linux/sched.h
author	Ingo Molnar <mingo@elte.hu>	2009-01-10 20:42:53 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-10 20:42:53 -0500
commit	506c10f26c481b7f8ef27c1c79290f68989b2e9e (patch)
tree	03de82e812f00957aa6276dac2fe51c3358e88d7	/include/linux/sched.h
parent	e1df957670aef74ffd9a4ad93e6d2c90bf6b4845 (diff)
parent	c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into perfcounters/core
Conflicts:
	include/linux/kernel_stat.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	103
1 file changed, 78 insertions(+), 25 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fc2c6f3477e7..f134a0f7080a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -251,7 +251,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
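The cpumask_var_t conversion above changes the allocation contract for anyone defining such a mask. A minimal sketch of that contract, using the 2.6.29-era cpumask API (the mask name and init function here are hypothetical):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/sched.h>

	static cpumask_var_t my_mask;	/* hypothetical; a pointer when CONFIG_CPUMASK_OFFSTACK=y */

	static int my_init(void)
	{
		/* Unlike a plain cpumask_t, a cpumask_var_t must be allocated before use. */
		if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(my_mask, nohz_cpu_mask);	/* snapshot the nohz mask */

		free_cpumask_var(my_mask);
		return 0;
	}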
@@ -285,7 +285,6 @@ long io_schedule_timeout(long timeout);
 
 extern void cpu_init (void);
 extern void trap_init(void);
-extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
@@ -388,6 +387,9 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 	(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
+#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
+#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
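The two new macros fold the live counter into the recorded high-water mark, so a reader never sees a peak lower than the current value. A fragment sketching a hypothetical accounting read:

	/* Peak RSS in pages for a task's mm; max() in the macro guards
	 * against a stale hiwater_rss. */
	unsigned long peak = get_mm_hiwater_rss(task->mm);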
@@ -759,20 +761,51 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER	\
-	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
+	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
+					 * first for long running threads
+					 */
+	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
+					 * cpu package for power savings
+					 */
+	MAX_POWERSAVINGS_BALANCE_LEVELS
+};
 
-#define BALANCE_FOR_PKG_POWER	\
-	((sched_mc_power_savings || sched_smt_power_savings) ?	\
-	 SD_POWERSAVINGS_BALANCE : 0)
+extern int sched_mc_power_savings, sched_smt_power_savings;
 
-#define test_sd_parent(sd, flag)	((sd->parent &&		\
-					 (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_mc_power(void)
+{
+	if (sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
 
+	return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_BALANCE_NEWIDLE;
+
+	return 0;
+}
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
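The three inline helpers are drop-in replacements for the deleted BALANCE_FOR_{MC,PKG}_POWER macros. A sketch of how a topology-level initializer might consume them (the exact flag composition is illustrative, not the kernel's actual initializer):

	/* Hypothetical fragment of a CPU-package sched_domain initializer. */
	sd->flags = SD_LOAD_BALANCE
		  | SD_BALANCE_FORK
		  | SD_BALANCE_EXEC
		  | sd_balance_for_package_power()	/* SD_POWERSAVINGS_BALANCE or 0 */
		  | sd_power_saving_flags();		/* SD_BALANCE_NEWIDLE or 0 */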
@@ -785,8 +818,15 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
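With the group mask now a trailing flexible array, callers go through the accessor instead of touching a fixed-size member. A fragment sketching the resulting iteration idiom (do_work() is a placeholder):

	int cpu;

	/* Walk every CPU in the group via the new accessor. */
	for_each_cpu(cpu, sched_group_cpus(group))
		do_work(cpu);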
@@ -810,7 +850,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -865,18 +904,34 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 				    struct sched_domain_attr *dattr_new);
-extern int arch_reinit_sched_domains(void);
+
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+	if (sd->parent && (sd->parent->flags & flag))
+		return 1;
+
+	return 0;
+}
 
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			struct sched_domain_attr *dattr_new)
 {
 }
@@ -927,7 +982,7 @@ struct sched_class {
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const cpumask_t *newmask);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1582,12 +1637,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-				const cpumask_t *new_mask);
+				const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-				       const cpumask_t *new_mask)
+				       const struct cpumask *new_mask)
 {
-	if (!cpu_isset(0, *new_mask))
+	if (!cpumask_test_cpu(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
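Call sites follow the same pointer-based pattern: the mask is passed by address and never copied by value. A sketch of a hypothetical caller pinning a task:

	/* Pin p to CPU 2; cpumask_of() yields a const struct cpumask *. */
	int ret = set_cpus_allowed_ptr(p, cpumask_of(2));

	if (ret)
		printk(KERN_WARNING "affinity update failed: %d\n", ret);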
@@ -1654,16 +1709,16 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
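Moving the first five declarations out of the CONFIG_SCHED_DEBUG block makes those tunables visible to all kernel code, not just debug builds. A fragment sketching a non-debug consumer (the halving heuristic is made up purely for illustration):

	/* Usable even with CONFIG_SCHED_DEBUG=n after this reordering. */
	u64 budget_ns = (u64)sysctl_sched_latency / 2;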
@@ -2198,10 +2253,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 