author		Takashi Iwai <tiwai@suse.de>	2009-01-15 12:27:20 -0500
committer	Takashi Iwai <tiwai@suse.de>	2009-01-15 12:27:20 -0500
commit		c0106d72b8d71696dbe9dc80e2c77d4ac63f7531 (patch)
tree		ca96d2baecb0555e36219ed6968dc8e306e530ca /include/linux/sched.h
parent		5852973c129cf7c7e6c229abb7250673fc2a50c9 (diff)
parent		eff317d0834ad1ff03f747f6bc2d76b9a9c95160 (diff)
Merge branch 'topic/asoc' into next/asoc
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	| 103
1 file changed, 78 insertions(+), 25 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8395e715809d..4cae9b81a1f8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -250,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
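The cpumask_t to cpumask_var_t conversion means the mask may live off-stack when CONFIG_CPUMASK_OFFSTACK=y, so it has to be allocated and freed explicitly. A minimal sketch of that pattern, using the standard cpumask API on a hypothetical module-local mask (my_mask and the init/exit functions are illustrative, not part of this patch):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static cpumask_var_t my_mask;	/* hypothetical module-local mask */

	static int __init my_mask_init(void)
	{
		/* With CONFIG_CPUMASK_OFFSTACK=y this kmallocs the bitmap;
		 * otherwise cpumask_var_t is a plain array and this always
		 * succeeds. */
		if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_clear(my_mask);
		return 0;
	}

	static void __exit my_mask_exit(void)
	{
		free_cpumask_var(my_mask);
	}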
@@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout);
 
 extern void cpu_init (void);
 extern void trap_init(void);
-extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
@@ -387,6 +386,9 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 	(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
+#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
+#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
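The new getters return the peak ("high-water") RSS and VM counters; since the hiwater fields are only folded in lazily, each macro takes max() with the current value. A hedged snippet of how a reporting path might use them (the _kb variables are illustrative):

	/* Hypothetical /proc-style reporting: counters are in pages, so shift
	 * by (PAGE_SHIFT - 10) to get kilobytes. */
	unsigned long peak_rss_kb = get_mm_hiwater_rss(mm) << (PAGE_SHIFT - 10);
	unsigned long peak_vm_kb  = get_mm_hiwater_vm(mm)  << (PAGE_SHIFT - 10);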
@@ -758,20 +760,51 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER	\
-	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
+	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
+					 * first for long running threads
+					 */
+	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
+					 * cpu package for power savings
+					 */
+	MAX_POWERSAVINGS_BALANCE_LEVELS
+};
 
-#define BALANCE_FOR_PKG_POWER	\
-	((sched_mc_power_savings || sched_smt_power_savings) ? \
-	 SD_POWERSAVINGS_BALANCE : 0)
+extern int sched_mc_power_savings, sched_smt_power_savings;
 
-#define test_sd_parent(sd, flag)   ((sd->parent &&	\
-					(sd->parent->flags & flag)) ? 1 : 0)
-
+static inline int sd_balance_for_mc_power(void)
+{
+	if (sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_BALANCE_NEWIDLE;
+
+	return 0;
+}
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
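The BALANCE_FOR_*_POWER macros become typed inline functions, and a domain initializer can OR their results into its flags. A hedged sketch of a caller, loosely modeled on the SD_*_INIT templates in the topology headers (cpu_domain_flags is a hypothetical name; the SD_* flags are from this header):

	/* Hypothetical flags computation for a package-level domain:
	 * power-savings balancing is only requested when the corresponding
	 * sched_mc/smt_power_savings knob is non-zero. */
	static int cpu_domain_flags(void)
	{
		return SD_LOAD_BALANCE | SD_BALANCE_FORK | SD_BALANCE_EXEC
			| sd_balance_for_package_power()
			| sd_power_saving_flags();
	}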
@@ -784,8 +817,15 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
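With the cpumask now a trailing flexible array, code iterates through the accessor instead of taking &sg->cpumask. A minimal sketch (group_weight_sum and wl are hypothetical; for_each_cpu() is the standard cpumask iterator):

	/* Hypothetical walk over one balancing group's CPUs via the new
	 * sched_group_cpus() accessor. */
	static unsigned long group_weight_sum(struct sched_group *sg,
					      const unsigned long *wl)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_cpu(cpu, sched_group_cpus(sg))
			sum += wl[cpu];
		return sum;
	}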
@@ -809,7 +849,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -864,18 +903,34 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return to_cpumask(sd->span);
+}
+
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 				    struct sched_domain_attr *dattr_new);
-extern int arch_reinit_sched_domains(void);
+
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+	if (sd->parent && (sd->parent->flags & flag))
+		return 1;
+
+	return 0;
+}
 
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			struct sched_domain_attr *dattr_new)
 {
 }
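struct sched_domain gets the same flexible-array treatment as struct sched_group, and test_sd_parent turns from a macro into a typed inline. A hedged sketch combining the two new accessors (wants_powersave is a hypothetical helper; cpumask_test_cpu is the standard bit test):

	/* Hypothetical check: does this domain span a given CPU, and does
	 * its parent request power-savings balancing? */
	static int wants_powersave(struct sched_domain *sd, int cpu)
	{
		if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
			return 0;
		return test_sd_parent(sd, SD_POWERSAVINGS_BALANCE);
	}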
@@ -926,7 +981,7 @@ struct sched_class {
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const cpumask_t *newmask);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1579,12 +1634,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-				const cpumask_t *new_mask);
+				const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-				       const cpumask_t *new_mask)
+				       const struct cpumask *new_mask)
 {
-	if (!cpu_isset(0, *new_mask))
+	if (!cpumask_test_cpu(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
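Callers now pass struct cpumask pointers rather than dereferenced cpumask_t values. A minimal sketch of a caller (pin_to_cpu is a hypothetical helper; cpumask_of() returns a const struct cpumask * for a single CPU):

	/* Hypothetical helper pinning a kernel thread to one CPU with the
	 * pointer-based API. */
	static int pin_to_cpu(struct task_struct *p, int cpu)
	{
		return set_cpus_allowed_ptr(p, cpumask_of(cpu));
	}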
@@ -1651,16 +1706,16 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
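The first group of sysctls moves out of the CONFIG_SCHED_DEBUG guard, so their externs are visible to sysctl tables in all builds. A hedged sketch of why that matters, as a 2.6-era ctl_table entry (my_sched_table is hypothetical; field names follow the ctl_table layout of that period, some details such as .ctl_name are omitted):

	/* Hypothetical sysctl table entry: it references
	 * sysctl_sched_latency and therefore needs the extern even when
	 * CONFIG_SCHED_DEBUG is off. */
	static struct ctl_table my_sched_table[] = {
		{
			.procname	= "sched_latency_ns",
			.data		= &sysctl_sched_latency,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};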
@@ -2195,10 +2250,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
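The affinity setters switch to struct cpumask pointers, and the duplicate extern of sched_mc_power_savings/sched_smt_power_savings is dropped here, since the declaration now sits next to the power-savings helpers earlier in the header. A hedged sketch of an in-kernel caller under the new API (spread_task is hypothetical; the cpumask calls are standard):

	/* Hypothetical in-kernel caller: allow a task to run anywhere,
	 * using a stack-safe cpumask_var_t. */
	static int spread_task(pid_t pid)
	{
		cpumask_var_t mask;
		int ret;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_setall(mask);
		ret = sched_setaffinity(pid, mask);
		free_cpumask_var(mask);
		return ret;
	}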