Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h | 56
 1 file changed, 48 insertions, 8 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a37b5964828a..311380e5fe89 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -703,6 +703,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE 256  /* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES  512  /* Domain members share cpu pkg resources */
 #define SD_SERIALIZE            1024 /* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR        2048 /* Gain latency sacrificing cache hit */
 
 #define BALANCE_FOR_MC_POWER \
         (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
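Note: the new SD_WAKE_IDLE_FAR bit joins the other SD_* values OR-ed into sched_domain->flags, trading cache affinity for wakeup latency. As a minimal sketch (not part of this patch; sd_wake_idle_far is a hypothetical helper), a balancing path would test it like any other domain flag:

    static inline int sd_wake_idle_far(struct sched_domain *sd)
    {
            /* SD_* values are distinct powers of two, so a mask test suffices */
            return sd->flags & SD_WAKE_IDLE_FAR;
    }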
@@ -732,12 +733,31 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+enum sched_domain_level {
+	SD_LV_NONE = 0,
+	SD_LV_SIBLING,
+	SD_LV_MC,
+	SD_LV_CPU,
+	SD_LV_NODE,
+	SD_LV_ALLNODES,
+	SD_LV_MAX
+};
+
+struct sched_domain_attr {
+	int relax_domain_level;
+};
+
+#define SD_ATTR_INIT (struct sched_domain_attr) { \
+	.relax_domain_level = -1, \
+}
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;  /* top domain must be null terminated */
 	struct sched_domain *child;   /* bottom domain must be null terminated */
 	struct sched_group *groups;   /* the balancing groups of the domain */
 	cpumask_t span;               /* span of all CPUs in this domain */
+	int first_cpu;                /* cache of the first cpu in this domain */
 	unsigned long min_interval;   /* Minimum balance interval ms */
 	unsigned long max_interval;   /* Maximum balance interval ms */
 	unsigned int busy_factor;     /* less balancing by factor if busy */
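Note: SD_ATTR_INIT is a compound literal that sets relax_domain_level to -1, which presumably means "no specific level requested". A hypothetical caller asking for balancing relaxed up to the multi-core level might set an attribute like this (illustrative sketch only; example_attr_setup is not a real kernel function):

    static void example_attr_setup(void)
    {
            /* default: relax_domain_level = -1, i.e. no request */
            struct sched_domain_attr attr = SD_ATTR_INIT;

            /* assumption: levels come from the new sched_domain_level enum */
            attr.relax_domain_level = SD_LV_MC;
            (void)attr;     /* quiet unused warnings in this standalone sketch */
    }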
@@ -749,6 +769,7 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	int flags;                    /* See SD_* */
+	enum sched_domain_level level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;   /* init to jiffies. units in jiffies */
@@ -788,7 +809,8 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
 #endif /* CONFIG_SMP */
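Note: partition_sched_domains() now takes a third argument, apparently an array of sched_domain_attr parallel to doms_new, one attribute per new domain. A hedged caller sketch (example_repartition is hypothetical; that NULL for dattr_new keeps defaults is an assumption, not stated by this hunk):

    static void example_repartition(void)
    {
            cpumask_t doms[1];
            struct sched_domain_attr dattr[1];

            doms[0] = cpu_online_map;       /* assumption: one domain over all online CPUs */
            dattr[0] = SD_ATTR_INIT;        /* default attributes for that domain */
            partition_sched_domains(1, doms, dattr);
    }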
@@ -888,7 +910,8 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
-	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+	void (*set_cpus_allowed)(struct task_struct *p,
+				 const cpumask_t *newmask);
 
 	void (*join_domain)(struct rq *rq);
 	void (*leave_domain)(struct rq *rq);
@@ -922,6 +945,7 @@ struct load_weight {
 struct sched_entity {
 	struct load_weight load;      /* for load-balancing */
 	struct rb_node run_node;
+	struct list_head group_node;
 	unsigned int on_rq;
 
 	u64 exec_start;
@@ -981,6 +1005,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
 
+	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity *parent;
 	/* rq on which this entity is (to be) queued: */
@@ -1501,15 +1526,21 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+				const cpumask_t *new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+				       const cpumask_t *new_mask)
 {
-	if (!cpu_isset(0, new_mask))
+	if (!cpu_isset(0, *new_mask))
 		return -EINVAL;
 	return 0;
 }
 #endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+	return set_cpus_allowed_ptr(p, &new_mask);
+}
 
 extern unsigned long long sched_clock(void);
 
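Note: the real entry point is now set_cpus_allowed_ptr(), which passes the mask by const pointer and so avoids copying a cpumask_t (whose size grows with NR_CPUS) through the call chain; the old by-value set_cpus_allowed() survives only as a wrapper. A minimal usage sketch (pin_to_cpu0 is a hypothetical name):

    static int pin_to_cpu0(struct task_struct *p)
    {
            cpumask_t mask = cpumask_of_cpu(0);     /* mask containing only CPU 0 */

            return set_cpus_allowed_ptr(p, &mask);
    }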
@@ -1550,7 +1581,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
@@ -1563,6 +1593,10 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
+int sched_rt_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
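Note: sched_rt_handler() has the proc sysctl handler signature of this era (including the struct file * argument), so it is presumably wired into a ctl_table entry elsewhere. Illustrative sketch only; this table and its entry are hypothetical, not the patch's actual sysctl registration:

    static struct ctl_table example_rt_table[] = {
            {
                    .procname       = "sched_rt_runtime_us",
                    .data           = &sysctl_sched_rt_runtime,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = &sched_rt_handler,
            },
            { .ctl_name = 0 }       /* sentinel */
    };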
@@ -2030,7 +2064,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
 extern int sched_mc_power_savings, sched_smt_power_savings;
@@ -2040,8 +2074,11 @@ extern void normalize_rt_tasks(void);
 #ifdef CONFIG_GROUP_SCHED
 
 extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+#endif
 
-extern struct task_group *sched_create_group(void);
+extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
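Note: sched_create_group() now takes a parent, so task groups form a hierarchy, rooted (under CONFIG_USER_SCHED) at the newly exported root_task_group. A hedged sketch of creating a child group under init_task_group (example_make_child_group is hypothetical; the IS_ERR() convention matches how such constructors usually report failure):

    static struct task_group *example_make_child_group(void)
    {
            struct task_group *tg = sched_create_group(&init_task_group);

            if (IS_ERR(tg))
                    return NULL;    /* e.g. allocation failure */
            return tg;
    }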
@@ -2052,6 +2089,9 @@ extern unsigned long sched_group_shares(struct task_group *tg);
 extern int sched_group_set_rt_runtime(struct task_group *tg,
 				long rt_runtime_us);
 extern long sched_group_rt_runtime(struct task_group *tg);
+extern int sched_group_set_rt_period(struct task_group *tg,
+				long rt_period_us);
+extern long sched_group_rt_period(struct task_group *tg);
 #endif
 #endif
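Note: the new period accessors mirror the existing runtime pair; going by the _us suffixes, both take microseconds. An illustrative sketch (example_set_rt_bandwidth is a hypothetical name) giving a group a 1 s period with a 0.95 s runtime budget:

    static int example_set_rt_bandwidth(struct task_group *tg)
    {
            int err;

            err = sched_group_set_rt_period(tg, 1000000);   /* 1 s period */
            if (err)
                    return err;
            return sched_group_set_rt_runtime(tg, 950000);  /* 0.95 s budget */
    }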