diff options
Diffstat (limited to 'include/linux/sched.h')
| -rw-r--r-- | include/linux/sched.h | 27 |
1 file changed, 21 insertions, 6 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index c62a9f84d614..115af05ecabd 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -190,6 +190,7 @@ extern unsigned long long time_sync_thresh; | |||
| 190 | /* in tsk->state again */ | 190 | /* in tsk->state again */ |
| 191 | #define TASK_DEAD 64 | 191 | #define TASK_DEAD 64 |
| 192 | #define TASK_WAKEKILL 128 | 192 | #define TASK_WAKEKILL 128 |
| 193 | #define TASK_WAKING 256 | ||
| 193 | 194 | ||
| 194 | /* Convenience macros for the sake of set_task_state */ | 195 | /* Convenience macros for the sake of set_task_state */ |
| 195 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) | 196 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) |
| @@ -256,7 +257,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); | |||
| 256 | extern void init_idle(struct task_struct *idle, int cpu); | 257 | extern void init_idle(struct task_struct *idle, int cpu); |
| 257 | extern void init_idle_bootup_task(struct task_struct *idle); | 258 | extern void init_idle_bootup_task(struct task_struct *idle); |
| 258 | 259 | ||
| 259 | extern int runqueue_is_locked(void); | 260 | extern int runqueue_is_locked(int cpu); |
| 260 | extern void task_rq_unlock_wait(struct task_struct *p); | 261 | extern void task_rq_unlock_wait(struct task_struct *p); |
| 261 | 262 | ||
| 262 | extern cpumask_var_t nohz_cpu_mask; | 263 | extern cpumask_var_t nohz_cpu_mask; |
| @@ -802,14 +803,14 @@ enum cpu_idle_type { | |||
| 802 | #define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ | 803 | #define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ |
| 803 | #define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ | 804 | #define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ |
| 804 | #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ | 805 | #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ |
| 805 | #define SD_WAKE_IDLE 0x0010 /* Wake to idle CPU on task wakeup */ | 806 | #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ |
| 806 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ | 807 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ |
| 807 | #define SD_WAKE_BALANCE 0x0040 /* Perform balancing at task wakeup */ | 808 | #define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */ |
| 808 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ | 809 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ |
| 809 | #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ | 810 | #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ |
| 810 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ | 811 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ |
| 811 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ | 812 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ |
| 812 | #define SD_WAKE_IDLE_FAR 0x0800 /* Gain latency sacrificing cache hit */ | 813 | |
| 813 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ | 814 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ |
| 814 | 815 | ||
| 815 | enum powersavings_balance_level { | 816 | enum powersavings_balance_level { |
| @@ -991,6 +992,9 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag) | |||
| 991 | return 0; | 992 | return 0; |
| 992 | } | 993 | } |
| 993 | 994 | ||
| 995 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu); | ||
| 996 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); | ||
| 997 | |||
| 994 | #else /* CONFIG_SMP */ | 998 | #else /* CONFIG_SMP */ |
| 995 | 999 | ||
| 996 | struct sched_domain_attr; | 1000 | struct sched_domain_attr; |
| @@ -1002,6 +1006,7 @@ partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | |||
| 1002 | } | 1006 | } |
| 1003 | #endif /* !CONFIG_SMP */ | 1007 | #endif /* !CONFIG_SMP */ |
| 1004 | 1008 | ||
| 1009 | |||
| 1005 | struct io_context; /* See blkdev.h */ | 1010 | struct io_context; /* See blkdev.h */ |
| 1006 | 1011 | ||
| 1007 | 1012 | ||
| @@ -1019,6 +1024,12 @@ struct uts_namespace; | |||
| 1019 | struct rq; | 1024 | struct rq; |
| 1020 | struct sched_domain; | 1025 | struct sched_domain; |
| 1021 | 1026 | ||
| 1027 | /* | ||
| 1028 | * wake flags | ||
| 1029 | */ | ||
| 1030 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ ||
| 1031 | #define WF_FORK 0x02 /* child wakeup after fork */ | ||
| 1032 | |||
| 1022 | struct sched_class { | 1033 | struct sched_class { |
| 1023 | const struct sched_class *next; | 1034 | const struct sched_class *next; |
| 1024 | 1035 | ||
| @@ -1026,13 +1037,13 @@ struct sched_class { | |||
| 1026 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); | 1037 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); |
| 1027 | void (*yield_task) (struct rq *rq); | 1038 | void (*yield_task) (struct rq *rq); |
| 1028 | 1039 | ||
| 1029 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); | 1040 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
| 1030 | 1041 | ||
| 1031 | struct task_struct * (*pick_next_task) (struct rq *rq); | 1042 | struct task_struct * (*pick_next_task) (struct rq *rq); |
| 1032 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 1043 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
| 1033 | 1044 | ||
| 1034 | #ifdef CONFIG_SMP | 1045 | #ifdef CONFIG_SMP |
| 1035 | int (*select_task_rq)(struct task_struct *p, int sync); | 1046 | int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); |
| 1036 | 1047 | ||
| 1037 | unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, | 1048 | unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, |
| 1038 | struct rq *busiest, unsigned long max_load_move, | 1049 | struct rq *busiest, unsigned long max_load_move, |
| @@ -1064,6 +1075,8 @@ struct sched_class { | |||
| 1064 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, | 1075 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
| 1065 | int oldprio, int running); | 1076 | int oldprio, int running); |
| 1066 | 1077 | ||
| 1078 | unsigned int (*get_rr_interval) (struct task_struct *task); | ||
| 1079 | |||
| 1067 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1080 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 1068 | void (*moved_group) (struct task_struct *p); | 1081 | void (*moved_group) (struct task_struct *p); |
| 1069 | #endif | 1082 | #endif |
| @@ -1102,6 +1115,8 @@ struct sched_entity { | |||
| 1102 | u64 start_runtime; | 1115 | u64 start_runtime; |
| 1103 | u64 avg_wakeup; | 1116 | u64 avg_wakeup; |
| 1104 | 1117 | ||
| 1118 | u64 avg_running; | ||
| 1119 | |||
| 1105 | #ifdef CONFIG_SCHEDSTATS | 1120 | #ifdef CONFIG_SCHEDSTATS |
| 1106 | u64 wait_start; | 1121 | u64 wait_start; |
| 1107 | u64 wait_max; | 1122 | u64 wait_max; |
