path: root/include/linux/sched.h
author     Ingo Molnar <mingo@elte.hu>  2007-07-09 12:51:58 -0400
committer  Ingo Molnar <mingo@elte.hu>  2007-07-09 12:51:58 -0400
commit     20b8a59f2461e1be911dce2cfafefab9d22e4eee (patch)
tree       e40e8558726629818b6dbdcdd64e93823f719fcd /include/linux/sched.h
parent     fa72e9e484c16f0c9aee23981917d8c8c03f0482 (diff)
sched: cfs, core data types
add the CFS data types to sched.h. (the old scheduler is still fully intact.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  82
1 file changed, 82 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 90420321994f..995eb407c234 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -820,6 +820,86 @@ enum sleep_type {
 };
 
 struct prio_array;
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+	struct sched_class *next;
+
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p,
+			      int wakeup, u64 now);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p,
+			      int sleep, u64 now);
+	void (*yield_task) (struct rq *rq, struct task_struct *p);
+
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+	struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+	void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+
+	int (*load_balance) (struct rq *this_rq, int this_cpu,
+			struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved);
+
+	void (*set_curr_task) (struct rq *rq);
+	void (*task_tick) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
+};
+
+struct load_weight {
+	unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group etc)
+ *
+ * Current field usage histogram:
+ *
+ *     4 se->block_start
+ *     4 se->run_node
+ *     4 se->sleep_start
+ *     4 se->sleep_start_fair
+ *     6 se->load.weight
+ *     7 se->delta_fair
+ *    15 se->wait_runtime
+ */
+struct sched_entity {
+	long			wait_runtime;
+	unsigned long		delta_fair_run;
+	unsigned long		delta_fair_sleep;
+	unsigned long		delta_exec;
+	s64			fair_key;
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	unsigned int		on_rq;
+
+	u64			wait_start_fair;
+	u64			wait_start;
+	u64			exec_start;
+	u64			sleep_start;
+	u64			sleep_start_fair;
+	u64			block_start;
+	u64			sleep_max;
+	u64			block_max;
+	u64			exec_max;
+	u64			wait_max;
+	u64			last_ran;
+
+	u64			sum_exec_runtime;
+	s64			sum_wait_runtime;
+	s64			sum_sleep_runtime;
+	unsigned long		wait_runtime_overruns;
+	unsigned long		wait_runtime_underruns;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity	*parent;
+	/* rq on which this entity is (to be) queued: */
+	struct cfs_rq		*cfs_rq;
+	/* rq "owned" by this entity/group: */
+	struct cfs_rq		*my_q;
+#endif
+};
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -839,6 +919,8 @@ struct task_struct {
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
 	struct prio_array *array;
+	struct sched_class *sched_class;
+	struct sched_entity se;
 
 	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
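
The interesting part of the first hunk is the hook-table pattern: a scheduling policy is just a struct sched_class full of function pointers, each task carries a pointer to its class plus an embedded struct sched_entity for the class's per-task bookkeeping, and the core dispatches through the table without knowing which policy it is talking to. Below is a minimal, self-contained userspace sketch of that pattern; the simplified rq/task types, the reduced hook set, and the toy FIFO policy are assumptions made for illustration, not the kernel's actual definitions.

/*
 * Illustrative sketch of the scheduler-class pattern introduced above:
 * a policy supplies a table of hooks and the core only calls through it.
 * All types here are simplified stand-ins, not kernel code.
 */
#include <stdio.h>

struct rq;
struct task;

struct sched_class {			/* subset of the hooks in the patch */
	void (*enqueue_task)(struct rq *rq, struct task *p);
	void (*dequeue_task)(struct rq *rq, struct task *p);
	struct task *(*pick_next_task)(struct rq *rq);
};

struct task {				/* stand-in for task_struct */
	const char *name;
	struct task *next;		/* toy run list */
	const struct sched_class *sched_class;
};

struct rq {				/* toy runqueue: a FIFO list */
	struct task *head, *tail;
};

/* A trivial FIFO policy implementing the hooks. */
static void fifo_enqueue(struct rq *rq, struct task *p)
{
	p->next = NULL;
	if (rq->tail)
		rq->tail->next = p;
	else
		rq->head = p;
	rq->tail = p;
}

static void fifo_dequeue(struct rq *rq, struct task *p)
{
	struct task **pp, *prev = NULL;

	for (pp = &rq->head; *pp; prev = *pp, pp = &(*pp)->next) {
		if (*pp == p) {
			*pp = p->next;
			if (rq->tail == p)
				rq->tail = prev;
			p->next = NULL;
			return;
		}
	}
}

static struct task *fifo_pick_next(struct rq *rq)
{
	return rq->head;		/* NULL when nothing is runnable */
}

static const struct sched_class fifo_sched_class = {
	.enqueue_task	= fifo_enqueue,
	.dequeue_task	= fifo_dequeue,
	.pick_next_task	= fifo_pick_next,
};

/* The "core": policy-agnostic, it only calls through p->sched_class. */
static void core_wake_up(struct rq *rq, struct task *p)
{
	p->sched_class->enqueue_task(rq, p);
}

int main(void)
{
	struct rq rq = { NULL, NULL };
	struct task a = { "a", NULL, &fifo_sched_class };
	struct task b = { "b", NULL, &fifo_sched_class };
	struct task *next;

	core_wake_up(&rq, &a);
	core_wake_up(&rq, &b);

	next = fifo_sched_class.pick_next_task(&rq);	/* picks "a" */
	printf("next: %s\n", next->name);

	next->sched_class->dequeue_task(&rq, next);
	next = fifo_sched_class.pick_next_task(&rq);	/* now "b" */
	printf("next: %s\n", next->name);
	return 0;
}

In the patch itself the same shape shows up in the second hunk: task_struct gains a sched_class pointer and an embedded sched_entity, so the core scheduler can stay policy-agnostic while each class keeps its per-task state (wait_runtime, the rb-tree run_node, the load weight, and so on) inside the entity.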