Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 251
 1 file changed, 178 insertions(+), 73 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 693f0e6c54d4..cfb680585ab8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,8 @@
 #define SCHED_FIFO		1
 #define SCHED_RR		2
 #define SCHED_BATCH		3
+/* SCHED_ISO: reserved but not implemented yet */
+#define SCHED_IDLE		5
 
 #ifdef __KERNEL__
 
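
Note: SCHED_IDLE is the new lowest-priority policy slot; value 4 is left free for the SCHED_ISO placeholder above. A minimal userspace sketch of opting the calling task into it, assuming a kernel with this patch; the constant is defined locally in case the installed libc headers predate it:

/* Sketch: request the new SCHED_IDLE policy for the current task.
 * SCHED_IDLE is defined locally to match the value added above. */
#include <sched.h>
#include <stdio.h>

#ifndef SCHED_IDLE
#define SCHED_IDLE 5
#endif

int main(void)
{
	struct sched_param param = { .sched_priority = 0 }; /* must be 0 for non-RT policies */

	if (sched_setscheduler(0, SCHED_IDLE, &param) != 0) {
		perror("sched_setscheduler(SCHED_IDLE)");
		return 1;
	}
	printf("policy is now %d\n", sched_getscheduler(0));
	return 0;
}
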
@@ -130,6 +132,26 @@ extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long weighted_cpuload(const int cpu);
 
+struct seq_file;
+struct cfs_rq;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now);
+#else
+static inline void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+}
+#endif
 
 /*
  * Task state bitmask. NOTE! These bits are also
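
Note: these hooks only do anything when CONFIG_SCHED_DEBUG is set; proc_sched_show_task() and proc_sched_set_task() presumably back a per-task debug file (in later mainline this is /proc/<pid>/sched, where reading dumps the stats and writing resets them; treat that path as an assumption, it is not guaranteed by this header). A throwaway reader under that assumption:

/* Sketch: dump the per-task scheduler debug state of the current process.
 * Assumes CONFIG_SCHED_DEBUG and a /proc/<pid>/sched export. */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/proc/self/sched", "r");

	if (!f) {
		perror("/proc/self/sched");
		return 1;
	}
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}
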
@@ -193,6 +215,7 @@ struct task_struct;
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -479,7 +502,7 @@ struct signal_struct {
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
-	unsigned long long sched_time;
+	unsigned long long sum_sched_runtime;
 
	/*
	 * We don't bother to synchronize most readers of this at all,
@@ -521,31 +544,6 @@ struct signal_struct {
 #define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
 
-
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-
-#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
-#define rt_task(p)		rt_prio((p)->prio)
-#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
-#define is_rt_policy(p)		((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
-#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
-
 /*
  * Some day this will be a full-fledged user tracking system..
  */
@@ -583,13 +581,13 @@ struct reclaim_state;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
	/* cumulative counters */
-	unsigned long	cpu_time,	/* time spent on the cpu */
-			run_delay,	/* time spent waiting on a runqueue */
-			pcnt;		/* # of timeslices run on this cpu */
+	unsigned long pcnt;	      /* # of times run on this cpu */
+	unsigned long long cpu_time,  /* time spent on the cpu */
+			   run_delay; /* time spent waiting on a runqueue */
 
	/* timestamps */
-	unsigned long	last_arrival,	/* when we last ran on a cpu */
-			last_queued;	/* when we were last queued to run */
+	unsigned long long last_arrival,/* when we last ran on a cpu */
+			   last_queued;	/* when we were last queued to run */
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
@@ -639,18 +637,24 @@ static inline int sched_info_on(void)
 #endif
 }
 
-enum idle_type
-{
-	SCHED_IDLE,
-	NOT_IDLE,
-	NEWLY_IDLE,
-	MAX_IDLE_TYPES
+enum cpu_idle_type {
+	CPU_IDLE,
+	CPU_NOT_IDLE,
+	CPU_NEWLY_IDLE,
+	CPU_MAX_IDLE_TYPES
 };
 
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
+
+/*
+ * Increase resolution of nice-level calculations:
+ */
+#define SCHED_LOAD_SHIFT	10
+#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
+
+#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 5)
 
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
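
Note: the load-scale change is easy to miss: SCHED_LOAD_SCALE grows from 128 to 1 << 10 = 1024, giving load values more fractional resolution, and SCHED_LOAD_SCALE_FUZZ works out to 1024 >> 5 = 32. A quick check of the arithmetic (values copied from the definitions above; nothing here is kernel code):

#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 5)

int main(void)
{
	printf("SCHED_LOAD_SCALE      = %ld (was 128)\n", SCHED_LOAD_SCALE);	/* 1024 */
	printf("SCHED_LOAD_SCALE_FUZZ = %ld\n", SCHED_LOAD_SCALE_FUZZ);		/* 32 */
	return 0;
}
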
@@ -719,14 +723,14 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
-	unsigned long lb_cnt[MAX_IDLE_TYPES];
-	unsigned long lb_failed[MAX_IDLE_TYPES];
-	unsigned long lb_balanced[MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[MAX_IDLE_TYPES];
-	unsigned long lb_gained[MAX_IDLE_TYPES];
-	unsigned long lb_hot_gained[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
	/* Active load balancing */
	unsigned long alb_cnt;
@@ -753,12 +757,6 @@ struct sched_domain {
 extern int partition_sched_domains(cpumask_t *partition1,
				    cpumask_t *partition2);
 
-/*
- * Maximum cache size the migration-costs auto-tuning code will
- * search from:
- */
-extern unsigned int max_cache_size;
-
 #endif	/* CONFIG_SMP */
 
 
@@ -809,14 +807,86 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-enum sleep_type {
-	SLEEP_NORMAL,
-	SLEEP_NONINTERACTIVE,
-	SLEEP_INTERACTIVE,
-	SLEEP_INTERRUPTED,
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+	struct sched_class *next;
+
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p,
+			      int wakeup, u64 now);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p,
+			      int sleep, u64 now);
+	void (*yield_task) (struct rq *rq, struct task_struct *p);
+
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+	struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+	void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+
+	int (*load_balance) (struct rq *this_rq, int this_cpu,
+			struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved);
+
+	void (*set_curr_task) (struct rq *rq);
+	void (*task_tick) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
 };
 
-struct prio_array;
+struct load_weight {
+	unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group etc)
+ *
+ * Current field usage histogram:
+ *
+ *     4 se->block_start
+ *     4 se->run_node
+ *     4 se->sleep_start
+ *     4 se->sleep_start_fair
+ *     6 se->load.weight
+ *     7 se->delta_fair
+ *    15 se->wait_runtime
+ */
+struct sched_entity {
+	long			wait_runtime;
+	unsigned long		delta_fair_run;
+	unsigned long		delta_fair_sleep;
+	unsigned long		delta_exec;
+	s64			fair_key;
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	unsigned int		on_rq;
+
+	u64			wait_start_fair;
+	u64			wait_start;
+	u64			exec_start;
+	u64			sleep_start;
+	u64			sleep_start_fair;
+	u64			block_start;
+	u64			sleep_max;
+	u64			block_max;
+	u64			exec_max;
+	u64			wait_max;
+	u64			last_ran;
+
+	u64			sum_exec_runtime;
+	s64			sum_wait_runtime;
+	s64			sum_sleep_runtime;
+	unsigned long		wait_runtime_overruns;
+	unsigned long		wait_runtime_underruns;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity	*parent;
+	/* rq on which this entity is (to be) queued: */
+	struct cfs_rq		*cfs_rq;
+	/* rq "owned" by this entity/group: */
+	struct cfs_rq		*my_q;
+#endif
+};
 
 struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
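
Note: struct sched_class is the new indirection point: each scheduling policy supplies one instance and the core scheduler calls through these hooks instead of hard-coding policy logic, selecting a task's class via the new p->sched_class pointer below. The sketch that follows is a hypothetical, kernel-internal illustration of the shape of such an instance; the real classes (e.g. the CFS one) live in kernel/sched*.c and are not part of this diff, and the stub bodies here are placeholders only:

/* Hypothetical illustration of a scheduling class wired into the new
 * sched_class hook table. Kernel-internal context is assumed (struct rq,
 * struct task_struct, u64); hook bodies are empty placeholders. */
static void demo_enqueue_task(struct rq *rq, struct task_struct *p,
			      int wakeup, u64 now)
{
	/* add p to this class's runqueue bookkeeping */
}

static struct task_struct *demo_pick_next_task(struct rq *rq, u64 now)
{
	return NULL;	/* nothing runnable in this class */
}

static struct sched_class demo_sched_class = {
	.next			= NULL,		/* lower-priority class, if any */
	.enqueue_task		= demo_enqueue_task,
	.pick_next_task		= demo_pick_next_task,
	/* remaining hooks (dequeue_task, yield_task, check_preempt_curr,
	 * put_prev_task, load_balance, set_curr_task, task_tick, task_new)
	 * omitted for brevity */
};
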
@@ -832,23 +902,20 @@ struct task_struct {
	int oncpu;
 #endif
 #endif
-	int load_weight;	/* for niceness load balancing purposes */
+
	int prio, static_prio, normal_prio;
	struct list_head run_list;
-	struct prio_array *array;
+	struct sched_class *sched_class;
+	struct sched_entity se;
 
	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
 #endif
-	unsigned long sleep_avg;
-	unsigned long long timestamp, last_ran;
-	unsigned long long sched_time;	/* sched_clock time spent running */
-	enum sleep_type sleep_type;
 
	unsigned int policy;
	cpumask_t cpus_allowed;
-	unsigned int time_slice, first_time_slice;
+	unsigned int time_slice;
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
@@ -1078,6 +1145,37 @@ struct task_struct {
 #endif
 };
 
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO	100
+#define MAX_RT_PRIO		MAX_USER_RT_PRIO
+
+#define MAX_PRIO		(MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+	if (unlikely(prio < MAX_RT_PRIO))
+		return 1;
+	return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+	return rt_prio(p->prio);
+}
+
 static inline pid_t process_group(struct task_struct *tsk)
 {
	return tsk->signal->pgrp;
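
Note: concretely, with these values prio 0..99 is the RT range (MAX_RT_PRIO = 100), prio 100..139 is the SCHED_NORMAL/SCHED_BATCH range (MAX_PRIO = 140), and DEFAULT_PRIO = 120 corresponds to a nice-0 task. A small worked example of the nice-to-prio mapping implied by those ranges; the nice_to_prio() helper below is a local reconstruction (nice -20..+19 fills the 40-slot window), not a macro shown in this diff:

#include <stdio.h>

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO
#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

static int nice_to_prio(int nice)	/* nice in -20..19 */
{
	return MAX_RT_PRIO + nice + 20;
}

int main(void)
{
	printf("RT prios:     0..%d\n", MAX_RT_PRIO - 1);		/* 0..99   */
	printf("normal prios: %d..%d\n", MAX_RT_PRIO, MAX_PRIO - 1);	/* 100..139 */
	printf("nice 0   -> prio %d (DEFAULT_PRIO = %d)\n", nice_to_prio(0), DEFAULT_PRIO);
	printf("nice -20 -> prio %d, nice 19 -> prio %d\n",
	       nice_to_prio(-20), nice_to_prio(19));
	return 0;
}
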
@@ -1223,7 +1321,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 extern unsigned long long
-current_sched_time(const struct task_struct *current_task);
+task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1232,6 +1330,8 @@ extern void sched_exec(void);
 #define sched_exec()   {}
 #endif
 
+extern void sched_clock_unstable_event(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void idle_task_exit(void);
 #else
@@ -1240,6 +1340,14 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
+extern unsigned int sysctl_sched_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_batch_wakeup_granularity;
+extern unsigned int sysctl_sched_stat_granularity;
+extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_features;
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
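
Note: these are the CFS tunables (granularities, runtime limit, child-runs-first, feature bits), presumably exported under /proc/sys/kernel/ by kernel/sysctl.c; the exact file names are not part of this header, so the sketch below enumerates whatever sched_* entries the running kernel exposes rather than hard-coding names:

/* Sketch: list scheduler tunables the running kernel exports under
 * /proc/sys/kernel/. Exact names are an assumption, not given here. */
#include <glob.h>
#include <stdio.h>

int main(void)
{
	glob_t g;
	size_t i;
	char buf[64];

	if (glob("/proc/sys/kernel/sched_*", 0, NULL, &g) != 0)
		return 1;
	for (i = 0; i < g.gl_pathc; i++) {
		FILE *f = fopen(g.gl_pathv[i], "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s = %s", g.gl_pathv[i], buf);
		fclose(f);
	}
	globfree(&g);
	return 0;
}
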
@@ -1317,8 +1425,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
-extern void FASTCALL(sched_exit(struct task_struct * p));
+extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_dead(struct task_struct *p);
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1406,7 +1514,7 @@ extern struct mm_struct * mm_alloc(void);
 extern void FASTCALL(__mmdrop(struct mm_struct *));
 static inline void mmdrop(struct mm_struct * mm)
 {
-	if (atomic_dec_and_test(&mm->mm_count))
+	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
 }
 
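
Note: the only change to mmdrop() is the unlikely() annotation: dropping the last reference is the rare path, so the hint keeps the common "count still positive" path straight-line. unlikely() is the usual __builtin_expect wrapper; a minimal stand-alone equivalent (the helper names below are hypothetical, for illustration only):

#define unlikely(x)	__builtin_expect(!!(x), 0)

static int refcount_dec_and_test_demo(int *count)	/* hypothetical helper */
{
	return --(*count) == 0;
}

void drop_ref_demo(int *count)
{
	if (unlikely(refcount_dec_and_test_demo(count))) {
		/* rare path: last reference gone, free the object here */
	}
}
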
@@ -1638,10 +1746,7 @@ static inline unsigned int task_cpu(const struct task_struct *p)
	return task_thread_info(p)->cpu;
 }
 
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	task_thread_info(p)->cpu = cpu;
-}
+extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
 #else
 