author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-15 11:22:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-15 11:22:16 -0400
commit	b5869ce7f68b233ceb81465a7644be0d9a5f3dbb (patch)
tree	e3611e7f038a4a4fa813532ae57a9a626fa1434d /include
parent	df3d80f5a5c74168be42788364d13cf6c83c7b9c (diff)
parent	9c63d9c021f375a2708ad79043d6f4dd1291a085 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched: (140 commits)
  sched: sync wakeups preempt too
  sched: affine sync wakeups
  sched: guest CPU accounting: maintain guest state in KVM
  sched: guest CPU accounting: maintain stats in account_system_time()
  sched: guest CPU accounting: add guest-CPU /proc/<pid>/stat fields
  sched: guest CPU accounting: add guest-CPU /proc/stat field
  sched: domain sysctl fixes: add terminator comment
  sched: domain sysctl fixes: do not crash on allocation failure
  sched: domain sysctl fixes: unregister the sysctl table before domains
  sched: domain sysctl fixes: use for_each_online_cpu()
  sched: domain sysctl fixes: use kcalloc()
  Make scheduler debug file operations const
  sched: enable wake-idle on CONFIG_SCHED_MC=y
  sched: reintroduce topology.h tunings
  sched: allow the immediate migration of cache-cold tasks
  sched: debug, improve migration statistics
  sched: debug: increase width of debug line
  sched: activate task_hot() only on fair-scheduled tasks
  sched: reintroduce cache-hot affinity
  sched: speed up context-switches a bit
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/linux/kernel_stat.h	 1
-rw-r--r--	include/linux/sched.h	99
-rw-r--r--	include/linux/topology.h	 5
3 files changed, 74 insertions(+), 31 deletions(-)
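
The guest-CPU accounting commits in this series surface the new counter as an extra column on the cpu lines of /proc/stat (and as new per-task fields in /proc/<pid>/stat). As a rough illustration of what that buys userspace, a minimal monitoring snippet might read it back as below. The field order (user, nice, system, idle, iowait, irq, softirq, steal, guest) mirrors struct cpu_usage_stat in the diff that follows; treat the parsing as a sketch, not a reference implementation.

#include <stdio.h>

/*
 * Illustrative only: read the aggregate "cpu" line of /proc/stat and
 * print the guest column introduced by this merge.
 */
int main(void)
{
	unsigned long long user, nice, sys, idle, iowait, irq, softirq;
	unsigned long long steal, guest = 0;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   &user, &nice, &sys, &idle, &iowait, &irq, &softirq,
		   &steal, &guest) < 9)
		guest = 0;	/* older kernels have no guest column */
	fclose(f);
	printf("guest time: %llu ticks\n", guest);
	return 0;
}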
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 43e895f1cabe..12bf44f083f5 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -23,6 +23,7 @@ struct cpu_usage_stat {
 	cputime64_t idle;
 	cputime64_t iowait;
 	cputime64_t steal;
+	cputime64_t guest;
 };
 
 struct kernel_stat {
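
The new guest field is fed from the accounting path rather than written directly by KVM: per the "maintain stats in account_system_time()" commit, time accounted to a task flagged as a virtual CPU (PF_VCPU, added later in this diff) is credited to the guest bucket. A simplified sketch of that branch, assuming the usual cputime helpers; the exact body in the merge differs in detail:

/*
 * Simplified sketch, not the verbatim kernel code: system time hitting
 * a task that is currently running guest code (PF_VCPU set) is
 * credited to the task's gtime and the per-cpu guest bucket instead of
 * the normal system buckets.
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime)
{
	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;

	if (p->flags & PF_VCPU) {
		p->utime = cputime_add(p->utime, cputime);
		p->gtime = cputime_add(p->gtime, cputime);
		cpustat->user = cputime64_add(cpustat->user,
					      cputime_to_cputime64(cputime));
		cpustat->guest = cputime64_add(cpustat->guest,
					       cputime_to_cputime64(cputime));
		return;
	}
	/* ... existing system/irq/softirq/steal/idle accounting ... */
}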
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 833f7dc2b8de..228e0a8ce248 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -87,6 +87,7 @@ struct sched_param {
 #include <linux/timer.h>
 #include <linux/hrtimer.h>
 #include <linux/task_io_accounting.h>
+#include <linux/kobject.h>
 
 #include <asm/processor.h>
 
@@ -136,6 +137,7 @@ extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
+struct task_group;
 #ifdef CONFIG_SCHED_DEBUG
 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
@@ -174,8 +176,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
 /* in tsk->state again */
-#define TASK_NONINTERACTIVE	64
-#define TASK_DEAD		128
+#define TASK_DEAD		64
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -516,6 +517,8 @@ struct signal_struct {
 	 * in __exit_signal, except for the group leader.
 	 */
 	cputime_t utime, stime, cutime, cstime;
+	cputime_t gtime;
+	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
@@ -596,8 +599,21 @@ struct user_struct {
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	uid_t uid;
+
+#ifdef CONFIG_FAIR_USER_SCHED
+	struct task_group *tg;
+	struct kset kset;
+	struct subsys_attribute user_attr;
+	struct work_struct work;
+#endif
 };
 
+#ifdef CONFIG_FAIR_USER_SCHED
+extern int uids_kobject_init(void);
+#else
+static inline int uids_kobject_init(void) { return 0; }
+#endif
+
 extern struct user_struct *find_user(uid_t);
 
 extern struct user_struct root_user;
@@ -609,13 +625,17 @@ struct reclaim_state;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
-	unsigned long pcnt;	      /* # of times run on this cpu */
+	unsigned long pcount;	      /* # of times run on this cpu */
 	unsigned long long cpu_time,  /* time spent on the cpu */
 			   run_delay; /* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
 			   last_queued;	/* when we were last queued to run */
+#ifdef CONFIG_SCHEDSTATS
+	/* BKL stats */
+	unsigned long bkl_count;
+#endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
@@ -750,7 +770,7 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_count[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
@@ -760,17 +780,17 @@ struct sched_domain {
 	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
-	unsigned long alb_cnt;
+	unsigned long alb_count;
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
 	/* SD_BALANCE_EXEC stats */
-	unsigned long sbe_cnt;
+	unsigned long sbe_count;
 	unsigned long sbe_balanced;
 	unsigned long sbe_pushed;
 
 	/* SD_BALANCE_FORK stats */
-	unsigned long sbf_cnt;
+	unsigned long sbf_count;
 	unsigned long sbf_balanced;
 	unsigned long sbf_pushed;
 
@@ -854,11 +874,11 @@ struct rq;
 struct sched_domain;
 
 struct sched_class {
-	struct sched_class *next;
+	const struct sched_class *next;
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
-	void (*yield_task) (struct rq *rq, struct task_struct *p);
+	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
 
@@ -888,31 +908,22 @@ struct load_weight {
  *  4 se->block_start
  *  4 se->run_node
  *  4 se->sleep_start
- *  4 se->sleep_start_fair
  *  6 se->load.weight
- *  7 se->delta_fair
- * 15 se->wait_runtime
  */
 struct sched_entity {
-	long			wait_runtime;
-	unsigned long		delta_fair_run;
-	unsigned long		delta_fair_sleep;
-	unsigned long		delta_exec;
-	s64			fair_key;
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
 	unsigned int		on_rq;
+	int			peer_preempt;
 
 	u64			exec_start;
 	u64			sum_exec_runtime;
+	u64			vruntime;
 	u64			prev_sum_exec_runtime;
-	u64			wait_start_fair;
-	u64			sleep_start_fair;
 
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
-	s64			sum_wait_runtime;
 
 	u64			sleep_start;
 	u64			sleep_max;
@@ -921,9 +932,25 @@ struct sched_entity {
 	u64			block_start;
 	u64			block_max;
 	u64			exec_max;
-
-	unsigned long		wait_runtime_overruns;
-	unsigned long		wait_runtime_underruns;
+	u64			slice_max;
+
+	u64			nr_migrations;
+	u64			nr_migrations_cold;
+	u64			nr_failed_migrations_affine;
+	u64			nr_failed_migrations_running;
+	u64			nr_failed_migrations_hot;
+	u64			nr_forced_migrations;
+	u64			nr_forced2_migrations;
+
+	u64			nr_wakeups;
+	u64			nr_wakeups_sync;
+	u64			nr_wakeups_migrate;
+	u64			nr_wakeups_local;
+	u64			nr_wakeups_remote;
+	u64			nr_wakeups_affine;
+	u64			nr_wakeups_affine_attempts;
+	u64			nr_wakeups_passive;
+	u64			nr_wakeups_idle;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -952,7 +979,7 @@ struct task_struct {
 
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	struct sched_class *sched_class;
+	const struct sched_class *sched_class;
 	struct sched_entity se;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -1023,6 +1050,7 @@ struct task_struct {
 
 	unsigned int rt_priority;
 	cputime_t utime, stime;
+	cputime_t gtime;
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time;		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
@@ -1314,6 +1342,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
+#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
 #define PF_DUMPCORE	0x00000200	/* dumped core */
@@ -1401,15 +1430,17 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_nr_latency;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
-extern unsigned int sysctl_sched_stat_granularity;
-extern unsigned int sysctl_sched_runtime_limit;
-extern unsigned int sysctl_sched_compat_yield;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
+extern unsigned int sysctl_sched_migration_cost;
+#endif
+
+extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
@@ -1843,6 +1874,18 @@ extern int sched_mc_power_savings, sched_smt_power_savings;
 
 extern void normalize_rt_tasks(void);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+extern struct task_group init_task_group;
+
+extern struct task_group *sched_create_group(void);
+extern void sched_destroy_group(struct task_group *tg);
+extern void sched_move_task(struct task_struct *tsk);
+extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+extern unsigned long sched_group_shares(struct task_group *tg);
+
+#endif
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
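
The CONFIG_FAIR_GROUP_SCHED declarations above are the whole in-kernel surface for group scheduling: create a task_group, weight it with shares (1024 is the default), and have the scheduler requeue tasks according to their group. A sketch of the API shape follows; note that in this merge a task's group is derived from policy such as its uid under CONFIG_FAIR_USER_SCHED, so the helper below is hypothetical, not a real call site:

/*
 * Hypothetical helper: create a fair group, give it twice the default
 * weight, and ask the scheduler to requeue the task according to its
 * (policy-determined) group.
 */
static struct task_group *make_boosted_group(struct task_struct *tsk)
{
	struct task_group *tg = sched_create_group();

	if (IS_ERR(tg))
		return tg;
	sched_group_set_shares(tg, 2048);	/* default share is 1024 */
	sched_move_task(tsk);
	return tg;
}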
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 525d437b1253..47729f18bfdf 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -159,15 +159,14 @@
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 2,			\
-	.idle_idx		= 0,			\
-	.newidle_idx		= 0,			\
+	.idle_idx		= 1,			\
+	.newidle_idx		= 2,			\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
 				| SD_WAKE_AFFINE	\
-				| SD_WAKE_IDLE		\
 				| BALANCE_FOR_PKG_POWER,\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
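
The idle_idx and newidle_idx values raised here select which entry of the runqueue's cpu_load[] history load balancing consults: index 0 is the instantaneous weighted load, higher indexes are longer-term, slower-decaying averages, so idle and new-idle balancing become more conservative. Roughly how the balancer consumes these indexes, simplified from the load-balancing code of this era (weighted_cpuload() is the helper declared in sched.h above):

/*
 * Simplified sketch: 'type' is one of the sched_domain *_idx tunables.
 * Index 0 returns the instantaneous weighted load; higher values read
 * progressively slower-decaying averages from rq->cpu_load[], which is
 * what makes a larger idle_idx/newidle_idx more conservative.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0)
		return total;

	return min(rq->cpu_load[type - 1], total);
}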