| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-26 16:08:01 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-26 16:08:01 -0400 |
| commit | 79071638ce655c1f78a50d05c7dae0ad04a3e92a (patch) | |
| tree | d9e76997c418b78a2485ac50d5970f7d420a5600 /include/linux/sched.h | |
| parent | 44a6b8442190cf213081060b610dae2e822f802b (diff) | |
| parent | 8323f26ce3425460769605a6aece7a174edaa7d1 (diff) | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
"The biggest change is a performance improvement on SMP systems:
| 4 socket 40 core + SMT Westmere box, single 30 sec tbench
| runs, higher is better:
|
| clients 1 2 4 8 16 32 64 128
|..........................................................................
| pre 30 41 118 645 3769 6214 12233 14312
| post 299 603 1211 2418 4697 6847 11606 14557
|
| A nice increase in performance.
This speedup is particularly noticeable on heavily interacting,
few-task workloads, so the changes should also help desktop-style Xorg
workloads and interactivity on multi-core CPUs.
There are also cpuset suspend behavior fixes/restructuring and various
smaller tweaks."
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Fix race in task_group()
sched: Improve balance_cpu() to consider other cpus in its group as target of (pinned) task
sched: Reset loop counters if all tasks are pinned and we need to redo load balance
sched: Reorder 'struct lb_env' members to reduce its size
sched: Improve scalability via 'CPU buddies', which withstand random perturbations
cpusets: Remove/update outdated comments
cpusets, hotplug: Restructure functions that are invoked during hotplug
cpusets, hotplug: Implement cpuset tree traversal in a helper function
CPU hotplug, cpusets, suspend: Don't modify cpusets during suspend/resume
sched/x86: Remove broken power estimation
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 6 |
1 file changed, 5 insertions, 1 deletion
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1a2ebd39b800..a721cef7e2d4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -949,6 +949,7 @@ struct sched_domain {
 	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	int level;
+	int idle_buddy;			/* cpu assigned to select_idle_sibling() */
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
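The new idle_buddy field ties in with the "sched: Improve scalability via 'CPU buddies'" commit listed above. As a rough, illustrative sketch only (not the actual kernel/sched/fair.c code), the wakeup path can consult a per-domain buddy CPU instead of scanning the whole domain for an idle CPU; the helper name select_idle_buddy() and the exact domain walk below are assumptions:

```c
/*
 * Illustrative sketch only -- not the kernel's real select_idle_sibling().
 * Each sched_domain carries a precomputed "buddy" CPU; on wakeup, prefer
 * that buddy if the task may run there and it is currently idle, instead
 * of scanning every CPU in the domain.
 */
static int select_idle_buddy(struct task_struct *p, int target)
{
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(target, sd) {
		int buddy = sd->idle_buddy;	/* set up when domains are built */

		if (cpumask_test_cpu(buddy, tsk_cpus_allowed(p)) &&
		    idle_cpu(buddy)) {
			target = buddy;
			break;
		}
	}
	rcu_read_unlock();

	return target;
}
```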
@@ -1244,6 +1245,9 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+	struct task_group *sched_task_group;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
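The sched_task_group pointer corresponds to the "sched: Fix race in task_group()" commit above: the scheduler keeps its own copy of the task's group, updated only while it holds the locks it needs, so readers no longer race with cgroup migration. A minimal sketch of the accessor (the real definition lives in the scheduler's private headers) might look like:

```c
#ifdef CONFIG_CGROUP_SCHED
/*
 * Sketch of the idea: read the scheduler-maintained group pointer
 * directly instead of re-deriving it from the task's cgroup state,
 * which can change concurrently during a cgroup attach.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}
#endif /* CONFIG_CGROUP_SCHED */
```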
@@ -2721,7 +2725,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
-#endif
+#endif	/* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
 				struct task_struct *tsk);