Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 1912 |
1 file changed, 1568 insertions, 344 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8dcdec6fe0fe..57ba7ea9b744 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -66,6 +66,10 @@ | |||
66 | #include <linux/unistd.h> | 66 | #include <linux/unistd.h> |
67 | #include <linux/pagemap.h> | 67 | #include <linux/pagemap.h> |
68 | #include <linux/hrtimer.h> | 68 | #include <linux/hrtimer.h> |
69 | #include <linux/tick.h> | ||
70 | #include <linux/bootmem.h> | ||
71 | #include <linux/debugfs.h> | ||
72 | #include <linux/ctype.h> | ||
69 | 73 | ||
70 | #include <asm/tlb.h> | 74 | #include <asm/tlb.h> |
71 | #include <asm/irq_regs.h> | 75 | #include <asm/irq_regs.h> |
@@ -114,6 +118,11 @@ unsigned long long __attribute__((weak)) sched_clock(void) | |||
114 | */ | 118 | */ |
115 | #define DEF_TIMESLICE (100 * HZ / 1000) | 119 | #define DEF_TIMESLICE (100 * HZ / 1000) |
116 | 120 | ||
121 | /* | ||
122 | * single value that denotes runtime == period, ie unlimited time. | ||
123 | */ | ||
124 | #define RUNTIME_INF ((u64)~0ULL) | ||
125 | |||
117 | #ifdef CONFIG_SMP | 126 | #ifdef CONFIG_SMP |
118 | /* | 127 | /* |
119 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) | 128 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) |
@@ -155,6 +164,84 @@ struct rt_prio_array { | |||
155 | struct list_head queue[MAX_RT_PRIO]; | 164 | struct list_head queue[MAX_RT_PRIO]; |
156 | }; | 165 | }; |
157 | 166 | ||
167 | struct rt_bandwidth { | ||
168 | /* nests inside the rq lock: */ | ||
169 | spinlock_t rt_runtime_lock; | ||
170 | ktime_t rt_period; | ||
171 | u64 rt_runtime; | ||
172 | struct hrtimer rt_period_timer; | ||
173 | }; | ||
174 | |||
175 | static struct rt_bandwidth def_rt_bandwidth; | ||
176 | |||
177 | static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); | ||
178 | |||
179 | static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) | ||
180 | { | ||
181 | struct rt_bandwidth *rt_b = | ||
182 | container_of(timer, struct rt_bandwidth, rt_period_timer); | ||
183 | ktime_t now; | ||
184 | int overrun; | ||
185 | int idle = 0; | ||
186 | |||
187 | for (;;) { | ||
188 | now = hrtimer_cb_get_time(timer); | ||
189 | overrun = hrtimer_forward(timer, now, rt_b->rt_period); | ||
190 | |||
191 | if (!overrun) | ||
192 | break; | ||
193 | |||
194 | idle = do_sched_rt_period_timer(rt_b, overrun); | ||
195 | } | ||
196 | |||
197 | return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; | ||
198 | } | ||
199 | |||
200 | static | ||
201 | void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | ||
202 | { | ||
203 | rt_b->rt_period = ns_to_ktime(period); | ||
204 | rt_b->rt_runtime = runtime; | ||
205 | |||
206 | spin_lock_init(&rt_b->rt_runtime_lock); | ||
207 | |||
208 | hrtimer_init(&rt_b->rt_period_timer, | ||
209 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
210 | rt_b->rt_period_timer.function = sched_rt_period_timer; | ||
211 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | ||
212 | } | ||
213 | |||
214 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | ||
215 | { | ||
216 | ktime_t now; | ||
217 | |||
218 | if (rt_b->rt_runtime == RUNTIME_INF) | ||
219 | return; | ||
220 | |||
221 | if (hrtimer_active(&rt_b->rt_period_timer)) | ||
222 | return; | ||
223 | |||
224 | spin_lock(&rt_b->rt_runtime_lock); | ||
225 | for (;;) { | ||
226 | if (hrtimer_active(&rt_b->rt_period_timer)) | ||
227 | break; | ||
228 | |||
229 | now = hrtimer_cb_get_time(&rt_b->rt_period_timer); | ||
230 | hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); | ||
231 | hrtimer_start(&rt_b->rt_period_timer, | ||
232 | rt_b->rt_period_timer.expires, | ||
233 | HRTIMER_MODE_ABS); | ||
234 | } | ||
235 | spin_unlock(&rt_b->rt_runtime_lock); | ||
236 | } | ||
237 | |||
238 | #ifdef CONFIG_RT_GROUP_SCHED | ||
239 | static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) | ||
240 | { | ||
241 | hrtimer_cancel(&rt_b->rt_period_timer); | ||
242 | } | ||
243 | #endif | ||
244 | |||
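Editorial aside, not part of the patch: sched_rt_period_timer() above calls hrtimer_forward() in a loop so that a callback which fires late still replenishes runtime exactly once per elapsed period; hrtimer_forward() pushes the expiry forward by whole periods until it lies in the future and returns how many periods were skipped. A minimal userspace sketch of that catch-up pattern, using plain integers and hypothetical names in place of ktime_t and the hrtimer API:

#include <stdio.h>

/* Advance *expires by whole periods until it is in the future;
 * return how many periods were skipped (cf. hrtimer_forward()). */
static unsigned long timer_forward(unsigned long *expires,
				   unsigned long now, unsigned long period)
{
	unsigned long overrun = 0;

	while (*expires <= now) {
		*expires += period;
		overrun++;
	}
	return overrun;
}

int main(void)
{
	unsigned long expires = 100, now = 370, period = 100;
	unsigned long overrun;

	/* Mirrors the for (;;) loop in sched_rt_period_timer(). */
	while ((overrun = timer_forward(&expires, now, period)))
		printf("replenish runtime for %lu missed period(s)\n", overrun);

	printf("next expiry at %lu\n", expires);	/* 400 */
	return 0;
}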
158 | #ifdef CONFIG_GROUP_SCHED | 245 | #ifdef CONFIG_GROUP_SCHED |
159 | 246 | ||
160 | #include <linux/cgroup.h> | 247 | #include <linux/cgroup.h> |
@@ -181,29 +268,39 @@ struct task_group { | |||
181 | struct sched_rt_entity **rt_se; | 268 | struct sched_rt_entity **rt_se; |
182 | struct rt_rq **rt_rq; | 269 | struct rt_rq **rt_rq; |
183 | 270 | ||
184 | u64 rt_runtime; | 271 | struct rt_bandwidth rt_bandwidth; |
185 | #endif | 272 | #endif |
186 | 273 | ||
187 | struct rcu_head rcu; | 274 | struct rcu_head rcu; |
188 | struct list_head list; | 275 | struct list_head list; |
276 | |||
277 | struct task_group *parent; | ||
278 | struct list_head siblings; | ||
279 | struct list_head children; | ||
189 | }; | 280 | }; |
190 | 281 | ||
282 | #ifdef CONFIG_USER_SCHED | ||
283 | |||
284 | /* | ||
285 | * Root task group. | ||
286 | * Every UID task group (including init_task_group aka UID-0) will | ||
287 | * be a child to this group. | ||
288 | */ | ||
289 | struct task_group root_task_group; | ||
290 | |||
191 | #ifdef CONFIG_FAIR_GROUP_SCHED | 291 | #ifdef CONFIG_FAIR_GROUP_SCHED |
192 | /* Default task group's sched entity on each cpu */ | 292 | /* Default task group's sched entity on each cpu */ |
193 | static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); | 293 | static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); |
194 | /* Default task group's cfs_rq on each cpu */ | 294 | /* Default task group's cfs_rq on each cpu */ |
195 | static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; | 295 | static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; |
196 | |||
197 | static struct sched_entity *init_sched_entity_p[NR_CPUS]; | ||
198 | static struct cfs_rq *init_cfs_rq_p[NR_CPUS]; | ||
199 | #endif | 296 | #endif |
200 | 297 | ||
201 | #ifdef CONFIG_RT_GROUP_SCHED | 298 | #ifdef CONFIG_RT_GROUP_SCHED |
202 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | 299 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); |
203 | static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; | 300 | static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; |
204 | 301 | #endif | |
205 | static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS]; | 302 | #else |
206 | static struct rt_rq *init_rt_rq_p[NR_CPUS]; | 303 | #define root_task_group init_task_group |
207 | #endif | 304 | #endif |
208 | 305 | ||
209 | /* task_group_lock serializes add/remove of task groups and also changes to | 306 | /* task_group_lock serializes add/remove of task groups and also changes to |
@@ -221,23 +318,15 @@ static DEFINE_MUTEX(doms_cur_mutex); | |||
221 | # define INIT_TASK_GROUP_LOAD NICE_0_LOAD | 318 | # define INIT_TASK_GROUP_LOAD NICE_0_LOAD |
222 | #endif | 319 | #endif |
223 | 320 | ||
321 | #define MIN_SHARES 2 | ||
322 | |||
224 | static int init_task_group_load = INIT_TASK_GROUP_LOAD; | 323 | static int init_task_group_load = INIT_TASK_GROUP_LOAD; |
225 | #endif | 324 | #endif |
226 | 325 | ||
227 | /* Default task group. | 326 | /* Default task group. |
228 | * Every task in the system belongs to this group at bootup. | 327 | * Every task in the system belongs to this group at bootup. |
229 | */ | 328 | */ |
230 | struct task_group init_task_group = { | 329 | struct task_group init_task_group; |
231 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
232 | .se = init_sched_entity_p, | ||
233 | .cfs_rq = init_cfs_rq_p, | ||
234 | #endif | ||
235 | |||
236 | #ifdef CONFIG_RT_GROUP_SCHED | ||
237 | .rt_se = init_sched_rt_entity_p, | ||
238 | .rt_rq = init_rt_rq_p, | ||
239 | #endif | ||
240 | }; | ||
241 | 330 | ||
242 | /* return group to which a task belongs */ | 331 | /* return group to which a task belongs */ |
243 | static inline struct task_group *task_group(struct task_struct *p) | 332 | static inline struct task_group *task_group(struct task_struct *p) |
@@ -297,8 +386,12 @@ struct cfs_rq { | |||
297 | 386 | ||
298 | struct rb_root tasks_timeline; | 387 | struct rb_root tasks_timeline; |
299 | struct rb_node *rb_leftmost; | 388 | struct rb_node *rb_leftmost; |
300 | struct rb_node *rb_load_balance_curr; | 389 | |
301 | /* 'curr' points to currently running entity on this cfs_rq. | 390 | struct list_head tasks; |
391 | struct list_head *balance_iterator; | ||
392 | |||
393 | /* | ||
394 | * 'curr' points to currently running entity on this cfs_rq. | ||
302 | * It is set to NULL otherwise (i.e when none are currently running). | 395 | * It is set to NULL otherwise (i.e when none are currently running). |
303 | */ | 396 | */ |
304 | struct sched_entity *curr, *next; | 397 | struct sched_entity *curr, *next; |
@@ -318,6 +411,43 @@ struct cfs_rq { | |||
318 | */ | 411 | */ |
319 | struct list_head leaf_cfs_rq_list; | 412 | struct list_head leaf_cfs_rq_list; |
320 | struct task_group *tg; /* group that "owns" this runqueue */ | 413 | struct task_group *tg; /* group that "owns" this runqueue */ |
414 | |||
415 | #ifdef CONFIG_SMP | ||
416 | unsigned long task_weight; | ||
417 | unsigned long shares; | ||
418 | /* | ||
419 | * We need space to build a sched_domain wide view of the full task | ||
420 | * group tree; to avoid depending on dynamic memory allocation | ||
421 | * during the load balancing we place this in the per cpu task group | ||
422 | * hierarchy. This limits the load balancing to one instance per cpu, | ||
423 | * but more should not be needed anyway. | ||
424 | */ | ||
425 | struct aggregate_struct { | ||
426 | /* | ||
427 | * load = weight(cpus) * f(tg) | ||
428 | * | ||
429 | * Where f(tg) is the recursive weight fraction assigned to | ||
430 | * this group. | ||
431 | */ | ||
432 | unsigned long load; | ||
433 | |||
434 | /* | ||
435 | * part of the group weight distributed to this span. | ||
436 | */ | ||
437 | unsigned long shares; | ||
438 | |||
439 | /* | ||
440 | * The sum of all runqueue weights within this span. | ||
441 | */ | ||
442 | unsigned long rq_weight; | ||
443 | |||
444 | /* | ||
445 | * Weight contributed by tasks; this is the part we can | ||
446 | * influence by moving tasks around. | ||
447 | */ | ||
448 | unsigned long task_weight; | ||
449 | } aggregate; | ||
450 | #endif | ||
321 | #endif | 451 | #endif |
322 | }; | 452 | }; |
323 | 453 | ||
@@ -334,6 +464,9 @@ struct rt_rq { | |||
334 | #endif | 464 | #endif |
335 | int rt_throttled; | 465 | int rt_throttled; |
336 | u64 rt_time; | 466 | u64 rt_time; |
467 | u64 rt_runtime; | ||
468 | /* Nests inside the rq lock: */ | ||
469 | spinlock_t rt_runtime_lock; | ||
337 | 470 | ||
338 | #ifdef CONFIG_RT_GROUP_SCHED | 471 | #ifdef CONFIG_RT_GROUP_SCHED |
339 | unsigned long rt_nr_boosted; | 472 | unsigned long rt_nr_boosted; |
@@ -396,6 +529,7 @@ struct rq { | |||
396 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; | 529 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; |
397 | unsigned char idle_at_tick; | 530 | unsigned char idle_at_tick; |
398 | #ifdef CONFIG_NO_HZ | 531 | #ifdef CONFIG_NO_HZ |
532 | unsigned long last_tick_seen; | ||
399 | unsigned char in_nohz_recently; | 533 | unsigned char in_nohz_recently; |
400 | #endif | 534 | #endif |
401 | /* capture load from *all* tasks on this cpu: */ | 535 | /* capture load from *all* tasks on this cpu: */ |
@@ -405,8 +539,6 @@ struct rq { | |||
405 | 539 | ||
406 | struct cfs_rq cfs; | 540 | struct cfs_rq cfs; |
407 | struct rt_rq rt; | 541 | struct rt_rq rt; |
408 | u64 rt_period_expire; | ||
409 | int rt_throttled; | ||
410 | 542 | ||
411 | #ifdef CONFIG_FAIR_GROUP_SCHED | 543 | #ifdef CONFIG_FAIR_GROUP_SCHED |
412 | /* list of leaf cfs_rq on this cpu: */ | 544 | /* list of leaf cfs_rq on this cpu: */ |
@@ -499,6 +631,32 @@ static inline int cpu_of(struct rq *rq) | |||
499 | #endif | 631 | #endif |
500 | } | 632 | } |
501 | 633 | ||
634 | #ifdef CONFIG_NO_HZ | ||
635 | static inline bool nohz_on(int cpu) | ||
636 | { | ||
637 | return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE; | ||
638 | } | ||
639 | |||
640 | static inline u64 max_skipped_ticks(struct rq *rq) | ||
641 | { | ||
642 | return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1; | ||
643 | } | ||
644 | |||
645 | static inline void update_last_tick_seen(struct rq *rq) | ||
646 | { | ||
647 | rq->last_tick_seen = jiffies; | ||
648 | } | ||
649 | #else | ||
650 | static inline u64 max_skipped_ticks(struct rq *rq) | ||
651 | { | ||
652 | return 1; | ||
653 | } | ||
654 | |||
655 | static inline void update_last_tick_seen(struct rq *rq) | ||
656 | { | ||
657 | } | ||
658 | #endif | ||
659 | |||
502 | /* | 660 | /* |
503 | * Update the per-runqueue clock, as finegrained as the platform can give | 661 | * Update the per-runqueue clock, as finegrained as the platform can give |
504 | * us, but without assuming monotonicity, etc.: | 662 | * us, but without assuming monotonicity, etc.: |
@@ -523,9 +681,12 @@ static void __update_rq_clock(struct rq *rq) | |||
523 | /* | 681 | /* |
524 | * Catch too large forward jumps too: | 682 | * Catch too large forward jumps too: |
525 | */ | 683 | */ |
526 | if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) { | 684 | u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC; |
527 | if (clock < rq->tick_timestamp + TICK_NSEC) | 685 | u64 max_time = rq->tick_timestamp + max_jump; |
528 | clock = rq->tick_timestamp + TICK_NSEC; | 686 | |
687 | if (unlikely(clock + delta > max_time)) { | ||
688 | if (clock < max_time) | ||
689 | clock = max_time; | ||
529 | else | 690 | else |
530 | clock++; | 691 | clock++; |
531 | rq->clock_overflows++; | 692 | rq->clock_overflows++; |
@@ -561,23 +722,6 @@ static void update_rq_clock(struct rq *rq) | |||
561 | #define task_rq(p) cpu_rq(task_cpu(p)) | 722 | #define task_rq(p) cpu_rq(task_cpu(p)) |
562 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) | 723 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) |
563 | 724 | ||
564 | unsigned long rt_needs_cpu(int cpu) | ||
565 | { | ||
566 | struct rq *rq = cpu_rq(cpu); | ||
567 | u64 delta; | ||
568 | |||
569 | if (!rq->rt_throttled) | ||
570 | return 0; | ||
571 | |||
572 | if (rq->clock > rq->rt_period_expire) | ||
573 | return 1; | ||
574 | |||
575 | delta = rq->rt_period_expire - rq->clock; | ||
576 | do_div(delta, NSEC_PER_SEC / HZ); | ||
577 | |||
578 | return (unsigned long)delta; | ||
579 | } | ||
580 | |||
581 | /* | 725 | /* |
582 | * Tunables that become constants when CONFIG_SCHED_DEBUG is off: | 726 | * Tunables that become constants when CONFIG_SCHED_DEBUG is off: |
583 | */ | 727 | */ |
@@ -590,22 +734,137 @@ unsigned long rt_needs_cpu(int cpu) | |||
590 | /* | 734 | /* |
591 | * Debugging: various feature bits | 735 | * Debugging: various feature bits |
592 | */ | 736 | */ |
737 | |||
738 | #define SCHED_FEAT(name, enabled) \ | ||
739 | __SCHED_FEAT_##name , | ||
740 | |||
593 | enum { | 741 | enum { |
594 | SCHED_FEAT_NEW_FAIR_SLEEPERS = 1, | 742 | #include "sched_features.h" |
595 | SCHED_FEAT_WAKEUP_PREEMPT = 2, | ||
596 | SCHED_FEAT_START_DEBIT = 4, | ||
597 | SCHED_FEAT_HRTICK = 8, | ||
598 | SCHED_FEAT_DOUBLE_TICK = 16, | ||
599 | }; | 743 | }; |
600 | 744 | ||
745 | #undef SCHED_FEAT | ||
746 | |||
747 | #define SCHED_FEAT(name, enabled) \ | ||
748 | (1UL << __SCHED_FEAT_##name) * enabled | | ||
749 | |||
601 | const_debug unsigned int sysctl_sched_features = | 750 | const_debug unsigned int sysctl_sched_features = |
602 | SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 | | 751 | #include "sched_features.h" |
603 | SCHED_FEAT_WAKEUP_PREEMPT * 1 | | 752 | 0; |
604 | SCHED_FEAT_START_DEBIT * 1 | | 753 | |
605 | SCHED_FEAT_HRTICK * 1 | | 754 | #undef SCHED_FEAT |
606 | SCHED_FEAT_DOUBLE_TICK * 0; | 755 | |
756 | #ifdef CONFIG_SCHED_DEBUG | ||
757 | #define SCHED_FEAT(name, enabled) \ | ||
758 | #name , | ||
759 | |||
760 | __read_mostly char *sched_feat_names[] = { | ||
761 | #include "sched_features.h" | ||
762 | NULL | ||
763 | }; | ||
764 | |||
765 | #undef SCHED_FEAT | ||
766 | |||
767 | int sched_feat_open(struct inode *inode, struct file *filp) | ||
768 | { | ||
769 | filp->private_data = inode->i_private; | ||
770 | return 0; | ||
771 | } | ||
772 | |||
773 | static ssize_t | ||
774 | sched_feat_read(struct file *filp, char __user *ubuf, | ||
775 | size_t cnt, loff_t *ppos) | ||
776 | { | ||
777 | char *buf; | ||
778 | int r = 0; | ||
779 | int len = 0; | ||
780 | int i; | ||
607 | 781 | ||
608 | #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x) | 782 | for (i = 0; sched_feat_names[i]; i++) { |
783 | len += strlen(sched_feat_names[i]); | ||
784 | len += 4; | ||
785 | } | ||
786 | |||
787 | buf = kmalloc(len + 2, GFP_KERNEL); | ||
788 | if (!buf) | ||
789 | return -ENOMEM; | ||
790 | |||
791 | for (i = 0; sched_feat_names[i]; i++) { | ||
792 | if (sysctl_sched_features & (1UL << i)) | ||
793 | r += sprintf(buf + r, "%s ", sched_feat_names[i]); | ||
794 | else | ||
795 | r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]); | ||
796 | } | ||
797 | |||
798 | r += sprintf(buf + r, "\n"); | ||
799 | WARN_ON(r >= len + 2); | ||
800 | |||
801 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
802 | |||
803 | kfree(buf); | ||
804 | |||
805 | return r; | ||
806 | } | ||
807 | |||
808 | static ssize_t | ||
809 | sched_feat_write(struct file *filp, const char __user *ubuf, | ||
810 | size_t cnt, loff_t *ppos) | ||
811 | { | ||
812 | char buf[64]; | ||
813 | char *cmp = buf; | ||
814 | int neg = 0; | ||
815 | int i; | ||
816 | |||
817 | if (cnt > 63) | ||
818 | cnt = 63; | ||
819 | |||
820 | if (copy_from_user(&buf, ubuf, cnt)) | ||
821 | return -EFAULT; | ||
822 | |||
823 | buf[cnt] = 0; | ||
824 | |||
825 | if (strncmp(buf, "NO_", 3) == 0) { | ||
826 | neg = 1; | ||
827 | cmp += 3; | ||
828 | } | ||
829 | |||
830 | for (i = 0; sched_feat_names[i]; i++) { | ||
831 | int len = strlen(sched_feat_names[i]); | ||
832 | |||
833 | if (strncmp(cmp, sched_feat_names[i], len) == 0) { | ||
834 | if (neg) | ||
835 | sysctl_sched_features &= ~(1UL << i); | ||
836 | else | ||
837 | sysctl_sched_features |= (1UL << i); | ||
838 | break; | ||
839 | } | ||
840 | } | ||
841 | |||
842 | if (!sched_feat_names[i]) | ||
843 | return -EINVAL; | ||
844 | |||
845 | filp->f_pos += cnt; | ||
846 | |||
847 | return cnt; | ||
848 | } | ||
849 | |||
850 | static struct file_operations sched_feat_fops = { | ||
851 | .open = sched_feat_open, | ||
852 | .read = sched_feat_read, | ||
853 | .write = sched_feat_write, | ||
854 | }; | ||
855 | |||
856 | static __init int sched_init_debug(void) | ||
857 | { | ||
858 | debugfs_create_file("sched_features", 0644, NULL, NULL, | ||
859 | &sched_feat_fops); | ||
860 | |||
861 | return 0; | ||
862 | } | ||
863 | late_initcall(sched_init_debug); | ||
864 | |||
865 | #endif | ||
866 | |||
867 | #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) | ||
609 | 868 | ||
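Editorial aside, not part of the patch: the feature-bits rework above includes sched_features.h three times, redefining SCHED_FEAT() before each inclusion, so the enum of bit indices, the default sysctl_sched_features mask and the sched_feat_names[] table are all generated from a single list and cannot drift apart. A self-contained sketch of the same multiple-inclusion (X-macro) technique, with made-up feature names and the list inlined instead of kept in a separate header:

#include <stdio.h>

#define FEATURE_LIST \
	FEAT(NEW_FAIR_SLEEPERS, 1) \
	FEAT(WAKEUP_PREEMPT, 1) \
	FEAT(DOUBLE_TICK, 0)

/* Expansion 1: bit indices. */
#define FEAT(name, enabled) __FEAT_##name,
enum { FEATURE_LIST };
#undef FEAT

/* Expansion 2: default bitmask. */
#define FEAT(name, enabled) (1UL << __FEAT_##name) * (enabled) |
static unsigned long features = FEATURE_LIST 0;
#undef FEAT

/* Expansion 3: name table, used for the debugfs-style listing. */
#define FEAT(name, enabled) #name,
static const char *feat_names[] = { FEATURE_LIST NULL };
#undef FEAT

int main(void)
{
	int i;

	for (i = 0; feat_names[i]; i++)
		printf("%s%s ", (features & (1UL << i)) ? "" : "NO_",
		       feat_names[i]);
	putchar('\n');	/* NEW_FAIR_SLEEPERS WAKEUP_PREEMPT NO_DOUBLE_TICK */
	return 0;
}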
610 | /* | 869 | /* |
611 | * Number of tasks to iterate in a single balance run. | 870 | * Number of tasks to iterate in a single balance run. |
@@ -627,16 +886,52 @@ static __read_mostly int scheduler_running; | |||
627 | */ | 886 | */ |
628 | int sysctl_sched_rt_runtime = 950000; | 887 | int sysctl_sched_rt_runtime = 950000; |
629 | 888 | ||
630 | /* | 889 | static inline u64 global_rt_period(void) |
631 | * single value that denotes runtime == period, ie unlimited time. | 890 | { |
632 | */ | 891 | return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; |
633 | #define RUNTIME_INF ((u64)~0ULL) | 892 | } |
893 | |||
894 | static inline u64 global_rt_runtime(void) | ||
895 | { | ||
896 | if (sysctl_sched_rt_period < 0) | ||
897 | return RUNTIME_INF; | ||
898 | |||
899 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; | ||
900 | } | ||
901 | |||
902 | static const unsigned long long time_sync_thresh = 100000; | ||
903 | |||
904 | static DEFINE_PER_CPU(unsigned long long, time_offset); | ||
905 | static DEFINE_PER_CPU(unsigned long long, prev_cpu_time); | ||
634 | 906 | ||
635 | /* | 907 | /* |
636 | * For kernel-internal use: high-speed (but slightly incorrect) per-cpu | 908 | * Global lock which we take every now and then to synchronize |
637 | * clock constructed from sched_clock(): | 909 | * the CPUs time. This method is not warp-safe, but it's good |
910 | * enough to synchronize slowly diverging time sources and thus | ||
911 | * it's good enough for tracing: | ||
638 | */ | 912 | */ |
639 | unsigned long long cpu_clock(int cpu) | 913 | static DEFINE_SPINLOCK(time_sync_lock); |
914 | static unsigned long long prev_global_time; | ||
915 | |||
916 | static unsigned long long __sync_cpu_clock(cycles_t time, int cpu) | ||
917 | { | ||
918 | unsigned long flags; | ||
919 | |||
920 | spin_lock_irqsave(&time_sync_lock, flags); | ||
921 | |||
922 | if (time < prev_global_time) { | ||
923 | per_cpu(time_offset, cpu) += prev_global_time - time; | ||
924 | time = prev_global_time; | ||
925 | } else { | ||
926 | prev_global_time = time; | ||
927 | } | ||
928 | |||
929 | spin_unlock_irqrestore(&time_sync_lock, flags); | ||
930 | |||
931 | return time; | ||
932 | } | ||
933 | |||
934 | static unsigned long long __cpu_clock(int cpu) | ||
640 | { | 935 | { |
641 | unsigned long long now; | 936 | unsigned long long now; |
642 | unsigned long flags; | 937 | unsigned long flags; |
@@ -657,6 +952,24 @@ unsigned long long cpu_clock(int cpu) | |||
657 | 952 | ||
658 | return now; | 953 | return now; |
659 | } | 954 | } |
955 | |||
956 | /* | ||
957 | * For kernel-internal use: high-speed (but slightly incorrect) per-cpu | ||
958 | * clock constructed from sched_clock(): | ||
959 | */ | ||
960 | unsigned long long cpu_clock(int cpu) | ||
961 | { | ||
962 | unsigned long long prev_cpu_time, time, delta_time; | ||
963 | |||
964 | prev_cpu_time = per_cpu(prev_cpu_time, cpu); | ||
965 | time = __cpu_clock(cpu) + per_cpu(time_offset, cpu); | ||
966 | delta_time = time-prev_cpu_time; | ||
967 | |||
968 | if (unlikely(delta_time > time_sync_thresh)) | ||
969 | time = __sync_cpu_clock(time, cpu); | ||
970 | |||
971 | return time; | ||
972 | } | ||
660 | EXPORT_SYMBOL_GPL(cpu_clock); | 973 | EXPORT_SYMBOL_GPL(cpu_clock); |
661 | 974 | ||
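Editorial aside, not part of the patch: cpu_clock() above builds a per-cpu clock from sched_clock() plus a per-cpu offset, and __sync_cpu_clock() occasionally clamps a lagging cpu against the last globally observed time by folding the difference into that cpu's offset, so the values handed out never run backwards across cpus. A simplified sketch of that clamp-and-offset idea, with locking and per-cpu variables omitted and all names hypothetical:

#include <stdio.h>

static unsigned long long prev_global_time;
static unsigned long long time_offset[2];	/* pretend machine with two cpus */

static unsigned long long sync_clock(unsigned long long local, int cpu)
{
	unsigned long long time = local + time_offset[cpu];

	if (time < prev_global_time) {
		/* This cpu lags: remember the gap and clamp. */
		time_offset[cpu] += prev_global_time - time;
		time = prev_global_time;
	} else {
		prev_global_time = time;
	}
	return time;
}

int main(void)
{
	printf("%llu\n", sync_clock(1000, 0));	/* 1000, advances global time */
	printf("%llu\n", sync_clock(400, 1));	/* clamped to 1000, cpu1 offset becomes 600 */
	printf("%llu\n", sync_clock(500, 1));	/* 500 + 600 = 1100 */
	return 0;
}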
662 | #ifndef prepare_arch_switch | 975 | #ifndef prepare_arch_switch |
@@ -1116,6 +1429,9 @@ static void __resched_task(struct task_struct *p, int tif_bit) | |||
1116 | */ | 1429 | */ |
1117 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) | 1430 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) |
1118 | 1431 | ||
1432 | /* | ||
1433 | * delta *= weight / lw | ||
1434 | */ | ||
1119 | static unsigned long | 1435 | static unsigned long |
1120 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, | 1436 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, |
1121 | struct load_weight *lw) | 1437 | struct load_weight *lw) |
@@ -1138,12 +1454,6 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, | |||
1138 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); | 1454 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); |
1139 | } | 1455 | } |
1140 | 1456 | ||
1141 | static inline unsigned long | ||
1142 | calc_delta_fair(unsigned long delta_exec, struct load_weight *lw) | ||
1143 | { | ||
1144 | return calc_delta_mine(delta_exec, NICE_0_LOAD, lw); | ||
1145 | } | ||
1146 | |||
1147 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) | 1457 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) |
1148 | { | 1458 | { |
1149 | lw->weight += inc; | 1459 | lw->weight += inc; |
@@ -1241,11 +1551,390 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime); | |||
1241 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} | 1551 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} |
1242 | #endif | 1552 | #endif |
1243 | 1553 | ||
1554 | static inline void inc_cpu_load(struct rq *rq, unsigned long load) | ||
1555 | { | ||
1556 | update_load_add(&rq->load, load); | ||
1557 | } | ||
1558 | |||
1559 | static inline void dec_cpu_load(struct rq *rq, unsigned long load) | ||
1560 | { | ||
1561 | update_load_sub(&rq->load, load); | ||
1562 | } | ||
1563 | |||
1244 | #ifdef CONFIG_SMP | 1564 | #ifdef CONFIG_SMP |
1245 | static unsigned long source_load(int cpu, int type); | 1565 | static unsigned long source_load(int cpu, int type); |
1246 | static unsigned long target_load(int cpu, int type); | 1566 | static unsigned long target_load(int cpu, int type); |
1247 | static unsigned long cpu_avg_load_per_task(int cpu); | 1567 | static unsigned long cpu_avg_load_per_task(int cpu); |
1248 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | 1568 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); |
1569 | |||
1570 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
1571 | |||
1572 | /* | ||
1573 | * Group load balancing. | ||
1574 | * | ||
1575 | * We calculate a few balance domain wide aggregate numbers; load and weight. | ||
1576 | * Given the pictures below, and assuming each item has equal weight: | ||
1577 | * | ||
1578 | * root 1 - thread | ||
1579 | * / | \ A - group | ||
1580 | * A 1 B | ||
1581 | * /|\ / \ | ||
1582 | * C 2 D 3 4 | ||
1583 | * | | | ||
1584 | * 5 6 | ||
1585 | * | ||
1586 | * load: | ||
1587 | * A and B get 1/3-rd of the total load. C and D get 1/3-rd of A's 1/3-rd, | ||
1588 | * which equals 1/9-th of the total load. | ||
1589 | * | ||
1590 | * shares: | ||
1591 | * The weight of this group on the selected cpus. | ||
1592 | * | ||
1593 | * rq_weight: | ||
1594 | * Direct sum of all the cpus' rq weights, e.g. A would get 3 while | ||
1595 | * B would get 2. | ||
1596 | * | ||
1597 | * task_weight: | ||
1598 | * Part of the rq_weight contributed by tasks; all groups except B would | ||
1599 | * get 1, B gets 2. | ||
1600 | */ | ||
1601 | |||
1602 | static inline struct aggregate_struct * | ||
1603 | aggregate(struct task_group *tg, struct sched_domain *sd) | ||
1604 | { | ||
1605 | return &tg->cfs_rq[sd->first_cpu]->aggregate; | ||
1606 | } | ||
1607 | |||
1608 | typedef void (*aggregate_func)(struct task_group *, struct sched_domain *); | ||
1609 | |||
1610 | /* | ||
1611 | * Iterate the full tree, calling @down when first entering a node and @up when | ||
1612 | * leaving it for the final time. | ||
1613 | */ | ||
1614 | static | ||
1615 | void aggregate_walk_tree(aggregate_func down, aggregate_func up, | ||
1616 | struct sched_domain *sd) | ||
1617 | { | ||
1618 | struct task_group *parent, *child; | ||
1619 | |||
1620 | rcu_read_lock(); | ||
1621 | parent = &root_task_group; | ||
1622 | down: | ||
1623 | (*down)(parent, sd); | ||
1624 | list_for_each_entry_rcu(child, &parent->children, siblings) { | ||
1625 | parent = child; | ||
1626 | goto down; | ||
1627 | |||
1628 | up: | ||
1629 | continue; | ||
1630 | } | ||
1631 | (*up)(parent, sd); | ||
1632 | |||
1633 | child = parent; | ||
1634 | parent = parent->parent; | ||
1635 | if (parent) | ||
1636 | goto up; | ||
1637 | rcu_read_unlock(); | ||
1638 | } | ||
1639 | |||
1640 | /* | ||
1641 | * Calculate the aggregate runqueue weight. | ||
1642 | */ | ||
1643 | static | ||
1644 | void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd) | ||
1645 | { | ||
1646 | unsigned long rq_weight = 0; | ||
1647 | unsigned long task_weight = 0; | ||
1648 | int i; | ||
1649 | |||
1650 | for_each_cpu_mask(i, sd->span) { | ||
1651 | rq_weight += tg->cfs_rq[i]->load.weight; | ||
1652 | task_weight += tg->cfs_rq[i]->task_weight; | ||
1653 | } | ||
1654 | |||
1655 | aggregate(tg, sd)->rq_weight = rq_weight; | ||
1656 | aggregate(tg, sd)->task_weight = task_weight; | ||
1657 | } | ||
1658 | |||
1659 | /* | ||
1660 | * Redistribute tg->shares amongst all tg->cfs_rq[]s. | ||
1661 | */ | ||
1662 | static void __aggregate_redistribute_shares(struct task_group *tg) | ||
1663 | { | ||
1664 | int i, max_cpu = smp_processor_id(); | ||
1665 | unsigned long rq_weight = 0; | ||
1666 | unsigned long shares, max_shares = 0, shares_rem = tg->shares; | ||
1667 | |||
1668 | for_each_possible_cpu(i) | ||
1669 | rq_weight += tg->cfs_rq[i]->load.weight; | ||
1670 | |||
1671 | for_each_possible_cpu(i) { | ||
1672 | /* | ||
1673 | * divide shares proportional to the rq_weights. | ||
1674 | */ | ||
1675 | shares = tg->shares * tg->cfs_rq[i]->load.weight; | ||
1676 | shares /= rq_weight + 1; | ||
1677 | |||
1678 | tg->cfs_rq[i]->shares = shares; | ||
1679 | |||
1680 | if (shares > max_shares) { | ||
1681 | max_shares = shares; | ||
1682 | max_cpu = i; | ||
1683 | } | ||
1684 | shares_rem -= shares; | ||
1685 | } | ||
1686 | |||
1687 | /* | ||
1688 | * Ensure it all adds up to tg->shares; we can lose a few | ||
1689 | * due to rounding down when computing the per-cpu shares. | ||
1690 | */ | ||
1691 | if (shares_rem) | ||
1692 | tg->cfs_rq[max_cpu]->shares += shares_rem; | ||
1693 | } | ||
1694 | |||
1695 | /* | ||
1696 | * Compute the weight of this group on the given cpus. | ||
1697 | */ | ||
1698 | static | ||
1699 | void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd) | ||
1700 | { | ||
1701 | unsigned long shares = 0; | ||
1702 | int i; | ||
1703 | |||
1704 | again: | ||
1705 | for_each_cpu_mask(i, sd->span) | ||
1706 | shares += tg->cfs_rq[i]->shares; | ||
1707 | |||
1708 | /* | ||
1709 | * When the span doesn't have any shares assigned, but does have | ||
1710 | * tasks to run, do a machine wide rebalance (should be rare). | ||
1711 | */ | ||
1712 | if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) { | ||
1713 | __aggregate_redistribute_shares(tg); | ||
1714 | goto again; | ||
1715 | } | ||
1716 | |||
1717 | aggregate(tg, sd)->shares = shares; | ||
1718 | } | ||
1719 | |||
1720 | /* | ||
1721 | * Compute the load fraction assigned to this group, relies on the aggregate | ||
1722 | * weight and this group's parent's load, i.e. top-down. | ||
1723 | */ | ||
1724 | static | ||
1725 | void aggregate_group_load(struct task_group *tg, struct sched_domain *sd) | ||
1726 | { | ||
1727 | unsigned long load; | ||
1728 | |||
1729 | if (!tg->parent) { | ||
1730 | int i; | ||
1731 | |||
1732 | load = 0; | ||
1733 | for_each_cpu_mask(i, sd->span) | ||
1734 | load += cpu_rq(i)->load.weight; | ||
1735 | |||
1736 | } else { | ||
1737 | load = aggregate(tg->parent, sd)->load; | ||
1738 | |||
1739 | /* | ||
1740 | * shares is our weight in the parent's rq so | ||
1741 | * shares/parent->rq_weight gives our fraction of the load | ||
1742 | */ | ||
1743 | load *= aggregate(tg, sd)->shares; | ||
1744 | load /= aggregate(tg->parent, sd)->rq_weight + 1; | ||
1745 | } | ||
1746 | |||
1747 | aggregate(tg, sd)->load = load; | ||
1748 | } | ||
1749 | |||
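Editorial aside, not part of the patch: aggregate_group_load() above computes the load fraction top-down; the root group's load is the sum of the runqueue weights in the domain span, and each child's load is its parent's load scaled by the child's shares relative to the parent's rq_weight (the "+ 1" avoids dividing by zero). A small worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long root_load = 3072;		/* sum of rq weights in the span */
	unsigned long child_shares = 1024;	/* aggregate(child, sd)->shares */
	unsigned long parent_rq_weight = 2048;	/* aggregate(parent, sd)->rq_weight */

	unsigned long child_load = root_load * child_shares / (parent_rq_weight + 1);

	printf("child load = %lu\n", child_load);	/* 1535, i.e. roughly half of 3072 */
	return 0;
}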
1750 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | ||
1751 | |||
1752 | /* | ||
1753 | * Calculate and set the cpu's group shares. | ||
1754 | */ | ||
1755 | static void | ||
1756 | __update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd, | ||
1757 | int tcpu) | ||
1758 | { | ||
1759 | int boost = 0; | ||
1760 | unsigned long shares; | ||
1761 | unsigned long rq_weight; | ||
1762 | |||
1763 | if (!tg->se[tcpu]) | ||
1764 | return; | ||
1765 | |||
1766 | rq_weight = tg->cfs_rq[tcpu]->load.weight; | ||
1767 | |||
1768 | /* | ||
1769 | * If there are currently no tasks on the cpu pretend there is one of | ||
1770 | * average load so that when a new task gets to run here it will not | ||
1771 | * get delayed by group starvation. | ||
1772 | */ | ||
1773 | if (!rq_weight) { | ||
1774 | boost = 1; | ||
1775 | rq_weight = NICE_0_LOAD; | ||
1776 | } | ||
1777 | |||
1778 | /* | ||
1779 | * \Sum shares * rq_weight | ||
1780 | * shares = ----------------------- | ||
1781 | * \Sum rq_weight | ||
1782 | * | ||
1783 | */ | ||
1784 | shares = aggregate(tg, sd)->shares * rq_weight; | ||
1785 | shares /= aggregate(tg, sd)->rq_weight + 1; | ||
1786 | |||
1787 | /* | ||
1788 | * record the actual number of shares, not the boosted amount. | ||
1789 | */ | ||
1790 | tg->cfs_rq[tcpu]->shares = boost ? 0 : shares; | ||
1791 | |||
1792 | if (shares < MIN_SHARES) | ||
1793 | shares = MIN_SHARES; | ||
1794 | |||
1795 | __set_se_shares(tg->se[tcpu], shares); | ||
1796 | } | ||
1797 | |||
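Editorial aside, not part of the patch: __update_group_shares_cpu() above splits a group's aggregate shares across the cpus of the domain in proportion to each cpu's runqueue weight, per the formula in its comment. A small worked example of that split with made-up numbers (the kernel additionally clamps the result to MIN_SHARES and pretends an idle cpu has NICE_0_LOAD of weight, which is omitted here):

#include <stdio.h>

int main(void)
{
	unsigned long group_shares = 1024;			/* aggregate(tg, sd)->shares */
	unsigned long rq_weight[] = { 2048, 1024, 1024 };	/* per-cpu cfs_rq weight */
	unsigned long total = 0, shares;
	int i;

	for (i = 0; i < 3; i++)
		total += rq_weight[i];

	for (i = 0; i < 3; i++) {
		/* shares = \Sum shares * rq_weight / \Sum rq_weight */
		shares = group_shares * rq_weight[i] / (total + 1);
		printf("cpu%d gets %lu shares\n", i, shares);	/* 511, 255, 255 */
	}
	return 0;
}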
1798 | /* | ||
1799 | * Re-adjust the weights on the cpu the task came from and on the cpu the | ||
1800 | * task went to. | ||
1801 | */ | ||
1802 | static void | ||
1803 | __move_group_shares(struct task_group *tg, struct sched_domain *sd, | ||
1804 | int scpu, int dcpu) | ||
1805 | { | ||
1806 | unsigned long shares; | ||
1807 | |||
1808 | shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares; | ||
1809 | |||
1810 | __update_group_shares_cpu(tg, sd, scpu); | ||
1811 | __update_group_shares_cpu(tg, sd, dcpu); | ||
1812 | |||
1813 | /* | ||
1814 | * ensure we never lose shares due to rounding errors in the | ||
1815 | * above redistribution. | ||
1816 | */ | ||
1817 | shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares; | ||
1818 | if (shares) | ||
1819 | tg->cfs_rq[dcpu]->shares += shares; | ||
1820 | } | ||
1821 | |||
1822 | /* | ||
1823 | * Because changing a group's shares changes the weight of the super-group | ||
1824 | * we need to walk up the tree and change all shares until we hit the root. | ||
1825 | */ | ||
1826 | static void | ||
1827 | move_group_shares(struct task_group *tg, struct sched_domain *sd, | ||
1828 | int scpu, int dcpu) | ||
1829 | { | ||
1830 | while (tg) { | ||
1831 | __move_group_shares(tg, sd, scpu, dcpu); | ||
1832 | tg = tg->parent; | ||
1833 | } | ||
1834 | } | ||
1835 | |||
1836 | static | ||
1837 | void aggregate_group_set_shares(struct task_group *tg, struct sched_domain *sd) | ||
1838 | { | ||
1839 | unsigned long shares = aggregate(tg, sd)->shares; | ||
1840 | int i; | ||
1841 | |||
1842 | for_each_cpu_mask(i, sd->span) { | ||
1843 | struct rq *rq = cpu_rq(i); | ||
1844 | unsigned long flags; | ||
1845 | |||
1846 | spin_lock_irqsave(&rq->lock, flags); | ||
1847 | __update_group_shares_cpu(tg, sd, i); | ||
1848 | spin_unlock_irqrestore(&rq->lock, flags); | ||
1849 | } | ||
1850 | |||
1851 | aggregate_group_shares(tg, sd); | ||
1852 | |||
1853 | /* | ||
1854 | * ensure we never lose shares due to rounding errors in the | ||
1855 | * above redistribution. | ||
1856 | */ | ||
1857 | shares -= aggregate(tg, sd)->shares; | ||
1858 | if (shares) { | ||
1859 | tg->cfs_rq[sd->first_cpu]->shares += shares; | ||
1860 | aggregate(tg, sd)->shares += shares; | ||
1861 | } | ||
1862 | } | ||
1863 | |||
1864 | /* | ||
1865 | * Calculate the accumulative weight and recursive load of each task group | ||
1866 | * while walking down the tree. | ||
1867 | */ | ||
1868 | static | ||
1869 | void aggregate_get_down(struct task_group *tg, struct sched_domain *sd) | ||
1870 | { | ||
1871 | aggregate_group_weight(tg, sd); | ||
1872 | aggregate_group_shares(tg, sd); | ||
1873 | aggregate_group_load(tg, sd); | ||
1874 | } | ||
1875 | |||
1876 | /* | ||
1877 | * Rebalance the cpu shares while walking back up the tree. | ||
1878 | */ | ||
1879 | static | ||
1880 | void aggregate_get_up(struct task_group *tg, struct sched_domain *sd) | ||
1881 | { | ||
1882 | aggregate_group_set_shares(tg, sd); | ||
1883 | } | ||
1884 | |||
1885 | static DEFINE_PER_CPU(spinlock_t, aggregate_lock); | ||
1886 | |||
1887 | static void __init init_aggregate(void) | ||
1888 | { | ||
1889 | int i; | ||
1890 | |||
1891 | for_each_possible_cpu(i) | ||
1892 | spin_lock_init(&per_cpu(aggregate_lock, i)); | ||
1893 | } | ||
1894 | |||
1895 | static int get_aggregate(struct sched_domain *sd) | ||
1896 | { | ||
1897 | if (!spin_trylock(&per_cpu(aggregate_lock, sd->first_cpu))) | ||
1898 | return 0; | ||
1899 | |||
1900 | aggregate_walk_tree(aggregate_get_down, aggregate_get_up, sd); | ||
1901 | return 1; | ||
1902 | } | ||
1903 | |||
1904 | static void put_aggregate(struct sched_domain *sd) | ||
1905 | { | ||
1906 | spin_unlock(&per_cpu(aggregate_lock, sd->first_cpu)); | ||
1907 | } | ||
1908 | |||
1909 | static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | ||
1910 | { | ||
1911 | cfs_rq->shares = shares; | ||
1912 | } | ||
1913 | |||
1914 | #else | ||
1915 | |||
1916 | static inline void init_aggregate(void) | ||
1917 | { | ||
1918 | } | ||
1919 | |||
1920 | static inline int get_aggregate(struct sched_domain *sd) | ||
1921 | { | ||
1922 | return 0; | ||
1923 | } | ||
1924 | |||
1925 | static inline void put_aggregate(struct sched_domain *sd) | ||
1926 | { | ||
1927 | } | ||
1928 | #endif | ||
1929 | |||
1930 | #else /* CONFIG_SMP */ | ||
1931 | |||
1932 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
1933 | static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | ||
1934 | { | ||
1935 | } | ||
1936 | #endif | ||
1937 | |||
1249 | #endif /* CONFIG_SMP */ | 1938 | #endif /* CONFIG_SMP */ |
1250 | 1939 | ||
1251 | #include "sched_stats.h" | 1940 | #include "sched_stats.h" |
@@ -1258,26 +1947,14 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | |||
1258 | 1947 | ||
1259 | #define sched_class_highest (&rt_sched_class) | 1948 | #define sched_class_highest (&rt_sched_class) |
1260 | 1949 | ||
1261 | static inline void inc_load(struct rq *rq, const struct task_struct *p) | 1950 | static void inc_nr_running(struct rq *rq) |
1262 | { | ||
1263 | update_load_add(&rq->load, p->se.load.weight); | ||
1264 | } | ||
1265 | |||
1266 | static inline void dec_load(struct rq *rq, const struct task_struct *p) | ||
1267 | { | ||
1268 | update_load_sub(&rq->load, p->se.load.weight); | ||
1269 | } | ||
1270 | |||
1271 | static void inc_nr_running(struct task_struct *p, struct rq *rq) | ||
1272 | { | 1951 | { |
1273 | rq->nr_running++; | 1952 | rq->nr_running++; |
1274 | inc_load(rq, p); | ||
1275 | } | 1953 | } |
1276 | 1954 | ||
1277 | static void dec_nr_running(struct task_struct *p, struct rq *rq) | 1955 | static void dec_nr_running(struct rq *rq) |
1278 | { | 1956 | { |
1279 | rq->nr_running--; | 1957 | rq->nr_running--; |
1280 | dec_load(rq, p); | ||
1281 | } | 1958 | } |
1282 | 1959 | ||
1283 | static void set_load_weight(struct task_struct *p) | 1960 | static void set_load_weight(struct task_struct *p) |
@@ -1369,7 +2046,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) | |||
1369 | rq->nr_uninterruptible--; | 2046 | rq->nr_uninterruptible--; |
1370 | 2047 | ||
1371 | enqueue_task(rq, p, wakeup); | 2048 | enqueue_task(rq, p, wakeup); |
1372 | inc_nr_running(p, rq); | 2049 | inc_nr_running(rq); |
1373 | } | 2050 | } |
1374 | 2051 | ||
1375 | /* | 2052 | /* |
@@ -1381,7 +2058,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) | |||
1381 | rq->nr_uninterruptible++; | 2058 | rq->nr_uninterruptible++; |
1382 | 2059 | ||
1383 | dequeue_task(rq, p, sleep); | 2060 | dequeue_task(rq, p, sleep); |
1384 | dec_nr_running(p, rq); | 2061 | dec_nr_running(rq); |
1385 | } | 2062 | } |
1386 | 2063 | ||
1387 | /** | 2064 | /** |
@@ -1438,7 +2115,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
1438 | /* | 2115 | /* |
1439 | * Buddy candidates are cache hot: | 2116 | * Buddy candidates are cache hot: |
1440 | */ | 2117 | */ |
1441 | if (&p->se == cfs_rq_of(&p->se)->next) | 2118 | if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next)) |
1442 | return 1; | 2119 | return 1; |
1443 | 2120 | ||
1444 | if (p->sched_class != &fair_sched_class) | 2121 | if (p->sched_class != &fair_sched_class) |
@@ -1728,17 +2405,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
1728 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | 2405 | * find_idlest_cpu - find the idlest cpu among the cpus in group. |
1729 | */ | 2406 | */ |
1730 | static int | 2407 | static int |
1731 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | 2408 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, |
2409 | cpumask_t *tmp) | ||
1732 | { | 2410 | { |
1733 | cpumask_t tmp; | ||
1734 | unsigned long load, min_load = ULONG_MAX; | 2411 | unsigned long load, min_load = ULONG_MAX; |
1735 | int idlest = -1; | 2412 | int idlest = -1; |
1736 | int i; | 2413 | int i; |
1737 | 2414 | ||
1738 | /* Traverse only the allowed CPUs */ | 2415 | /* Traverse only the allowed CPUs */ |
1739 | cpus_and(tmp, group->cpumask, p->cpus_allowed); | 2416 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); |
1740 | 2417 | ||
1741 | for_each_cpu_mask(i, tmp) { | 2418 | for_each_cpu_mask(i, *tmp) { |
1742 | load = weighted_cpuload(i); | 2419 | load = weighted_cpuload(i); |
1743 | 2420 | ||
1744 | if (load < min_load || (load == min_load && i == this_cpu)) { | 2421 | if (load < min_load || (load == min_load && i == this_cpu)) { |
@@ -1777,7 +2454,7 @@ static int sched_balance_self(int cpu, int flag) | |||
1777 | } | 2454 | } |
1778 | 2455 | ||
1779 | while (sd) { | 2456 | while (sd) { |
1780 | cpumask_t span; | 2457 | cpumask_t span, tmpmask; |
1781 | struct sched_group *group; | 2458 | struct sched_group *group; |
1782 | int new_cpu, weight; | 2459 | int new_cpu, weight; |
1783 | 2460 | ||
@@ -1793,7 +2470,7 @@ static int sched_balance_self(int cpu, int flag) | |||
1793 | continue; | 2470 | continue; |
1794 | } | 2471 | } |
1795 | 2472 | ||
1796 | new_cpu = find_idlest_cpu(group, t, cpu); | 2473 | new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); |
1797 | if (new_cpu == -1 || new_cpu == cpu) { | 2474 | if (new_cpu == -1 || new_cpu == cpu) { |
1798 | /* Now try balancing at a lower domain level of cpu */ | 2475 | /* Now try balancing at a lower domain level of cpu */ |
1799 | sd = sd->child; | 2476 | sd = sd->child; |
@@ -1839,6 +2516,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
1839 | long old_state; | 2516 | long old_state; |
1840 | struct rq *rq; | 2517 | struct rq *rq; |
1841 | 2518 | ||
2519 | if (!sched_feat(SYNC_WAKEUPS)) | ||
2520 | sync = 0; | ||
2521 | |||
1842 | smp_wmb(); | 2522 | smp_wmb(); |
1843 | rq = task_rq_lock(p, &flags); | 2523 | rq = task_rq_lock(p, &flags); |
1844 | old_state = p->state; | 2524 | old_state = p->state; |
@@ -1955,6 +2635,7 @@ static void __sched_fork(struct task_struct *p) | |||
1955 | 2635 | ||
1956 | INIT_LIST_HEAD(&p->rt.run_list); | 2636 | INIT_LIST_HEAD(&p->rt.run_list); |
1957 | p->se.on_rq = 0; | 2637 | p->se.on_rq = 0; |
2638 | INIT_LIST_HEAD(&p->se.group_node); | ||
1958 | 2639 | ||
1959 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 2640 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
1960 | INIT_HLIST_HEAD(&p->preempt_notifiers); | 2641 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
@@ -2030,7 +2711,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2030 | * management (if any): | 2711 | * management (if any): |
2031 | */ | 2712 | */ |
2032 | p->sched_class->task_new(rq, p); | 2713 | p->sched_class->task_new(rq, p); |
2033 | inc_nr_running(p, rq); | 2714 | inc_nr_running(rq); |
2034 | } | 2715 | } |
2035 | check_preempt_curr(rq, p); | 2716 | check_preempt_curr(rq, p); |
2036 | #ifdef CONFIG_SMP | 2717 | #ifdef CONFIG_SMP |
@@ -2674,7 +3355,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
2674 | static struct sched_group * | 3355 | static struct sched_group * |
2675 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 3356 | find_busiest_group(struct sched_domain *sd, int this_cpu, |
2676 | unsigned long *imbalance, enum cpu_idle_type idle, | 3357 | unsigned long *imbalance, enum cpu_idle_type idle, |
2677 | int *sd_idle, cpumask_t *cpus, int *balance) | 3358 | int *sd_idle, const cpumask_t *cpus, int *balance) |
2678 | { | 3359 | { |
2679 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 3360 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
2680 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 3361 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
@@ -2975,7 +3656,7 @@ ret: | |||
2975 | */ | 3656 | */ |
2976 | static struct rq * | 3657 | static struct rq * |
2977 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | 3658 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, |
2978 | unsigned long imbalance, cpumask_t *cpus) | 3659 | unsigned long imbalance, const cpumask_t *cpus) |
2979 | { | 3660 | { |
2980 | struct rq *busiest = NULL, *rq; | 3661 | struct rq *busiest = NULL, *rq; |
2981 | unsigned long max_load = 0; | 3662 | unsigned long max_load = 0; |
@@ -3014,14 +3695,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3014 | */ | 3695 | */ |
3015 | static int load_balance(int this_cpu, struct rq *this_rq, | 3696 | static int load_balance(int this_cpu, struct rq *this_rq, |
3016 | struct sched_domain *sd, enum cpu_idle_type idle, | 3697 | struct sched_domain *sd, enum cpu_idle_type idle, |
3017 | int *balance) | 3698 | int *balance, cpumask_t *cpus) |
3018 | { | 3699 | { |
3019 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | 3700 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; |
3020 | struct sched_group *group; | 3701 | struct sched_group *group; |
3021 | unsigned long imbalance; | 3702 | unsigned long imbalance; |
3022 | struct rq *busiest; | 3703 | struct rq *busiest; |
3023 | cpumask_t cpus = CPU_MASK_ALL; | ||
3024 | unsigned long flags; | 3704 | unsigned long flags; |
3705 | int unlock_aggregate; | ||
3706 | |||
3707 | cpus_setall(*cpus); | ||
3708 | |||
3709 | unlock_aggregate = get_aggregate(sd); | ||
3025 | 3710 | ||
3026 | /* | 3711 | /* |
3027 | * When power savings policy is enabled for the parent domain, idle | 3712 | * When power savings policy is enabled for the parent domain, idle |
@@ -3037,7 +3722,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
3037 | 3722 | ||
3038 | redo: | 3723 | redo: |
3039 | group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, | 3724 | group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, |
3040 | &cpus, balance); | 3725 | cpus, balance); |
3041 | 3726 | ||
3042 | if (*balance == 0) | 3727 | if (*balance == 0) |
3043 | goto out_balanced; | 3728 | goto out_balanced; |
@@ -3047,7 +3732,7 @@ redo: | |||
3047 | goto out_balanced; | 3732 | goto out_balanced; |
3048 | } | 3733 | } |
3049 | 3734 | ||
3050 | busiest = find_busiest_queue(group, idle, imbalance, &cpus); | 3735 | busiest = find_busiest_queue(group, idle, imbalance, cpus); |
3051 | if (!busiest) { | 3736 | if (!busiest) { |
3052 | schedstat_inc(sd, lb_nobusyq[idle]); | 3737 | schedstat_inc(sd, lb_nobusyq[idle]); |
3053 | goto out_balanced; | 3738 | goto out_balanced; |
@@ -3080,8 +3765,8 @@ redo: | |||
3080 | 3765 | ||
3081 | /* All tasks on this runqueue were pinned by CPU affinity */ | 3766 | /* All tasks on this runqueue were pinned by CPU affinity */ |
3082 | if (unlikely(all_pinned)) { | 3767 | if (unlikely(all_pinned)) { |
3083 | cpu_clear(cpu_of(busiest), cpus); | 3768 | cpu_clear(cpu_of(busiest), *cpus); |
3084 | if (!cpus_empty(cpus)) | 3769 | if (!cpus_empty(*cpus)) |
3085 | goto redo; | 3770 | goto redo; |
3086 | goto out_balanced; | 3771 | goto out_balanced; |
3087 | } | 3772 | } |
@@ -3138,8 +3823,9 @@ redo: | |||
3138 | 3823 | ||
3139 | if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 3824 | if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
3140 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 3825 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
3141 | return -1; | 3826 | ld_moved = -1; |
3142 | return ld_moved; | 3827 | |
3828 | goto out; | ||
3143 | 3829 | ||
3144 | out_balanced: | 3830 | out_balanced: |
3145 | schedstat_inc(sd, lb_balanced[idle]); | 3831 | schedstat_inc(sd, lb_balanced[idle]); |
@@ -3154,8 +3840,13 @@ out_one_pinned: | |||
3154 | 3840 | ||
3155 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 3841 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
3156 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 3842 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
3157 | return -1; | 3843 | ld_moved = -1; |
3158 | return 0; | 3844 | else |
3845 | ld_moved = 0; | ||
3846 | out: | ||
3847 | if (unlock_aggregate) | ||
3848 | put_aggregate(sd); | ||
3849 | return ld_moved; | ||
3159 | } | 3850 | } |
3160 | 3851 | ||
3161 | /* | 3852 | /* |
@@ -3166,7 +3857,8 @@ out_one_pinned: | |||
3166 | * this_rq is locked. | 3857 | * this_rq is locked. |
3167 | */ | 3858 | */ |
3168 | static int | 3859 | static int |
3169 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | 3860 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, |
3861 | cpumask_t *cpus) | ||
3170 | { | 3862 | { |
3171 | struct sched_group *group; | 3863 | struct sched_group *group; |
3172 | struct rq *busiest = NULL; | 3864 | struct rq *busiest = NULL; |
@@ -3174,7 +3866,8 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | |||
3174 | int ld_moved = 0; | 3866 | int ld_moved = 0; |
3175 | int sd_idle = 0; | 3867 | int sd_idle = 0; |
3176 | int all_pinned = 0; | 3868 | int all_pinned = 0; |
3177 | cpumask_t cpus = CPU_MASK_ALL; | 3869 | |
3870 | cpus_setall(*cpus); | ||
3178 | 3871 | ||
3179 | /* | 3872 | /* |
3180 | * When power savings policy is enabled for the parent domain, idle | 3873 | * When power savings policy is enabled for the parent domain, idle |
@@ -3189,14 +3882,13 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | |||
3189 | schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]); | 3882 | schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]); |
3190 | redo: | 3883 | redo: |
3191 | group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE, | 3884 | group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE, |
3192 | &sd_idle, &cpus, NULL); | 3885 | &sd_idle, cpus, NULL); |
3193 | if (!group) { | 3886 | if (!group) { |
3194 | schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]); | 3887 | schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]); |
3195 | goto out_balanced; | 3888 | goto out_balanced; |
3196 | } | 3889 | } |
3197 | 3890 | ||
3198 | busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, | 3891 | busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus); |
3199 | &cpus); | ||
3200 | if (!busiest) { | 3892 | if (!busiest) { |
3201 | schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]); | 3893 | schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]); |
3202 | goto out_balanced; | 3894 | goto out_balanced; |
@@ -3218,8 +3910,8 @@ redo: | |||
3218 | spin_unlock(&busiest->lock); | 3910 | spin_unlock(&busiest->lock); |
3219 | 3911 | ||
3220 | if (unlikely(all_pinned)) { | 3912 | if (unlikely(all_pinned)) { |
3221 | cpu_clear(cpu_of(busiest), cpus); | 3913 | cpu_clear(cpu_of(busiest), *cpus); |
3222 | if (!cpus_empty(cpus)) | 3914 | if (!cpus_empty(*cpus)) |
3223 | goto redo; | 3915 | goto redo; |
3224 | } | 3916 | } |
3225 | } | 3917 | } |
@@ -3253,6 +3945,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3253 | struct sched_domain *sd; | 3945 | struct sched_domain *sd; |
3254 | int pulled_task = -1; | 3946 | int pulled_task = -1; |
3255 | unsigned long next_balance = jiffies + HZ; | 3947 | unsigned long next_balance = jiffies + HZ; |
3948 | cpumask_t tmpmask; | ||
3256 | 3949 | ||
3257 | for_each_domain(this_cpu, sd) { | 3950 | for_each_domain(this_cpu, sd) { |
3258 | unsigned long interval; | 3951 | unsigned long interval; |
@@ -3262,8 +3955,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3262 | 3955 | ||
3263 | if (sd->flags & SD_BALANCE_NEWIDLE) | 3956 | if (sd->flags & SD_BALANCE_NEWIDLE) |
3264 | /* If we've pulled tasks over stop searching: */ | 3957 | /* If we've pulled tasks over stop searching: */ |
3265 | pulled_task = load_balance_newidle(this_cpu, | 3958 | pulled_task = load_balance_newidle(this_cpu, this_rq, |
3266 | this_rq, sd); | 3959 | sd, &tmpmask); |
3267 | 3960 | ||
3268 | interval = msecs_to_jiffies(sd->balance_interval); | 3961 | interval = msecs_to_jiffies(sd->balance_interval); |
3269 | if (time_after(next_balance, sd->last_balance + interval)) | 3962 | if (time_after(next_balance, sd->last_balance + interval)) |
@@ -3422,6 +4115,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3422 | /* Earliest time when we have to do rebalance again */ | 4115 | /* Earliest time when we have to do rebalance again */ |
3423 | unsigned long next_balance = jiffies + 60*HZ; | 4116 | unsigned long next_balance = jiffies + 60*HZ; |
3424 | int update_next_balance = 0; | 4117 | int update_next_balance = 0; |
4118 | cpumask_t tmp; | ||
3425 | 4119 | ||
3426 | for_each_domain(cpu, sd) { | 4120 | for_each_domain(cpu, sd) { |
3427 | if (!(sd->flags & SD_LOAD_BALANCE)) | 4121 | if (!(sd->flags & SD_LOAD_BALANCE)) |
@@ -3445,7 +4139,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3445 | } | 4139 | } |
3446 | 4140 | ||
3447 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | 4141 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
3448 | if (load_balance(cpu, rq, sd, idle, &balance)) { | 4142 | if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { |
3449 | /* | 4143 | /* |
3450 | * We've pulled tasks over so either we're no | 4144 | * We've pulled tasks over so either we're no |
3451 | * longer idle, or one of our SMT siblings is | 4145 | * longer idle, or one of our SMT siblings is |
@@ -3561,7 +4255,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
3561 | */ | 4255 | */ |
3562 | int ilb = first_cpu(nohz.cpu_mask); | 4256 | int ilb = first_cpu(nohz.cpu_mask); |
3563 | 4257 | ||
3564 | if (ilb != NR_CPUS) | 4258 | if (ilb < nr_cpu_ids) |
3565 | resched_cpu(ilb); | 4259 | resched_cpu(ilb); |
3566 | } | 4260 | } |
3567 | } | 4261 | } |
@@ -3765,9 +4459,9 @@ void scheduler_tick(void) | |||
3765 | rq->clock_underflows++; | 4459 | rq->clock_underflows++; |
3766 | } | 4460 | } |
3767 | rq->tick_timestamp = rq->clock; | 4461 | rq->tick_timestamp = rq->clock; |
4462 | update_last_tick_seen(rq); | ||
3768 | update_cpu_load(rq); | 4463 | update_cpu_load(rq); |
3769 | curr->sched_class->task_tick(rq, curr, 0); | 4464 | curr->sched_class->task_tick(rq, curr, 0); |
3770 | update_sched_rt_period(rq); | ||
3771 | spin_unlock(&rq->lock); | 4465 | spin_unlock(&rq->lock); |
3772 | 4466 | ||
3773 | #ifdef CONFIG_SMP | 4467 | #ifdef CONFIG_SMP |
@@ -4367,10 +5061,8 @@ void set_user_nice(struct task_struct *p, long nice) | |||
4367 | goto out_unlock; | 5061 | goto out_unlock; |
4368 | } | 5062 | } |
4369 | on_rq = p->se.on_rq; | 5063 | on_rq = p->se.on_rq; |
4370 | if (on_rq) { | 5064 | if (on_rq) |
4371 | dequeue_task(rq, p, 0); | 5065 | dequeue_task(rq, p, 0); |
4372 | dec_load(rq, p); | ||
4373 | } | ||
4374 | 5066 | ||
4375 | p->static_prio = NICE_TO_PRIO(nice); | 5067 | p->static_prio = NICE_TO_PRIO(nice); |
4376 | set_load_weight(p); | 5068 | set_load_weight(p); |
@@ -4380,7 +5072,6 @@ void set_user_nice(struct task_struct *p, long nice) | |||
4380 | 5072 | ||
4381 | if (on_rq) { | 5073 | if (on_rq) { |
4382 | enqueue_task(rq, p, 0); | 5074 | enqueue_task(rq, p, 0); |
4383 | inc_load(rq, p); | ||
4384 | /* | 5075 | /* |
4385 | * If the task increased its priority or is running and | 5076 | * If the task increased its priority or is running and |
4386 | * lowered its priority, then reschedule its CPU: | 5077 | * lowered its priority, then reschedule its CPU: |
@@ -4602,7 +5293,7 @@ recheck: | |||
4602 | * Do not allow realtime tasks into groups that have no runtime | 5293 | * Do not allow realtime tasks into groups that have no runtime |
4603 | * assigned. | 5294 | * assigned. |
4604 | */ | 5295 | */ |
4605 | if (rt_policy(policy) && task_group(p)->rt_runtime == 0) | 5296 | if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) |
4606 | return -EPERM; | 5297 | return -EPERM; |
4607 | #endif | 5298 | #endif |
4608 | 5299 | ||
@@ -4764,9 +5455,10 @@ out_unlock: | |||
4764 | return retval; | 5455 | return retval; |
4765 | } | 5456 | } |
4766 | 5457 | ||
4767 | long sched_setaffinity(pid_t pid, cpumask_t new_mask) | 5458 | long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) |
4768 | { | 5459 | { |
4769 | cpumask_t cpus_allowed; | 5460 | cpumask_t cpus_allowed; |
5461 | cpumask_t new_mask = *in_mask; | ||
4770 | struct task_struct *p; | 5462 | struct task_struct *p; |
4771 | int retval; | 5463 | int retval; |
4772 | 5464 | ||
@@ -4797,13 +5489,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask) | |||
4797 | if (retval) | 5489 | if (retval) |
4798 | goto out_unlock; | 5490 | goto out_unlock; |
4799 | 5491 | ||
4800 | cpus_allowed = cpuset_cpus_allowed(p); | 5492 | cpuset_cpus_allowed(p, &cpus_allowed); |
4801 | cpus_and(new_mask, new_mask, cpus_allowed); | 5493 | cpus_and(new_mask, new_mask, cpus_allowed); |
4802 | again: | 5494 | again: |
4803 | retval = set_cpus_allowed(p, new_mask); | 5495 | retval = set_cpus_allowed_ptr(p, &new_mask); |
4804 | 5496 | ||
4805 | if (!retval) { | 5497 | if (!retval) { |
4806 | cpus_allowed = cpuset_cpus_allowed(p); | 5498 | cpuset_cpus_allowed(p, &cpus_allowed); |
4807 | if (!cpus_subset(new_mask, cpus_allowed)) { | 5499 | if (!cpus_subset(new_mask, cpus_allowed)) { |
4808 | /* | 5500 | /* |
4809 | * We must have raced with a concurrent cpuset | 5501 | * We must have raced with a concurrent cpuset |
@@ -4847,7 +5539,7 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | |||
4847 | if (retval) | 5539 | if (retval) |
4848 | return retval; | 5540 | return retval; |
4849 | 5541 | ||
4850 | return sched_setaffinity(pid, new_mask); | 5542 | return sched_setaffinity(pid, &new_mask); |
4851 | } | 5543 | } |
4852 | 5544 | ||
4853 | /* | 5545 | /* |
@@ -5309,7 +6001,6 @@ static inline void sched_init_granularity(void) | |||
5309 | sysctl_sched_latency = limit; | 6001 | sysctl_sched_latency = limit; |
5310 | 6002 | ||
5311 | sysctl_sched_wakeup_granularity *= factor; | 6003 | sysctl_sched_wakeup_granularity *= factor; |
5312 | sysctl_sched_batch_wakeup_granularity *= factor; | ||
5313 | } | 6004 | } |
5314 | 6005 | ||
5315 | #ifdef CONFIG_SMP | 6006 | #ifdef CONFIG_SMP |
@@ -5338,7 +6029,7 @@ static inline void sched_init_granularity(void) | |||
5338 | * task must not exit() & deallocate itself prematurely. The | 6029 | * task must not exit() & deallocate itself prematurely. The |
5339 | * call is not atomic; no spinlocks may be held. | 6030 | * call is not atomic; no spinlocks may be held. |
5340 | */ | 6031 | */ |
5341 | int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | 6032 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) |
5342 | { | 6033 | { |
5343 | struct migration_req req; | 6034 | struct migration_req req; |
5344 | unsigned long flags; | 6035 | unsigned long flags; |
@@ -5346,23 +6037,23 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | |||
5346 | int ret = 0; | 6037 | int ret = 0; |
5347 | 6038 | ||
5348 | rq = task_rq_lock(p, &flags); | 6039 | rq = task_rq_lock(p, &flags); |
5349 | if (!cpus_intersects(new_mask, cpu_online_map)) { | 6040 | if (!cpus_intersects(*new_mask, cpu_online_map)) { |
5350 | ret = -EINVAL; | 6041 | ret = -EINVAL; |
5351 | goto out; | 6042 | goto out; |
5352 | } | 6043 | } |
5353 | 6044 | ||
5354 | if (p->sched_class->set_cpus_allowed) | 6045 | if (p->sched_class->set_cpus_allowed) |
5355 | p->sched_class->set_cpus_allowed(p, &new_mask); | 6046 | p->sched_class->set_cpus_allowed(p, new_mask); |
5356 | else { | 6047 | else { |
5357 | p->cpus_allowed = new_mask; | 6048 | p->cpus_allowed = *new_mask; |
5358 | p->rt.nr_cpus_allowed = cpus_weight(new_mask); | 6049 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); |
5359 | } | 6050 | } |
5360 | 6051 | ||
5361 | /* Can the task run on the task's current CPU? If so, we're done */ | 6052 | /* Can the task run on the task's current CPU? If so, we're done */ |
5362 | if (cpu_isset(task_cpu(p), new_mask)) | 6053 | if (cpu_isset(task_cpu(p), *new_mask)) |
5363 | goto out; | 6054 | goto out; |
5364 | 6055 | ||
5365 | if (migrate_task(p, any_online_cpu(new_mask), &req)) { | 6056 | if (migrate_task(p, any_online_cpu(*new_mask), &req)) { |
5366 | /* Need help from migration thread: drop lock and wait. */ | 6057 | /* Need help from migration thread: drop lock and wait. */ |
5367 | task_rq_unlock(rq, &flags); | 6058 | task_rq_unlock(rq, &flags); |
5368 | wake_up_process(rq->migration_thread); | 6059 | wake_up_process(rq->migration_thread); |
@@ -5375,7 +6066,7 @@ out: | |||
5375 | 6066 | ||
5376 | return ret; | 6067 | return ret; |
5377 | } | 6068 | } |
5378 | EXPORT_SYMBOL_GPL(set_cpus_allowed); | 6069 | EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); |
5379 | 6070 | ||
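set_cpus_allowed() becomes set_cpus_allowed_ptr() above, taking a const cpumask_t pointer instead of a by-value mask. A rough userspace illustration of the motivation, assuming a large NR_CPUS; the types and function names below only mimic the kernel's and are local mock-ups, not the real API.

/*
 * With NR_CPUS = 4096 a cpumask_t is 512 bytes, so passing it by value
 * copies it onto the stack at every call site; passing a pointer does not.
 */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096

typedef struct {
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

static size_t set_cpus_allowed(cpumask_t new_mask)              /* old style */
{
        return sizeof(new_mask);        /* whole mask copied in */
}

static size_t set_cpus_allowed_ptr(const cpumask_t *new_mask)   /* new style */
{
        return sizeof(new_mask);        /* only a pointer crosses the call */
}

int main(void)
{
        cpumask_t mask;

        memset(&mask, 0, sizeof(mask));
        printf("by value:   %zu bytes per call\n", set_cpus_allowed(mask));
        printf("by pointer: %zu bytes per call\n", set_cpus_allowed_ptr(&mask));
        return 0;
}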
5380 | /* | 6071 | /* |
5381 | * Move (not current) task off this cpu, onto dest cpu. We're doing | 6072 | * Move (not current) task off this cpu, onto dest cpu. We're doing |
@@ -5513,12 +6204,14 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
5513 | dest_cpu = any_online_cpu(mask); | 6204 | dest_cpu = any_online_cpu(mask); |
5514 | 6205 | ||
5515 | /* On any allowed CPU? */ | 6206 | /* On any allowed CPU? */ |
5516 | if (dest_cpu == NR_CPUS) | 6207 | if (dest_cpu >= nr_cpu_ids) |
5517 | dest_cpu = any_online_cpu(p->cpus_allowed); | 6208 | dest_cpu = any_online_cpu(p->cpus_allowed); |
5518 | 6209 | ||
5519 | /* No more Mr. Nice Guy. */ | 6210 | /* No more Mr. Nice Guy. */ |
5520 | if (dest_cpu == NR_CPUS) { | 6211 | if (dest_cpu >= nr_cpu_ids) { |
5521 | cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p); | 6212 | cpumask_t cpus_allowed; |
6213 | |||
6214 | cpuset_cpus_allowed_locked(p, &cpus_allowed); | ||
5522 | /* | 6215 | /* |
5523 | * Try to stay on the same cpuset, where the | 6216 | * Try to stay on the same cpuset, where the |
5524 | * current cpuset may be a subset of all cpus. | 6217 | * current cpuset may be a subset of all cpus. |
@@ -5554,7 +6247,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
5554 | */ | 6247 | */ |
5555 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 6248 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
5556 | { | 6249 | { |
5557 | struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); | 6250 | struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); |
5558 | unsigned long flags; | 6251 | unsigned long flags; |
5559 | 6252 | ||
5560 | local_irq_save(flags); | 6253 | local_irq_save(flags); |
@@ -5966,20 +6659,16 @@ void __init migration_init(void) | |||
5966 | 6659 | ||
5967 | #ifdef CONFIG_SMP | 6660 | #ifdef CONFIG_SMP |
5968 | 6661 | ||
5969 | /* Number of possible processor ids */ | ||
5970 | int nr_cpu_ids __read_mostly = NR_CPUS; | ||
5971 | EXPORT_SYMBOL(nr_cpu_ids); | ||
5972 | |||
5973 | #ifdef CONFIG_SCHED_DEBUG | 6662 | #ifdef CONFIG_SCHED_DEBUG |
5974 | 6663 | ||
5975 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level) | 6664 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6665 | cpumask_t *groupmask) | ||
5976 | { | 6666 | { |
5977 | struct sched_group *group = sd->groups; | 6667 | struct sched_group *group = sd->groups; |
5978 | cpumask_t groupmask; | 6668 | char str[256]; |
5979 | char str[NR_CPUS]; | ||
5980 | 6669 | ||
5981 | cpumask_scnprintf(str, NR_CPUS, sd->span); | 6670 | cpulist_scnprintf(str, sizeof(str), sd->span); |
5982 | cpus_clear(groupmask); | 6671 | cpus_clear(*groupmask); |
5983 | 6672 | ||
5984 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | 6673 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
5985 | 6674 | ||
@@ -6023,25 +6712,25 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level) | |||
6023 | break; | 6712 | break; |
6024 | } | 6713 | } |
6025 | 6714 | ||
6026 | if (cpus_intersects(groupmask, group->cpumask)) { | 6715 | if (cpus_intersects(*groupmask, group->cpumask)) { |
6027 | printk(KERN_CONT "\n"); | 6716 | printk(KERN_CONT "\n"); |
6028 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | 6717 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
6029 | break; | 6718 | break; |
6030 | } | 6719 | } |
6031 | 6720 | ||
6032 | cpus_or(groupmask, groupmask, group->cpumask); | 6721 | cpus_or(*groupmask, *groupmask, group->cpumask); |
6033 | 6722 | ||
6034 | cpumask_scnprintf(str, NR_CPUS, group->cpumask); | 6723 | cpulist_scnprintf(str, sizeof(str), group->cpumask); |
6035 | printk(KERN_CONT " %s", str); | 6724 | printk(KERN_CONT " %s", str); |
6036 | 6725 | ||
6037 | group = group->next; | 6726 | group = group->next; |
6038 | } while (group != sd->groups); | 6727 | } while (group != sd->groups); |
6039 | printk(KERN_CONT "\n"); | 6728 | printk(KERN_CONT "\n"); |
6040 | 6729 | ||
6041 | if (!cpus_equal(sd->span, groupmask)) | 6730 | if (!cpus_equal(sd->span, *groupmask)) |
6042 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | 6731 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
6043 | 6732 | ||
6044 | if (sd->parent && !cpus_subset(groupmask, sd->parent->span)) | 6733 | if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) |
6045 | printk(KERN_ERR "ERROR: parent span is not a superset " | 6734 | printk(KERN_ERR "ERROR: parent span is not a superset " |
6046 | "of domain->span\n"); | 6735 | "of domain->span\n"); |
6047 | return 0; | 6736 | return 0; |
@@ -6049,6 +6738,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level) | |||
6049 | 6738 | ||
6050 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6739 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
6051 | { | 6740 | { |
6741 | cpumask_t *groupmask; | ||
6052 | int level = 0; | 6742 | int level = 0; |
6053 | 6743 | ||
6054 | if (!sd) { | 6744 | if (!sd) { |
@@ -6058,14 +6748,21 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6058 | 6748 | ||
6059 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6749 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
6060 | 6750 | ||
6751 | groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | ||
6752 | if (!groupmask) { | ||
6753 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | ||
6754 | return; | ||
6755 | } | ||
6756 | |||
6061 | for (;;) { | 6757 | for (;;) { |
6062 | if (sched_domain_debug_one(sd, cpu, level)) | 6758 | if (sched_domain_debug_one(sd, cpu, level, groupmask)) |
6063 | break; | 6759 | break; |
6064 | level++; | 6760 | level++; |
6065 | sd = sd->parent; | 6761 | sd = sd->parent; |
6066 | if (!sd) | 6762 | if (!sd) |
6067 | break; | 6763 | break; |
6068 | } | 6764 | } |
6765 | kfree(groupmask); | ||
6069 | } | 6766 | } |
6070 | #else | 6767 | #else |
6071 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6768 | # define sched_domain_debug(sd, cpu) do { } while (0) |
@@ -6253,30 +6950,33 @@ __setup("isolcpus=", isolated_cpu_setup); | |||
6253 | * and ->cpu_power to 0. | 6950 | * and ->cpu_power to 0. |
6254 | */ | 6951 | */ |
6255 | static void | 6952 | static void |
6256 | init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map, | 6953 | init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, |
6257 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, | 6954 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, |
6258 | struct sched_group **sg)) | 6955 | struct sched_group **sg, |
6956 | cpumask_t *tmpmask), | ||
6957 | cpumask_t *covered, cpumask_t *tmpmask) | ||
6259 | { | 6958 | { |
6260 | struct sched_group *first = NULL, *last = NULL; | 6959 | struct sched_group *first = NULL, *last = NULL; |
6261 | cpumask_t covered = CPU_MASK_NONE; | ||
6262 | int i; | 6960 | int i; |
6263 | 6961 | ||
6264 | for_each_cpu_mask(i, span) { | 6962 | cpus_clear(*covered); |
6963 | |||
6964 | for_each_cpu_mask(i, *span) { | ||
6265 | struct sched_group *sg; | 6965 | struct sched_group *sg; |
6266 | int group = group_fn(i, cpu_map, &sg); | 6966 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6267 | int j; | 6967 | int j; |
6268 | 6968 | ||
6269 | if (cpu_isset(i, covered)) | 6969 | if (cpu_isset(i, *covered)) |
6270 | continue; | 6970 | continue; |
6271 | 6971 | ||
6272 | sg->cpumask = CPU_MASK_NONE; | 6972 | cpus_clear(sg->cpumask); |
6273 | sg->__cpu_power = 0; | 6973 | sg->__cpu_power = 0; |
6274 | 6974 | ||
6275 | for_each_cpu_mask(j, span) { | 6975 | for_each_cpu_mask(j, *span) { |
6276 | if (group_fn(j, cpu_map, NULL) != group) | 6976 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6277 | continue; | 6977 | continue; |
6278 | 6978 | ||
6279 | cpu_set(j, covered); | 6979 | cpu_set(j, *covered); |
6280 | cpu_set(j, sg->cpumask); | 6980 | cpu_set(j, sg->cpumask); |
6281 | } | 6981 | } |
6282 | if (!first) | 6982 | if (!first) |
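init_sched_build_groups() now receives its covered and tmpmask scratch masks from the caller, but the construction itself is unchanged: walk the span, skip CPUs already covered, collect every CPU that group_fn() places in the same group, and link the groups into a ring. A userspace approximation, where the pairing group function is an arbitrary stand-in for the kernel's cpu_to_*_group() helpers:

#include <stdio.h>

#define NCPUS 6

struct group {
        unsigned long cpumask;
        int next;                       /* index of next group in the ring */
};

static int group_fn(int cpu)
{
        return cpu / 2;                 /* assumed SMT-style pairs */
}

int main(void)
{
        unsigned long span = 0x3f;      /* CPUs 0-5 */
        unsigned long covered = 0;
        struct group groups[NCPUS];
        int first = -1, last = -1, ngroups = 0;
        int i, j, g, n;

        for (i = 0; i < NCPUS; i++) {
                if (!(span & (1UL << i)) || (covered & (1UL << i)))
                        continue;

                g = ngroups++;
                groups[g].cpumask = 0;
                for (j = 0; j < NCPUS; j++) {
                        if ((span & (1UL << j)) && group_fn(j) == group_fn(i)) {
                                covered |= 1UL << j;
                                groups[g].cpumask |= 1UL << j;
                        }
                }
                if (first < 0)
                        first = g;
                else
                        groups[last].next = g;
                last = g;
        }
        groups[last].next = first;      /* close the ring */

        for (g = first, n = 0; n < ngroups; g = groups[g].next, n++)
                printf("group %d: cpumask 0x%02lx\n", g, groups[g].cpumask);
        return 0;
}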
@@ -6302,7 +7002,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map, | |||
6302 | * | 7002 | * |
6303 | * Should use nodemask_t. | 7003 | * Should use nodemask_t. |
6304 | */ | 7004 | */ |
6305 | static int find_next_best_node(int node, unsigned long *used_nodes) | 7005 | static int find_next_best_node(int node, nodemask_t *used_nodes) |
6306 | { | 7006 | { |
6307 | int i, n, val, min_val, best_node = 0; | 7007 | int i, n, val, min_val, best_node = 0; |
6308 | 7008 | ||
@@ -6316,7 +7016,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes) | |||
6316 | continue; | 7016 | continue; |
6317 | 7017 | ||
6318 | /* Skip already used nodes */ | 7018 | /* Skip already used nodes */ |
6319 | if (test_bit(n, used_nodes)) | 7019 | if (node_isset(n, *used_nodes)) |
6320 | continue; | 7020 | continue; |
6321 | 7021 | ||
6322 | /* Simple min distance search */ | 7022 | /* Simple min distance search */ |
@@ -6328,40 +7028,36 @@ static int find_next_best_node(int node, unsigned long *used_nodes) | |||
6328 | } | 7028 | } |
6329 | } | 7029 | } |
6330 | 7030 | ||
6331 | set_bit(best_node, used_nodes); | 7031 | node_set(best_node, *used_nodes); |
6332 | return best_node; | 7032 | return best_node; |
6333 | } | 7033 | } |
6334 | 7034 | ||
6335 | /** | 7035 | /** |
6336 | * sched_domain_node_span - get a cpumask for a node's sched_domain | 7036 | * sched_domain_node_span - get a cpumask for a node's sched_domain |
6337 | * @node: node whose cpumask we're constructing | 7037 | * @node: node whose cpumask we're constructing |
6338 | * @size: number of nodes to include in this span | ||
6339 | * | 7038 | * |
6340 | * Given a node, construct a good cpumask for its sched_domain to span. It | 7039 | * Given a node, construct a good cpumask for its sched_domain to span. It |
6341 | * should be one that prevents unnecessary balancing, but also spreads tasks | 7040 | * should be one that prevents unnecessary balancing, but also spreads tasks |
6342 | * out optimally. | 7041 | * out optimally. |
6343 | */ | 7042 | */ |
6344 | static cpumask_t sched_domain_node_span(int node) | 7043 | static void sched_domain_node_span(int node, cpumask_t *span) |
6345 | { | 7044 | { |
6346 | DECLARE_BITMAP(used_nodes, MAX_NUMNODES); | 7045 | nodemask_t used_nodes; |
6347 | cpumask_t span, nodemask; | 7046 | node_to_cpumask_ptr(nodemask, node); |
6348 | int i; | 7047 | int i; |
6349 | 7048 | ||
6350 | cpus_clear(span); | 7049 | cpus_clear(*span); |
6351 | bitmap_zero(used_nodes, MAX_NUMNODES); | 7050 | nodes_clear(used_nodes); |
6352 | 7051 | ||
6353 | nodemask = node_to_cpumask(node); | 7052 | cpus_or(*span, *span, *nodemask); |
6354 | cpus_or(span, span, nodemask); | 7053 | node_set(node, used_nodes); |
6355 | set_bit(node, used_nodes); | ||
6356 | 7054 | ||
6357 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | 7055 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
6358 | int next_node = find_next_best_node(node, used_nodes); | 7056 | int next_node = find_next_best_node(node, &used_nodes); |
6359 | 7057 | ||
6360 | nodemask = node_to_cpumask(next_node); | 7058 | node_to_cpumask_ptr_next(nodemask, next_node); |
6361 | cpus_or(span, span, nodemask); | 7059 | cpus_or(*span, *span, *nodemask); |
6362 | } | 7060 | } |
6363 | |||
6364 | return span; | ||
6365 | } | 7061 | } |
6366 | #endif | 7062 | #endif |
6367 | 7063 | ||
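sched_domain_node_span() now fills a caller-provided mask instead of returning a cpumask_t by value, but the algorithm is the same: start with the node's own CPUs and greedily add the CPUs of the nearest not-yet-used nodes, as picked by find_next_best_node(), up to SD_NODES_PER_DOMAIN nodes. A sketch with an invented distance table and CPUs-per-node map, not real topology:

#include <stdio.h>

#define NODES                   4
#define SD_NODES_PER_DOMAIN     3

static const int node_distance[NODES][NODES] = {
        { 10, 20, 30, 40 },
        { 20, 10, 20, 30 },
        { 30, 20, 10, 20 },
        { 40, 30, 20, 10 },
};
static const unsigned long node_cpus[NODES] = { 0x03, 0x0c, 0x30, 0xc0 };

static int find_next_best_node(int node, unsigned long *used_nodes)
{
        int n, best_node = 0, min_val = 1 << 30;

        for (n = 0; n < NODES; n++) {
                if (*used_nodes & (1UL << n))
                        continue;                    /* skip used nodes */
                if (node_distance[node][n] < min_val) {
                        min_val = node_distance[node][n];
                        best_node = n;
                }
        }
        *used_nodes |= 1UL << best_node;
        return best_node;
}

int main(void)
{
        int node = 0, i;
        unsigned long used_nodes = 1UL << node;
        unsigned long span = node_cpus[node];

        for (i = 1; i < SD_NODES_PER_DOMAIN; i++)
                span |= node_cpus[find_next_best_node(node, &used_nodes)];

        printf("node %d sched_domain span: 0x%02lx\n", node, span);
        return 0;
}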
@@ -6375,7 +7071,8 @@ static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | |||
6375 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); | 7071 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); |
6376 | 7072 | ||
6377 | static int | 7073 | static int |
6378 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) | 7074 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, |
7075 | cpumask_t *unused) | ||
6379 | { | 7076 | { |
6380 | if (sg) | 7077 | if (sg) |
6381 | *sg = &per_cpu(sched_group_cpus, cpu); | 7078 | *sg = &per_cpu(sched_group_cpus, cpu); |
@@ -6393,19 +7090,22 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core); | |||
6393 | 7090 | ||
6394 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 7091 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
6395 | static int | 7092 | static int |
6396 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) | 7093 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, |
7094 | cpumask_t *mask) | ||
6397 | { | 7095 | { |
6398 | int group; | 7096 | int group; |
6399 | cpumask_t mask = per_cpu(cpu_sibling_map, cpu); | 7097 | |
6400 | cpus_and(mask, mask, *cpu_map); | 7098 | *mask = per_cpu(cpu_sibling_map, cpu); |
6401 | group = first_cpu(mask); | 7099 | cpus_and(*mask, *mask, *cpu_map); |
7100 | group = first_cpu(*mask); | ||
6402 | if (sg) | 7101 | if (sg) |
6403 | *sg = &per_cpu(sched_group_core, group); | 7102 | *sg = &per_cpu(sched_group_core, group); |
6404 | return group; | 7103 | return group; |
6405 | } | 7104 | } |
6406 | #elif defined(CONFIG_SCHED_MC) | 7105 | #elif defined(CONFIG_SCHED_MC) |
6407 | static int | 7106 | static int |
6408 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) | 7107 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, |
7108 | cpumask_t *unused) | ||
6409 | { | 7109 | { |
6410 | if (sg) | 7110 | if (sg) |
6411 | *sg = &per_cpu(sched_group_core, cpu); | 7111 | *sg = &per_cpu(sched_group_core, cpu); |
@@ -6417,17 +7117,18 @@ static DEFINE_PER_CPU(struct sched_domain, phys_domains); | |||
6417 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); | 7117 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); |
6418 | 7118 | ||
6419 | static int | 7119 | static int |
6420 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) | 7120 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, |
7121 | cpumask_t *mask) | ||
6421 | { | 7122 | { |
6422 | int group; | 7123 | int group; |
6423 | #ifdef CONFIG_SCHED_MC | 7124 | #ifdef CONFIG_SCHED_MC |
6424 | cpumask_t mask = cpu_coregroup_map(cpu); | 7125 | *mask = cpu_coregroup_map(cpu); |
6425 | cpus_and(mask, mask, *cpu_map); | 7126 | cpus_and(*mask, *mask, *cpu_map); |
6426 | group = first_cpu(mask); | 7127 | group = first_cpu(*mask); |
6427 | #elif defined(CONFIG_SCHED_SMT) | 7128 | #elif defined(CONFIG_SCHED_SMT) |
6428 | cpumask_t mask = per_cpu(cpu_sibling_map, cpu); | 7129 | *mask = per_cpu(cpu_sibling_map, cpu); |
6429 | cpus_and(mask, mask, *cpu_map); | 7130 | cpus_and(*mask, *mask, *cpu_map); |
6430 | group = first_cpu(mask); | 7131 | group = first_cpu(*mask); |
6431 | #else | 7132 | #else |
6432 | group = cpu; | 7133 | group = cpu; |
6433 | #endif | 7134 | #endif |
@@ -6443,19 +7144,19 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) | |||
6443 | * gets dynamically allocated. | 7144 | * gets dynamically allocated. |
6444 | */ | 7145 | */ |
6445 | static DEFINE_PER_CPU(struct sched_domain, node_domains); | 7146 | static DEFINE_PER_CPU(struct sched_domain, node_domains); |
6446 | static struct sched_group **sched_group_nodes_bycpu[NR_CPUS]; | 7147 | static struct sched_group ***sched_group_nodes_bycpu; |
6447 | 7148 | ||
6448 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7149 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); |
6449 | static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); | 7150 | static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); |
6450 | 7151 | ||
6451 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, | 7152 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, |
6452 | struct sched_group **sg) | 7153 | struct sched_group **sg, cpumask_t *nodemask) |
6453 | { | 7154 | { |
6454 | cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu)); | ||
6455 | int group; | 7155 | int group; |
6456 | 7156 | ||
6457 | cpus_and(nodemask, nodemask, *cpu_map); | 7157 | *nodemask = node_to_cpumask(cpu_to_node(cpu)); |
6458 | group = first_cpu(nodemask); | 7158 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7159 | group = first_cpu(*nodemask); | ||
6459 | 7160 | ||
6460 | if (sg) | 7161 | if (sg) |
6461 | *sg = &per_cpu(sched_group_allnodes, group); | 7162 | *sg = &per_cpu(sched_group_allnodes, group); |
@@ -6491,7 +7192,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
6491 | 7192 | ||
6492 | #ifdef CONFIG_NUMA | 7193 | #ifdef CONFIG_NUMA |
6493 | /* Free memory allocated for various sched_group structures */ | 7194 | /* Free memory allocated for various sched_group structures */ |
6494 | static void free_sched_groups(const cpumask_t *cpu_map) | 7195 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) |
6495 | { | 7196 | { |
6496 | int cpu, i; | 7197 | int cpu, i; |
6497 | 7198 | ||
@@ -6503,11 +7204,11 @@ static void free_sched_groups(const cpumask_t *cpu_map) | |||
6503 | continue; | 7204 | continue; |
6504 | 7205 | ||
6505 | for (i = 0; i < MAX_NUMNODES; i++) { | 7206 | for (i = 0; i < MAX_NUMNODES; i++) { |
6506 | cpumask_t nodemask = node_to_cpumask(i); | ||
6507 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7207 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
6508 | 7208 | ||
6509 | cpus_and(nodemask, nodemask, *cpu_map); | 7209 | *nodemask = node_to_cpumask(i); |
6510 | if (cpus_empty(nodemask)) | 7210 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7211 | if (cpus_empty(*nodemask)) | ||
6511 | continue; | 7212 | continue; |
6512 | 7213 | ||
6513 | if (sg == NULL) | 7214 | if (sg == NULL) |
@@ -6525,7 +7226,7 @@ next_sg: | |||
6525 | } | 7226 | } |
6526 | } | 7227 | } |
6527 | #else | 7228 | #else |
6528 | static void free_sched_groups(const cpumask_t *cpu_map) | 7229 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) |
6529 | { | 7230 | { |
6530 | } | 7231 | } |
6531 | #endif | 7232 | #endif |
@@ -6583,13 +7284,106 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
6583 | } | 7284 | } |
6584 | 7285 | ||
6585 | /* | 7286 | /* |
7287 | * Initializers for sched domains | ||
7288 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() | ||
7289 | */ | ||
7290 | |||
7291 | #define SD_INIT(sd, type) sd_init_##type(sd) | ||
7292 | #define SD_INIT_FUNC(type) \ | ||
7293 | static noinline void sd_init_##type(struct sched_domain *sd) \ | ||
7294 | { \ | ||
7295 | memset(sd, 0, sizeof(*sd)); \ | ||
7296 | *sd = SD_##type##_INIT; \ | ||
7297 | sd->level = SD_LV_##type; \ | ||
7298 | } | ||
7299 | |||
7300 | SD_INIT_FUNC(CPU) | ||
7301 | #ifdef CONFIG_NUMA | ||
7302 | SD_INIT_FUNC(ALLNODES) | ||
7303 | SD_INIT_FUNC(NODE) | ||
7304 | #endif | ||
7305 | #ifdef CONFIG_SCHED_SMT | ||
7306 | SD_INIT_FUNC(SIBLING) | ||
7307 | #endif | ||
7308 | #ifdef CONFIG_SCHED_MC | ||
7309 | SD_INIT_FUNC(MC) | ||
7310 | #endif | ||
7311 | |||
7312 | /* | ||
7313 | * To minimize stack usage, kmalloc room for cpumasks and share the | ||
7314 | * space as the usage in build_sched_domains() dictates. Used only | ||
7315 | * if the amount of space is significant. | ||
7316 | */ | ||
7317 | struct allmasks { | ||
7318 | cpumask_t tmpmask; /* make this one first */ | ||
7319 | union { | ||
7320 | cpumask_t nodemask; | ||
7321 | cpumask_t this_sibling_map; | ||
7322 | cpumask_t this_core_map; | ||
7323 | }; | ||
7324 | cpumask_t send_covered; | ||
7325 | |||
7326 | #ifdef CONFIG_NUMA | ||
7327 | cpumask_t domainspan; | ||
7328 | cpumask_t covered; | ||
7329 | cpumask_t notcovered; | ||
7330 | #endif | ||
7331 | }; | ||
7332 | |||
7333 | #if NR_CPUS > 128 | ||
7334 | #define SCHED_CPUMASK_ALLOC 1 | ||
7335 | #define SCHED_CPUMASK_FREE(v) kfree(v) | ||
7336 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | ||
7337 | #else | ||
7338 | #define SCHED_CPUMASK_ALLOC 0 | ||
7339 | #define SCHED_CPUMASK_FREE(v) | ||
7340 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | ||
7341 | #endif | ||
7342 | |||
7343 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | ||
7344 | ((unsigned long)(a) + offsetof(struct allmasks, v)) | ||
7345 | |||
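The allmasks/SCHED_CPUMASK_VAR machinery above replaces on-stack cpumask_t locals with a single structure, heap-allocated when NR_CPUS is large, whose member names double as variable names: the macro turns a member name into a local pointer by adding offsetof() to the base address. A compilable userspace reduction of the same trick; the member set is abbreviated here.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef struct { unsigned long bits[4]; } cpumask_t;

struct allmasks {
        cpumask_t tmpmask;
        cpumask_t nodemask;
        cpumask_t send_covered;
};

/* member name 'v' becomes a pointer into the shared block */
#define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
                ((unsigned long)(a) + offsetof(struct allmasks, v))

int main(void)
{
        struct allmasks *allmasks = calloc(1, sizeof(*allmasks));

        if (!allmasks)
                return 1;

        SCHED_CPUMASK_VAR(tmpmask, allmasks);
        SCHED_CPUMASK_VAR(nodemask, allmasks);

        nodemask->bits[0] = 0xff;                    /* used like locals */
        tmpmask->bits[0] = nodemask->bits[0] & 0x0f;

        printf("tmpmask at offset %zu holds 0x%lx\n",
               offsetof(struct allmasks, tmpmask), tmpmask->bits[0]);

        free(allmasks);
        return 0;
}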
7346 | static int default_relax_domain_level = -1; | ||
7347 | |||
7348 | static int __init setup_relax_domain_level(char *str) | ||
7349 | { | ||
7350 | default_relax_domain_level = simple_strtoul(str, NULL, 0); | ||
7351 | return 1; | ||
7352 | } | ||
7353 | __setup("relax_domain_level=", setup_relax_domain_level); | ||
7354 | |||
7355 | static void set_domain_attribute(struct sched_domain *sd, | ||
7356 | struct sched_domain_attr *attr) | ||
7357 | { | ||
7358 | int request; | ||
7359 | |||
7360 | if (!attr || attr->relax_domain_level < 0) { | ||
7361 | if (default_relax_domain_level < 0) | ||
7362 | return; | ||
7363 | else | ||
7364 | request = default_relax_domain_level; | ||
7365 | } else | ||
7366 | request = attr->relax_domain_level; | ||
7367 | if (request < sd->level) { | ||
7368 | /* turn off idle balance on this domain */ | ||
7369 | sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE); | ||
7370 | } else { | ||
7371 | /* turn on idle balance on this domain */ | ||
7372 | sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE); | ||
7373 | } | ||
7374 | } | ||
7375 | |||
7376 | /* | ||
6586 | * Build sched domains for a given set of cpus and attach the sched domains | 7377 | * Build sched domains for a given set of cpus and attach the sched domains |
6587 | * to the individual cpus | 7378 | * to the individual cpus |
6588 | */ | 7379 | */ |
6589 | static int build_sched_domains(const cpumask_t *cpu_map) | 7380 | static int __build_sched_domains(const cpumask_t *cpu_map, |
7381 | struct sched_domain_attr *attr) | ||
6590 | { | 7382 | { |
6591 | int i; | 7383 | int i; |
6592 | struct root_domain *rd; | 7384 | struct root_domain *rd; |
7385 | SCHED_CPUMASK_DECLARE(allmasks); | ||
7386 | cpumask_t *tmpmask; | ||
6593 | #ifdef CONFIG_NUMA | 7387 | #ifdef CONFIG_NUMA |
6594 | struct sched_group **sched_group_nodes = NULL; | 7388 | struct sched_group **sched_group_nodes = NULL; |
6595 | int sd_allnodes = 0; | 7389 | int sd_allnodes = 0; |
@@ -6603,39 +7397,65 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6603 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 7397 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
6604 | return -ENOMEM; | 7398 | return -ENOMEM; |
6605 | } | 7399 | } |
6606 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | ||
6607 | #endif | 7400 | #endif |
6608 | 7401 | ||
6609 | rd = alloc_rootdomain(); | 7402 | rd = alloc_rootdomain(); |
6610 | if (!rd) { | 7403 | if (!rd) { |
6611 | printk(KERN_WARNING "Cannot alloc root domain\n"); | 7404 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
7405 | #ifdef CONFIG_NUMA | ||
7406 | kfree(sched_group_nodes); | ||
7407 | #endif | ||
6612 | return -ENOMEM; | 7408 | return -ENOMEM; |
6613 | } | 7409 | } |
6614 | 7410 | ||
7411 | #if SCHED_CPUMASK_ALLOC | ||
7412 | /* get space for all scratch cpumask variables */ | ||
7413 | allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL); | ||
7414 | if (!allmasks) { | ||
7415 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | ||
7416 | kfree(rd); | ||
7417 | #ifdef CONFIG_NUMA | ||
7418 | kfree(sched_group_nodes); | ||
7419 | #endif | ||
7420 | return -ENOMEM; | ||
7421 | } | ||
7422 | #endif | ||
7423 | tmpmask = (cpumask_t *)allmasks; | ||
7424 | |||
7425 | |||
7426 | #ifdef CONFIG_NUMA | ||
7427 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | ||
7428 | #endif | ||
7429 | |||
6615 | /* | 7430 | /* |
6616 | * Set up domains for cpus specified by the cpu_map. | 7431 | * Set up domains for cpus specified by the cpu_map. |
6617 | */ | 7432 | */ |
6618 | for_each_cpu_mask(i, *cpu_map) { | 7433 | for_each_cpu_mask(i, *cpu_map) { |
6619 | struct sched_domain *sd = NULL, *p; | 7434 | struct sched_domain *sd = NULL, *p; |
6620 | cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); | 7435 | SCHED_CPUMASK_VAR(nodemask, allmasks); |
6621 | 7436 | ||
6622 | cpus_and(nodemask, nodemask, *cpu_map); | 7437 | *nodemask = node_to_cpumask(cpu_to_node(i)); |
7438 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
6623 | 7439 | ||
6624 | #ifdef CONFIG_NUMA | 7440 | #ifdef CONFIG_NUMA |
6625 | if (cpus_weight(*cpu_map) > | 7441 | if (cpus_weight(*cpu_map) > |
6626 | SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) { | 7442 | SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { |
6627 | sd = &per_cpu(allnodes_domains, i); | 7443 | sd = &per_cpu(allnodes_domains, i); |
6628 | *sd = SD_ALLNODES_INIT; | 7444 | SD_INIT(sd, ALLNODES); |
7445 | set_domain_attribute(sd, attr); | ||
6629 | sd->span = *cpu_map; | 7446 | sd->span = *cpu_map; |
6630 | cpu_to_allnodes_group(i, cpu_map, &sd->groups); | 7447 | sd->first_cpu = first_cpu(sd->span); |
7448 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); | ||
6631 | p = sd; | 7449 | p = sd; |
6632 | sd_allnodes = 1; | 7450 | sd_allnodes = 1; |
6633 | } else | 7451 | } else |
6634 | p = NULL; | 7452 | p = NULL; |
6635 | 7453 | ||
6636 | sd = &per_cpu(node_domains, i); | 7454 | sd = &per_cpu(node_domains, i); |
6637 | *sd = SD_NODE_INIT; | 7455 | SD_INIT(sd, NODE); |
6638 | sd->span = sched_domain_node_span(cpu_to_node(i)); | 7456 | set_domain_attribute(sd, attr); |
7457 | sched_domain_node_span(cpu_to_node(i), &sd->span); | ||
7458 | sd->first_cpu = first_cpu(sd->span); | ||
6639 | sd->parent = p; | 7459 | sd->parent = p; |
6640 | if (p) | 7460 | if (p) |
6641 | p->child = sd; | 7461 | p->child = sd; |
@@ -6644,94 +7464,120 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6644 | 7464 | ||
6645 | p = sd; | 7465 | p = sd; |
6646 | sd = &per_cpu(phys_domains, i); | 7466 | sd = &per_cpu(phys_domains, i); |
6647 | *sd = SD_CPU_INIT; | 7467 | SD_INIT(sd, CPU); |
6648 | sd->span = nodemask; | 7468 | set_domain_attribute(sd, attr); |
7469 | sd->span = *nodemask; | ||
7470 | sd->first_cpu = first_cpu(sd->span); | ||
6649 | sd->parent = p; | 7471 | sd->parent = p; |
6650 | if (p) | 7472 | if (p) |
6651 | p->child = sd; | 7473 | p->child = sd; |
6652 | cpu_to_phys_group(i, cpu_map, &sd->groups); | 7474 | cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask); |
6653 | 7475 | ||
6654 | #ifdef CONFIG_SCHED_MC | 7476 | #ifdef CONFIG_SCHED_MC |
6655 | p = sd; | 7477 | p = sd; |
6656 | sd = &per_cpu(core_domains, i); | 7478 | sd = &per_cpu(core_domains, i); |
6657 | *sd = SD_MC_INIT; | 7479 | SD_INIT(sd, MC); |
7480 | set_domain_attribute(sd, attr); | ||
6658 | sd->span = cpu_coregroup_map(i); | 7481 | sd->span = cpu_coregroup_map(i); |
7482 | sd->first_cpu = first_cpu(sd->span); | ||
6659 | cpus_and(sd->span, sd->span, *cpu_map); | 7483 | cpus_and(sd->span, sd->span, *cpu_map); |
6660 | sd->parent = p; | 7484 | sd->parent = p; |
6661 | p->child = sd; | 7485 | p->child = sd; |
6662 | cpu_to_core_group(i, cpu_map, &sd->groups); | 7486 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); |
6663 | #endif | 7487 | #endif |
6664 | 7488 | ||
6665 | #ifdef CONFIG_SCHED_SMT | 7489 | #ifdef CONFIG_SCHED_SMT |
6666 | p = sd; | 7490 | p = sd; |
6667 | sd = &per_cpu(cpu_domains, i); | 7491 | sd = &per_cpu(cpu_domains, i); |
6668 | *sd = SD_SIBLING_INIT; | 7492 | SD_INIT(sd, SIBLING); |
7493 | set_domain_attribute(sd, attr); | ||
6669 | sd->span = per_cpu(cpu_sibling_map, i); | 7494 | sd->span = per_cpu(cpu_sibling_map, i); |
7495 | sd->first_cpu = first_cpu(sd->span); | ||
6670 | cpus_and(sd->span, sd->span, *cpu_map); | 7496 | cpus_and(sd->span, sd->span, *cpu_map); |
6671 | sd->parent = p; | 7497 | sd->parent = p; |
6672 | p->child = sd; | 7498 | p->child = sd; |
6673 | cpu_to_cpu_group(i, cpu_map, &sd->groups); | 7499 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); |
6674 | #endif | 7500 | #endif |
6675 | } | 7501 | } |
6676 | 7502 | ||
6677 | #ifdef CONFIG_SCHED_SMT | 7503 | #ifdef CONFIG_SCHED_SMT |
6678 | /* Set up CPU (sibling) groups */ | 7504 | /* Set up CPU (sibling) groups */ |
6679 | for_each_cpu_mask(i, *cpu_map) { | 7505 | for_each_cpu_mask(i, *cpu_map) { |
6680 | cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i); | 7506 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); |
6681 | cpus_and(this_sibling_map, this_sibling_map, *cpu_map); | 7507 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
6682 | if (i != first_cpu(this_sibling_map)) | 7508 | |
7509 | *this_sibling_map = per_cpu(cpu_sibling_map, i); | ||
7510 | cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); | ||
7511 | if (i != first_cpu(*this_sibling_map)) | ||
6683 | continue; | 7512 | continue; |
6684 | 7513 | ||
6685 | init_sched_build_groups(this_sibling_map, cpu_map, | 7514 | init_sched_build_groups(this_sibling_map, cpu_map, |
6686 | &cpu_to_cpu_group); | 7515 | &cpu_to_cpu_group, |
7516 | send_covered, tmpmask); | ||
6687 | } | 7517 | } |
6688 | #endif | 7518 | #endif |
6689 | 7519 | ||
6690 | #ifdef CONFIG_SCHED_MC | 7520 | #ifdef CONFIG_SCHED_MC |
6691 | /* Set up multi-core groups */ | 7521 | /* Set up multi-core groups */ |
6692 | for_each_cpu_mask(i, *cpu_map) { | 7522 | for_each_cpu_mask(i, *cpu_map) { |
6693 | cpumask_t this_core_map = cpu_coregroup_map(i); | 7523 | SCHED_CPUMASK_VAR(this_core_map, allmasks); |
6694 | cpus_and(this_core_map, this_core_map, *cpu_map); | 7524 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
6695 | if (i != first_cpu(this_core_map)) | 7525 | |
7526 | *this_core_map = cpu_coregroup_map(i); | ||
7527 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | ||
7528 | if (i != first_cpu(*this_core_map)) | ||
6696 | continue; | 7529 | continue; |
7530 | |||
6697 | init_sched_build_groups(this_core_map, cpu_map, | 7531 | init_sched_build_groups(this_core_map, cpu_map, |
6698 | &cpu_to_core_group); | 7532 | &cpu_to_core_group, |
7533 | send_covered, tmpmask); | ||
6699 | } | 7534 | } |
6700 | #endif | 7535 | #endif |
6701 | 7536 | ||
6702 | /* Set up physical groups */ | 7537 | /* Set up physical groups */ |
6703 | for (i = 0; i < MAX_NUMNODES; i++) { | 7538 | for (i = 0; i < MAX_NUMNODES; i++) { |
6704 | cpumask_t nodemask = node_to_cpumask(i); | 7539 | SCHED_CPUMASK_VAR(nodemask, allmasks); |
7540 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
6705 | 7541 | ||
6706 | cpus_and(nodemask, nodemask, *cpu_map); | 7542 | *nodemask = node_to_cpumask(i); |
6707 | if (cpus_empty(nodemask)) | 7543 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7544 | if (cpus_empty(*nodemask)) | ||
6708 | continue; | 7545 | continue; |
6709 | 7546 | ||
6710 | init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group); | 7547 | init_sched_build_groups(nodemask, cpu_map, |
7548 | &cpu_to_phys_group, | ||
7549 | send_covered, tmpmask); | ||
6711 | } | 7550 | } |
6712 | 7551 | ||
6713 | #ifdef CONFIG_NUMA | 7552 | #ifdef CONFIG_NUMA |
6714 | /* Set up node groups */ | 7553 | /* Set up node groups */ |
6715 | if (sd_allnodes) | 7554 | if (sd_allnodes) { |
6716 | init_sched_build_groups(*cpu_map, cpu_map, | 7555 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
6717 | &cpu_to_allnodes_group); | 7556 | |
7557 | init_sched_build_groups(cpu_map, cpu_map, | ||
7558 | &cpu_to_allnodes_group, | ||
7559 | send_covered, tmpmask); | ||
7560 | } | ||
6718 | 7561 | ||
6719 | for (i = 0; i < MAX_NUMNODES; i++) { | 7562 | for (i = 0; i < MAX_NUMNODES; i++) { |
6720 | /* Set up node groups */ | 7563 | /* Set up node groups */ |
6721 | struct sched_group *sg, *prev; | 7564 | struct sched_group *sg, *prev; |
6722 | cpumask_t nodemask = node_to_cpumask(i); | 7565 | SCHED_CPUMASK_VAR(nodemask, allmasks); |
6723 | cpumask_t domainspan; | 7566 | SCHED_CPUMASK_VAR(domainspan, allmasks); |
6724 | cpumask_t covered = CPU_MASK_NONE; | 7567 | SCHED_CPUMASK_VAR(covered, allmasks); |
6725 | int j; | 7568 | int j; |
6726 | 7569 | ||
6727 | cpus_and(nodemask, nodemask, *cpu_map); | 7570 | *nodemask = node_to_cpumask(i); |
6728 | if (cpus_empty(nodemask)) { | 7571 | cpus_clear(*covered); |
7572 | |||
7573 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7574 | if (cpus_empty(*nodemask)) { | ||
6729 | sched_group_nodes[i] = NULL; | 7575 | sched_group_nodes[i] = NULL; |
6730 | continue; | 7576 | continue; |
6731 | } | 7577 | } |
6732 | 7578 | ||
6733 | domainspan = sched_domain_node_span(i); | 7579 | sched_domain_node_span(i, domainspan); |
6734 | cpus_and(domainspan, domainspan, *cpu_map); | 7580 | cpus_and(*domainspan, *domainspan, *cpu_map); |
6735 | 7581 | ||
6736 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); | 7582 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); |
6737 | if (!sg) { | 7583 | if (!sg) { |
@@ -6740,31 +7586,31 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6740 | goto error; | 7586 | goto error; |
6741 | } | 7587 | } |
6742 | sched_group_nodes[i] = sg; | 7588 | sched_group_nodes[i] = sg; |
6743 | for_each_cpu_mask(j, nodemask) { | 7589 | for_each_cpu_mask(j, *nodemask) { |
6744 | struct sched_domain *sd; | 7590 | struct sched_domain *sd; |
6745 | 7591 | ||
6746 | sd = &per_cpu(node_domains, j); | 7592 | sd = &per_cpu(node_domains, j); |
6747 | sd->groups = sg; | 7593 | sd->groups = sg; |
6748 | } | 7594 | } |
6749 | sg->__cpu_power = 0; | 7595 | sg->__cpu_power = 0; |
6750 | sg->cpumask = nodemask; | 7596 | sg->cpumask = *nodemask; |
6751 | sg->next = sg; | 7597 | sg->next = sg; |
6752 | cpus_or(covered, covered, nodemask); | 7598 | cpus_or(*covered, *covered, *nodemask); |
6753 | prev = sg; | 7599 | prev = sg; |
6754 | 7600 | ||
6755 | for (j = 0; j < MAX_NUMNODES; j++) { | 7601 | for (j = 0; j < MAX_NUMNODES; j++) { |
6756 | cpumask_t tmp, notcovered; | 7602 | SCHED_CPUMASK_VAR(notcovered, allmasks); |
6757 | int n = (i + j) % MAX_NUMNODES; | 7603 | int n = (i + j) % MAX_NUMNODES; |
7604 | node_to_cpumask_ptr(pnodemask, n); | ||
6758 | 7605 | ||
6759 | cpus_complement(notcovered, covered); | 7606 | cpus_complement(*notcovered, *covered); |
6760 | cpus_and(tmp, notcovered, *cpu_map); | 7607 | cpus_and(*tmpmask, *notcovered, *cpu_map); |
6761 | cpus_and(tmp, tmp, domainspan); | 7608 | cpus_and(*tmpmask, *tmpmask, *domainspan); |
6762 | if (cpus_empty(tmp)) | 7609 | if (cpus_empty(*tmpmask)) |
6763 | break; | 7610 | break; |
6764 | 7611 | ||
6765 | nodemask = node_to_cpumask(n); | 7612 | cpus_and(*tmpmask, *tmpmask, *pnodemask); |
6766 | cpus_and(tmp, tmp, nodemask); | 7613 | if (cpus_empty(*tmpmask)) |
6767 | if (cpus_empty(tmp)) | ||
6768 | continue; | 7614 | continue; |
6769 | 7615 | ||
6770 | sg = kmalloc_node(sizeof(struct sched_group), | 7616 | sg = kmalloc_node(sizeof(struct sched_group), |
@@ -6775,9 +7621,9 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6775 | goto error; | 7621 | goto error; |
6776 | } | 7622 | } |
6777 | sg->__cpu_power = 0; | 7623 | sg->__cpu_power = 0; |
6778 | sg->cpumask = tmp; | 7624 | sg->cpumask = *tmpmask; |
6779 | sg->next = prev->next; | 7625 | sg->next = prev->next; |
6780 | cpus_or(covered, covered, tmp); | 7626 | cpus_or(*covered, *covered, *tmpmask); |
6781 | prev->next = sg; | 7627 | prev->next = sg; |
6782 | prev = sg; | 7628 | prev = sg; |
6783 | } | 7629 | } |
@@ -6813,7 +7659,8 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6813 | if (sd_allnodes) { | 7659 | if (sd_allnodes) { |
6814 | struct sched_group *sg; | 7660 | struct sched_group *sg; |
6815 | 7661 | ||
6816 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg); | 7662 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, |
7663 | tmpmask); | ||
6817 | init_numa_sched_groups_power(sg); | 7664 | init_numa_sched_groups_power(sg); |
6818 | } | 7665 | } |
6819 | #endif | 7666 | #endif |
@@ -6831,17 +7678,26 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
6831 | cpu_attach_domain(sd, rd, i); | 7678 | cpu_attach_domain(sd, rd, i); |
6832 | } | 7679 | } |
6833 | 7680 | ||
7681 | SCHED_CPUMASK_FREE((void *)allmasks); | ||
6834 | return 0; | 7682 | return 0; |
6835 | 7683 | ||
6836 | #ifdef CONFIG_NUMA | 7684 | #ifdef CONFIG_NUMA |
6837 | error: | 7685 | error: |
6838 | free_sched_groups(cpu_map); | 7686 | free_sched_groups(cpu_map, tmpmask); |
7687 | SCHED_CPUMASK_FREE((void *)allmasks); | ||
6839 | return -ENOMEM; | 7688 | return -ENOMEM; |
6840 | #endif | 7689 | #endif |
6841 | } | 7690 | } |
6842 | 7691 | ||
7692 | static int build_sched_domains(const cpumask_t *cpu_map) | ||
7693 | { | ||
7694 | return __build_sched_domains(cpu_map, NULL); | ||
7695 | } | ||
7696 | |||
6843 | static cpumask_t *doms_cur; /* current sched domains */ | 7697 | static cpumask_t *doms_cur; /* current sched domains */ |
6844 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7698 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
7699 | static struct sched_domain_attr *dattr_cur; /* attributes of custom domains | ||
7700 | in 'doms_cur' */ | ||
6845 | 7701 | ||
6846 | /* | 7702 | /* |
6847 | * Special case: If a kmalloc of a doms_cur partition (array of | 7703 | * Special case: If a kmalloc of a doms_cur partition (array of |
@@ -6869,15 +7725,17 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map) | |||
6869 | if (!doms_cur) | 7725 | if (!doms_cur) |
6870 | doms_cur = &fallback_doms; | 7726 | doms_cur = &fallback_doms; |
6871 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); | 7727 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); |
7728 | dattr_cur = NULL; | ||
6872 | err = build_sched_domains(doms_cur); | 7729 | err = build_sched_domains(doms_cur); |
6873 | register_sched_domain_sysctl(); | 7730 | register_sched_domain_sysctl(); |
6874 | 7731 | ||
6875 | return err; | 7732 | return err; |
6876 | } | 7733 | } |
6877 | 7734 | ||
6878 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map) | 7735 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map, |
7736 | cpumask_t *tmpmask) | ||
6879 | { | 7737 | { |
6880 | free_sched_groups(cpu_map); | 7738 | free_sched_groups(cpu_map, tmpmask); |
6881 | } | 7739 | } |
6882 | 7740 | ||
6883 | /* | 7741 | /* |
@@ -6886,6 +7744,7 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map) | |||
6886 | */ | 7744 | */ |
6887 | static void detach_destroy_domains(const cpumask_t *cpu_map) | 7745 | static void detach_destroy_domains(const cpumask_t *cpu_map) |
6888 | { | 7746 | { |
7747 | cpumask_t tmpmask; | ||
6889 | int i; | 7748 | int i; |
6890 | 7749 | ||
6891 | unregister_sched_domain_sysctl(); | 7750 | unregister_sched_domain_sysctl(); |
@@ -6893,7 +7752,23 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) | |||
6893 | for_each_cpu_mask(i, *cpu_map) | 7752 | for_each_cpu_mask(i, *cpu_map) |
6894 | cpu_attach_domain(NULL, &def_root_domain, i); | 7753 | cpu_attach_domain(NULL, &def_root_domain, i); |
6895 | synchronize_sched(); | 7754 | synchronize_sched(); |
6896 | arch_destroy_sched_domains(cpu_map); | 7755 | arch_destroy_sched_domains(cpu_map, &tmpmask); |
7756 | } | ||
7757 | |||
7758 | /* handle null as "default" */ | ||
7759 | static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | ||
7760 | struct sched_domain_attr *new, int idx_new) | ||
7761 | { | ||
7762 | struct sched_domain_attr tmp; | ||
7763 | |||
7764 | /* fast path */ | ||
7765 | if (!new && !cur) | ||
7766 | return 1; | ||
7767 | |||
7768 | tmp = SD_ATTR_INIT; | ||
7769 | return !memcmp(cur ? (cur + idx_cur) : &tmp, | ||
7770 | new ? (new + idx_new) : &tmp, | ||
7771 | sizeof(struct sched_domain_attr)); | ||
6897 | } | 7772 | } |
6898 | 7773 | ||
6899 | /* | 7774 | /* |
@@ -6917,7 +7792,8 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) | |||
6917 | * | 7792 | * |
6918 | * Call with hotplug lock held | 7793 | * Call with hotplug lock held |
6919 | */ | 7794 | */ |
6920 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) | 7795 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, |
7796 | struct sched_domain_attr *dattr_new) | ||
6921 | { | 7797 | { |
6922 | int i, j; | 7798 | int i, j; |
6923 | 7799 | ||
@@ -6930,12 +7806,14 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) | |||
6930 | ndoms_new = 1; | 7806 | ndoms_new = 1; |
6931 | doms_new = &fallback_doms; | 7807 | doms_new = &fallback_doms; |
6932 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7808 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); |
7809 | dattr_new = NULL; | ||
6933 | } | 7810 | } |
6934 | 7811 | ||
6935 | /* Destroy deleted domains */ | 7812 | /* Destroy deleted domains */ |
6936 | for (i = 0; i < ndoms_cur; i++) { | 7813 | for (i = 0; i < ndoms_cur; i++) { |
6937 | for (j = 0; j < ndoms_new; j++) { | 7814 | for (j = 0; j < ndoms_new; j++) { |
6938 | if (cpus_equal(doms_cur[i], doms_new[j])) | 7815 | if (cpus_equal(doms_cur[i], doms_new[j]) |
7816 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | ||
6939 | goto match1; | 7817 | goto match1; |
6940 | } | 7818 | } |
6941 | /* no match - a current sched domain not in new doms_new[] */ | 7819 | /* no match - a current sched domain not in new doms_new[] */ |
@@ -6947,11 +7825,13 @@ match1: | |||
6947 | /* Build new domains */ | 7825 | /* Build new domains */ |
6948 | for (i = 0; i < ndoms_new; i++) { | 7826 | for (i = 0; i < ndoms_new; i++) { |
6949 | for (j = 0; j < ndoms_cur; j++) { | 7827 | for (j = 0; j < ndoms_cur; j++) { |
6950 | if (cpus_equal(doms_new[i], doms_cur[j])) | 7828 | if (cpus_equal(doms_new[i], doms_cur[j]) |
7829 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | ||
6951 | goto match2; | 7830 | goto match2; |
6952 | } | 7831 | } |
6953 | /* no match - add a new doms_new */ | 7832 | /* no match - add a new doms_new */ |
6954 | build_sched_domains(doms_new + i); | 7833 | __build_sched_domains(doms_new + i, |
7834 | dattr_new ? dattr_new + i : NULL); | ||
6955 | match2: | 7835 | match2: |
6956 | ; | 7836 | ; |
6957 | } | 7837 | } |
@@ -6959,7 +7839,9 @@ match2: | |||
6959 | /* Remember the new sched domains */ | 7839 | /* Remember the new sched domains */ |
6960 | if (doms_cur != &fallback_doms) | 7840 | if (doms_cur != &fallback_doms) |
6961 | kfree(doms_cur); | 7841 | kfree(doms_cur); |
7842 | kfree(dattr_cur); /* kfree(NULL) is safe */ | ||
6962 | doms_cur = doms_new; | 7843 | doms_cur = doms_new; |
7844 | dattr_cur = dattr_new; | ||
6963 | ndoms_cur = ndoms_new; | 7845 | ndoms_cur = ndoms_new; |
6964 | 7846 | ||
6965 | register_sched_domain_sysctl(); | 7847 | register_sched_domain_sysctl(); |
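partition_sched_domains() now matches on attributes as well as cpumasks, but the overall shape is the two-pass reconciliation visible above: tear down current domain sets with no exact match in the new list, then build new sets with no match in the current list, leaving identical sets untouched so an unchanged partition costs nothing. A toy version, with bitmasks standing in for the domain cpumasks and attributes omitted:

#include <stdio.h>

static void destroy_domain(unsigned long d) { printf("destroy 0x%02lx\n", d); }
static void build_domain(unsigned long d)   { printf("build   0x%02lx\n", d); }

int main(void)
{
        unsigned long doms_cur[] = { 0x0f, 0xf0 };
        unsigned long doms_new[] = { 0x0f, 0x30, 0xc0 };
        int ndoms_cur = 2, ndoms_new = 3, i, j;

        for (i = 0; i < ndoms_cur; i++) {       /* destroy deleted domains */
                for (j = 0; j < ndoms_new; j++)
                        if (doms_cur[i] == doms_new[j])
                                goto match1;
                destroy_domain(doms_cur[i]);
match1:
                ;
        }

        for (i = 0; i < ndoms_new; i++) {       /* build added domains */
                for (j = 0; j < ndoms_cur; j++)
                        if (doms_new[i] == doms_cur[j])
                                goto match2;
                build_domain(doms_new[i]);
match2:
                ;
        }
        return 0;
}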
@@ -7086,6 +7968,11 @@ void __init sched_init_smp(void) | |||
7086 | { | 7968 | { |
7087 | cpumask_t non_isolated_cpus; | 7969 | cpumask_t non_isolated_cpus; |
7088 | 7970 | ||
7971 | #if defined(CONFIG_NUMA) | ||
7972 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | ||
7973 | GFP_KERNEL); | ||
7974 | BUG_ON(sched_group_nodes_bycpu == NULL); | ||
7975 | #endif | ||
7089 | get_online_cpus(); | 7976 | get_online_cpus(); |
7090 | arch_init_sched_domains(&cpu_online_map); | 7977 | arch_init_sched_domains(&cpu_online_map); |
7091 | cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); | 7978 | cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); |
@@ -7096,13 +7983,18 @@ void __init sched_init_smp(void) | |||
7096 | hotcpu_notifier(update_sched_domains, 0); | 7983 | hotcpu_notifier(update_sched_domains, 0); |
7097 | 7984 | ||
7098 | /* Move init over to a non-isolated CPU */ | 7985 | /* Move init over to a non-isolated CPU */ |
7099 | if (set_cpus_allowed(current, non_isolated_cpus) < 0) | 7986 | if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) |
7100 | BUG(); | 7987 | BUG(); |
7101 | sched_init_granularity(); | 7988 | sched_init_granularity(); |
7102 | } | 7989 | } |
7103 | #else | 7990 | #else |
7104 | void __init sched_init_smp(void) | 7991 | void __init sched_init_smp(void) |
7105 | { | 7992 | { |
7993 | #if defined(CONFIG_NUMA) | ||
7994 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | ||
7995 | GFP_KERNEL); | ||
7996 | BUG_ON(sched_group_nodes_bycpu == NULL); | ||
7997 | #endif | ||
7106 | sched_init_granularity(); | 7998 | sched_init_granularity(); |
7107 | } | 7999 | } |
7108 | #endif /* CONFIG_SMP */ | 8000 | #endif /* CONFIG_SMP */ |
@@ -7117,6 +8009,7 @@ int in_sched_functions(unsigned long addr) | |||
7117 | static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) | 8009 | static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) |
7118 | { | 8010 | { |
7119 | cfs_rq->tasks_timeline = RB_ROOT; | 8011 | cfs_rq->tasks_timeline = RB_ROOT; |
8012 | INIT_LIST_HEAD(&cfs_rq->tasks); | ||
7120 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8013 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7121 | cfs_rq->rq = rq; | 8014 | cfs_rq->rq = rq; |
7122 | #endif | 8015 | #endif |
@@ -7146,6 +8039,8 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) | |||
7146 | 8039 | ||
7147 | rt_rq->rt_time = 0; | 8040 | rt_rq->rt_time = 0; |
7148 | rt_rq->rt_throttled = 0; | 8041 | rt_rq->rt_throttled = 0; |
8042 | rt_rq->rt_runtime = 0; | ||
8043 | spin_lock_init(&rt_rq->rt_runtime_lock); | ||
7149 | 8044 | ||
7150 | #ifdef CONFIG_RT_GROUP_SCHED | 8045 | #ifdef CONFIG_RT_GROUP_SCHED |
7151 | rt_rq->rt_nr_boosted = 0; | 8046 | rt_rq->rt_nr_boosted = 0; |
@@ -7154,10 +8049,11 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) | |||
7154 | } | 8049 | } |
7155 | 8050 | ||
7156 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8051 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7157 | static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg, | 8052 | static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, |
7158 | struct cfs_rq *cfs_rq, struct sched_entity *se, | 8053 | struct sched_entity *se, int cpu, int add, |
7159 | int cpu, int add) | 8054 | struct sched_entity *parent) |
7160 | { | 8055 | { |
8056 | struct rq *rq = cpu_rq(cpu); | ||
7161 | tg->cfs_rq[cpu] = cfs_rq; | 8057 | tg->cfs_rq[cpu] = cfs_rq; |
7162 | init_cfs_rq(cfs_rq, rq); | 8058 | init_cfs_rq(cfs_rq, rq); |
7163 | cfs_rq->tg = tg; | 8059 | cfs_rq->tg = tg; |
@@ -7165,45 +8061,132 @@ static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg, | |||
7165 | list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); | 8061 | list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); |
7166 | 8062 | ||
7167 | tg->se[cpu] = se; | 8063 | tg->se[cpu] = se; |
7168 | se->cfs_rq = &rq->cfs; | 8064 | /* se could be NULL for init_task_group */ |
8065 | if (!se) | ||
8066 | return; | ||
8067 | |||
8068 | if (!parent) | ||
8069 | se->cfs_rq = &rq->cfs; | ||
8070 | else | ||
8071 | se->cfs_rq = parent->my_q; | ||
8072 | |||
7169 | se->my_q = cfs_rq; | 8073 | se->my_q = cfs_rq; |
7170 | se->load.weight = tg->shares; | 8074 | se->load.weight = tg->shares; |
7171 | se->load.inv_weight = div64_64(1ULL<<32, se->load.weight); | 8075 | se->load.inv_weight = div64_64(1ULL<<32, se->load.weight); |
7172 | se->parent = NULL; | 8076 | se->parent = parent; |
7173 | } | 8077 | } |
7174 | #endif | 8078 | #endif |
7175 | 8079 | ||
7176 | #ifdef CONFIG_RT_GROUP_SCHED | 8080 | #ifdef CONFIG_RT_GROUP_SCHED |
7177 | static void init_tg_rt_entry(struct rq *rq, struct task_group *tg, | 8081 | static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, |
7178 | struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, | 8082 | struct sched_rt_entity *rt_se, int cpu, int add, |
7179 | int cpu, int add) | 8083 | struct sched_rt_entity *parent) |
7180 | { | 8084 | { |
8085 | struct rq *rq = cpu_rq(cpu); | ||
8086 | |||
7181 | tg->rt_rq[cpu] = rt_rq; | 8087 | tg->rt_rq[cpu] = rt_rq; |
7182 | init_rt_rq(rt_rq, rq); | 8088 | init_rt_rq(rt_rq, rq); |
7183 | rt_rq->tg = tg; | 8089 | rt_rq->tg = tg; |
7184 | rt_rq->rt_se = rt_se; | 8090 | rt_rq->rt_se = rt_se; |
8091 | rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; | ||
7185 | if (add) | 8092 | if (add) |
7186 | list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); | 8093 | list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); |
7187 | 8094 | ||
7188 | tg->rt_se[cpu] = rt_se; | 8095 | tg->rt_se[cpu] = rt_se; |
8096 | if (!rt_se) | ||
8097 | return; | ||
8098 | |||
8099 | if (!parent) | ||
8100 | rt_se->rt_rq = &rq->rt; | ||
8101 | else | ||
8102 | rt_se->rt_rq = parent->my_q; | ||
8103 | |||
7189 | rt_se->rt_rq = &rq->rt; | 8104 | rt_se->rt_rq = &rq->rt; |
7190 | rt_se->my_q = rt_rq; | 8105 | rt_se->my_q = rt_rq; |
7191 | rt_se->parent = NULL; | 8106 | rt_se->parent = parent; |
7192 | INIT_LIST_HEAD(&rt_se->run_list); | 8107 | INIT_LIST_HEAD(&rt_se->run_list); |
7193 | } | 8108 | } |
7194 | #endif | 8109 | #endif |
7195 | 8110 | ||
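Both init_tg_cfs_entry() and init_tg_rt_entry() now take a parent entity: a group's per-cpu entity is enqueued on the cpu's root queue when it has no parent, or on the parent group's own queue (parent->my_q) otherwise, which is how nested task groups hang together. A reduced userspace model of that linkage; the structures are trimmed to the fields involved and the names are illustrative.

#include <stdio.h>

struct cfs_rq;

struct sched_entity {
        struct cfs_rq *cfs_rq;          /* queue this entity is enqueued on */
        struct cfs_rq *my_q;            /* queue owned by this group entity */
};

struct cfs_rq { const char *name; };

static void link_entity(struct sched_entity *se, struct cfs_rq *root,
                        struct sched_entity *parent, struct cfs_rq *own_q)
{
        se->cfs_rq = parent ? parent->my_q : root;
        se->my_q = own_q;
}

int main(void)
{
        struct cfs_rq root = { "rq->cfs" }, q_a = { "A->my_q" }, q_b = { "B->my_q" };
        struct sched_entity se_a, se_b;

        link_entity(&se_a, &root, NULL, &q_a);  /* top-level group A */
        link_entity(&se_b, &root, &se_a, &q_b); /* B is a child of A */

        printf("A is enqueued on %s\n", se_a.cfs_rq->name);
        printf("B is enqueued on %s\n", se_b.cfs_rq->name);
        return 0;
}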
7196 | void __init sched_init(void) | 8111 | void __init sched_init(void) |
7197 | { | 8112 | { |
7198 | int highest_cpu = 0; | ||
7199 | int i, j; | 8113 | int i, j; |
8114 | unsigned long alloc_size = 0, ptr; | ||
8115 | |||
8116 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
8117 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); | ||
8118 | #endif | ||
8119 | #ifdef CONFIG_RT_GROUP_SCHED | ||
8120 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); | ||
8121 | #endif | ||
8122 | #ifdef CONFIG_USER_SCHED | ||
8123 | alloc_size *= 2; | ||
8124 | #endif | ||
8125 | /* | ||
8126 | * As sched_init() is called before page_alloc is set up, | ||
8127 | * we use alloc_bootmem(). | ||
8128 | */ | ||
8129 | if (alloc_size) { | ||
8130 | ptr = (unsigned long)alloc_bootmem_low(alloc_size); | ||
8131 | |||
8132 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
8133 | init_task_group.se = (struct sched_entity **)ptr; | ||
8134 | ptr += nr_cpu_ids * sizeof(void **); | ||
8135 | |||
8136 | init_task_group.cfs_rq = (struct cfs_rq **)ptr; | ||
8137 | ptr += nr_cpu_ids * sizeof(void **); | ||
8138 | |||
8139 | #ifdef CONFIG_USER_SCHED | ||
8140 | root_task_group.se = (struct sched_entity **)ptr; | ||
8141 | ptr += nr_cpu_ids * sizeof(void **); | ||
8142 | |||
8143 | root_task_group.cfs_rq = (struct cfs_rq **)ptr; | ||
8144 | ptr += nr_cpu_ids * sizeof(void **); | ||
8145 | #endif | ||
8146 | #endif | ||
8147 | #ifdef CONFIG_RT_GROUP_SCHED | ||
8148 | init_task_group.rt_se = (struct sched_rt_entity **)ptr; | ||
8149 | ptr += nr_cpu_ids * sizeof(void **); | ||
8150 | |||
8151 | init_task_group.rt_rq = (struct rt_rq **)ptr; | ||
8152 | ptr += nr_cpu_ids * sizeof(void **); | ||
8153 | |||
8154 | #ifdef CONFIG_USER_SCHED | ||
8155 | root_task_group.rt_se = (struct sched_rt_entity **)ptr; | ||
8156 | ptr += nr_cpu_ids * sizeof(void **); | ||
8157 | |||
8158 | root_task_group.rt_rq = (struct rt_rq **)ptr; | ||
8159 | ptr += nr_cpu_ids * sizeof(void **); | ||
8160 | #endif | ||
8161 | #endif | ||
8162 | } | ||
7200 | 8163 | ||
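The block above carves every per-group pointer array out of one bootmem allocation by stepping a cursor forward nr_cpu_ids pointers at a time. The same carving pattern in plain userspace C, with calloc() standing in for alloc_bootmem_low() and only two of the arrays shown:

#include <stdio.h>
#include <stdlib.h>

struct sched_entity { int weight; };
struct cfs_rq { int nr_running; };

int main(void)
{
        int nr_cpu_ids = 4;
        size_t alloc_size = 2 * nr_cpu_ids * sizeof(void **);
        void *block = calloc(1, alloc_size);
        unsigned long ptr = (unsigned long)block;
        struct sched_entity **se;
        struct cfs_rq **cfs_rq;

        if (!block)
                return 1;

        se = (struct sched_entity **)ptr;       /* first slice */
        ptr += nr_cpu_ids * sizeof(void **);

        cfs_rq = (struct cfs_rq **)ptr;         /* next slice */
        ptr += nr_cpu_ids * sizeof(void **);

        printf("se[]     starts at %p\n", (void *)se);
        printf("cfs_rq[] starts at %p (%d pointers later)\n",
               (void *)cfs_rq, nr_cpu_ids);

        free(block);
        return 0;
}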
7201 | #ifdef CONFIG_SMP | 8164 | #ifdef CONFIG_SMP |
8165 | init_aggregate(); | ||
7202 | init_defrootdomain(); | 8166 | init_defrootdomain(); |
7203 | #endif | 8167 | #endif |
7204 | 8168 | ||
8169 | init_rt_bandwidth(&def_rt_bandwidth, | ||
8170 | global_rt_period(), global_rt_runtime()); | ||
8171 | |||
8172 | #ifdef CONFIG_RT_GROUP_SCHED | ||
8173 | init_rt_bandwidth(&init_task_group.rt_bandwidth, | ||
8174 | global_rt_period(), global_rt_runtime()); | ||
8175 | #ifdef CONFIG_USER_SCHED | ||
8176 | init_rt_bandwidth(&root_task_group.rt_bandwidth, | ||
8177 | global_rt_period(), RUNTIME_INF); | ||
8178 | #endif | ||
8179 | #endif | ||
8180 | |||
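def_rt_bandwidth and the group bandwidths are initialized from global_rt_period()/global_rt_runtime(): realtime tasks may consume at most rt_runtime out of every rt_period before being throttled, with RUNTIME_INF meaning no limit. The concrete 1 s / 0.95 s figures in this sketch are assumed typical defaults, not values taken from this diff.

#include <stdio.h>

int main(void)
{
        unsigned long long rt_period_ns  = 1000000000ULL;  /* assumed 1 s    */
        unsigned long long rt_runtime_ns =  950000000ULL;  /* assumed 0.95 s */

        printf("RT tasks may use %.0f%% of each period; %.0f%% is left over\n",
               100.0 * rt_runtime_ns / rt_period_ns,
               100.0 - 100.0 * rt_runtime_ns / rt_period_ns);
        return 0;
}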
7205 | #ifdef CONFIG_GROUP_SCHED | 8181 | #ifdef CONFIG_GROUP_SCHED |
7206 | list_add(&init_task_group.list, &task_groups); | 8182 | list_add(&init_task_group.list, &task_groups); |
8183 | INIT_LIST_HEAD(&init_task_group.children); | ||
8184 | |||
8185 | #ifdef CONFIG_USER_SCHED | ||
8186 | INIT_LIST_HEAD(&root_task_group.children); | ||
8187 | init_task_group.parent = &root_task_group; | ||
8188 | list_add(&init_task_group.siblings, &root_task_group.children); | ||
8189 | #endif | ||
7207 | #endif | 8190 | #endif |
7208 | 8191 | ||
7209 | for_each_possible_cpu(i) { | 8192 | for_each_possible_cpu(i) { |
@@ -7214,26 +8197,68 @@ void __init sched_init(void) | |||
7214 | lockdep_set_class(&rq->lock, &rq->rq_lock_key); | 8197 | lockdep_set_class(&rq->lock, &rq->rq_lock_key); |
7215 | rq->nr_running = 0; | 8198 | rq->nr_running = 0; |
7216 | rq->clock = 1; | 8199 | rq->clock = 1; |
8200 | update_last_tick_seen(rq); | ||
7217 | init_cfs_rq(&rq->cfs, rq); | 8201 | init_cfs_rq(&rq->cfs, rq); |
7218 | init_rt_rq(&rq->rt, rq); | 8202 | init_rt_rq(&rq->rt, rq); |
7219 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8203 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7220 | init_task_group.shares = init_task_group_load; | 8204 | init_task_group.shares = init_task_group_load; |
7221 | INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); | 8205 | INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); |
7222 | init_tg_cfs_entry(rq, &init_task_group, | 8206 | #ifdef CONFIG_CGROUP_SCHED |
8207 | /* | ||
8208 | * How much cpu bandwidth does init_task_group get? | ||
8209 | * | ||
8210 | * In case of task-groups formed through the cgroup filesystem, it | ||
8211 | * gets 100% of the cpu resources in the system. This overall | ||
8212 | * system cpu resource is divided among the tasks of | ||
8213 | * init_task_group and its child task-groups in a fair manner, | ||
8214 | * based on each entity's (task or task-group's) weight | ||
8215 | * (se->load.weight). | ||
8216 | * | ||
8217 | * In other words, if init_task_group has 10 tasks (each of weight | ||
8218 | * 1024) and two child groups A0 and A1 (also of weight 1024 each), | ||
8219 | * then A0's share of the cpu resource is: | ||
8220 | * | ||
8221 | * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% | ||
8222 | * | ||
8223 | * We achieve this by letting init_task_group's tasks sit | ||
8224 | * directly in rq->cfs (i.e. init_task_group->se[] = NULL). | ||
8225 | */ | ||
8226 | init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL); | ||
8227 | #elif defined CONFIG_USER_SCHED | ||
8228 | root_task_group.shares = NICE_0_LOAD; | ||
8229 | init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL); | ||
8230 | /* | ||
8231 | * In case of task-groups formed through the user id of tasks, | ||
8232 | * init_task_group represents the tasks belonging to the root user. | ||
8233 | * Hence it is a sibling of all subsequently formed groups. | ||
8234 | * In this case, init_task_group gets only a fraction of the overall | ||
8235 | * system cpu resources, based on the weight assigned to the root | ||
8236 | * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished | ||
8237 | * by letting tasks of init_task_group sit in a separate cfs_rq | ||
8238 | * (init_cfs_rq) and having one entity represent this group of | ||
8239 | * tasks in rq->cfs (i.e. init_task_group->se[] != NULL). | ||
8240 | */ | ||
8241 | init_tg_cfs_entry(&init_task_group, | ||
7223 | &per_cpu(init_cfs_rq, i), | 8242 | &per_cpu(init_cfs_rq, i), |
7224 | &per_cpu(init_sched_entity, i), i, 1); | 8243 | &per_cpu(init_sched_entity, i), i, 1, |
8244 | root_task_group.se[i]); | ||
7225 | 8245 | ||
7226 | #endif | 8246 | #endif |
8247 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | ||
8248 | |||
8249 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; | ||
7227 | #ifdef CONFIG_RT_GROUP_SCHED | 8250 | #ifdef CONFIG_RT_GROUP_SCHED |
7228 | init_task_group.rt_runtime = | ||
7229 | sysctl_sched_rt_runtime * NSEC_PER_USEC; | ||
7230 | INIT_LIST_HEAD(&rq->leaf_rt_rq_list); | 8251 | INIT_LIST_HEAD(&rq->leaf_rt_rq_list); |
7231 | init_tg_rt_entry(rq, &init_task_group, | 8252 | #ifdef CONFIG_CGROUP_SCHED |
8253 | init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL); | ||
8254 | #elif defined CONFIG_USER_SCHED | ||
8255 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); | ||
8256 | init_tg_rt_entry(&init_task_group, | ||
7232 | &per_cpu(init_rt_rq, i), | 8257 | &per_cpu(init_rt_rq, i), |
7233 | &per_cpu(init_sched_rt_entity, i), i, 1); | 8258 | &per_cpu(init_sched_rt_entity, i), i, 1, |
8259 | root_task_group.rt_se[i]); | ||
8260 | #endif | ||
7234 | #endif | 8261 | #endif |
7235 | rq->rt_period_expire = 0; | ||
7236 | rq->rt_throttled = 0; | ||
7237 | 8262 | ||
7238 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) | 8263 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) |
7239 | rq->cpu_load[j] = 0; | 8264 | rq->cpu_load[j] = 0; |
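As a quick numeric check of the bandwidth comments above: an entity's share of a cfs_rq is its weight over the total weight queued there, so ten weight-1024 tasks plus two weight-1024 group entities give each group 1024 / (12 * 1024), roughly 8.3%. An illustrative helper (not part of the patch):

static unsigned int example_share_permille(unsigned long weight,
					   unsigned long total_weight)
{
	/* e.g. (1024 * 1000) / (12 * 1024) == 83, i.e. ~8.3% */
	return (unsigned int)((weight * 1000) / total_weight);
}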
@@ -7250,7 +8275,6 @@ void __init sched_init(void) | |||
7250 | #endif | 8275 | #endif |
7251 | init_rq_hrtick(rq); | 8276 | init_rq_hrtick(rq); |
7252 | atomic_set(&rq->nr_iowait, 0); | 8277 | atomic_set(&rq->nr_iowait, 0); |
7253 | highest_cpu = i; | ||
7254 | } | 8278 | } |
7255 | 8279 | ||
7256 | set_load_weight(&init_task); | 8280 | set_load_weight(&init_task); |
@@ -7260,7 +8284,6 @@ void __init sched_init(void) | |||
7260 | #endif | 8284 | #endif |
7261 | 8285 | ||
7262 | #ifdef CONFIG_SMP | 8286 | #ifdef CONFIG_SMP |
7263 | nr_cpu_ids = highest_cpu + 1; | ||
7264 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL); | 8287 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL); |
7265 | #endif | 8288 | #endif |
7266 | 8289 | ||
@@ -7419,8 +8442,6 @@ void set_curr_task(int cpu, struct task_struct *p) | |||
7419 | 8442 | ||
7420 | #endif | 8443 | #endif |
7421 | 8444 | ||
7422 | #ifdef CONFIG_GROUP_SCHED | ||
7423 | |||
7424 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8445 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7425 | static void free_fair_sched_group(struct task_group *tg) | 8446 | static void free_fair_sched_group(struct task_group *tg) |
7426 | { | 8447 | { |
@@ -7437,17 +8458,18 @@ static void free_fair_sched_group(struct task_group *tg) | |||
7437 | kfree(tg->se); | 8458 | kfree(tg->se); |
7438 | } | 8459 | } |
7439 | 8460 | ||
7440 | static int alloc_fair_sched_group(struct task_group *tg) | 8461 | static |
8462 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | ||
7441 | { | 8463 | { |
7442 | struct cfs_rq *cfs_rq; | 8464 | struct cfs_rq *cfs_rq; |
7443 | struct sched_entity *se; | 8465 | struct sched_entity *se, *parent_se; |
7444 | struct rq *rq; | 8466 | struct rq *rq; |
7445 | int i; | 8467 | int i; |
7446 | 8468 | ||
7447 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL); | 8469 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); |
7448 | if (!tg->cfs_rq) | 8470 | if (!tg->cfs_rq) |
7449 | goto err; | 8471 | goto err; |
7450 | tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL); | 8472 | tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); |
7451 | if (!tg->se) | 8473 | if (!tg->se) |
7452 | goto err; | 8474 | goto err; |
7453 | 8475 | ||
@@ -7466,7 +8488,8 @@ static int alloc_fair_sched_group(struct task_group *tg) | |||
7466 | if (!se) | 8488 | if (!se) |
7467 | goto err; | 8489 | goto err; |
7468 | 8490 | ||
7469 | init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0); | 8491 | parent_se = parent ? parent->se[i] : NULL; |
8492 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se); | ||
7470 | } | 8493 | } |
7471 | 8494 | ||
7472 | return 1; | 8495 | return 1; |
@@ -7490,7 +8513,8 @@ static inline void free_fair_sched_group(struct task_group *tg) | |||
7490 | { | 8513 | { |
7491 | } | 8514 | } |
7492 | 8515 | ||
7493 | static inline int alloc_fair_sched_group(struct task_group *tg) | 8516 | static inline |
8517 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | ||
7494 | { | 8518 | { |
7495 | return 1; | 8519 | return 1; |
7496 | } | 8520 | } |
@@ -7509,6 +8533,8 @@ static void free_rt_sched_group(struct task_group *tg) | |||
7509 | { | 8533 | { |
7510 | int i; | 8534 | int i; |
7511 | 8535 | ||
8536 | destroy_rt_bandwidth(&tg->rt_bandwidth); | ||
8537 | |||
7512 | for_each_possible_cpu(i) { | 8538 | for_each_possible_cpu(i) { |
7513 | if (tg->rt_rq) | 8539 | if (tg->rt_rq) |
7514 | kfree(tg->rt_rq[i]); | 8540 | kfree(tg->rt_rq[i]); |
@@ -7520,21 +8546,23 @@ static void free_rt_sched_group(struct task_group *tg) | |||
7520 | kfree(tg->rt_se); | 8546 | kfree(tg->rt_se); |
7521 | } | 8547 | } |
7522 | 8548 | ||
7523 | static int alloc_rt_sched_group(struct task_group *tg) | 8549 | static |
8550 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | ||
7524 | { | 8551 | { |
7525 | struct rt_rq *rt_rq; | 8552 | struct rt_rq *rt_rq; |
7526 | struct sched_rt_entity *rt_se; | 8553 | struct sched_rt_entity *rt_se, *parent_se; |
7527 | struct rq *rq; | 8554 | struct rq *rq; |
7528 | int i; | 8555 | int i; |
7529 | 8556 | ||
7530 | tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL); | 8557 | tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); |
7531 | if (!tg->rt_rq) | 8558 | if (!tg->rt_rq) |
7532 | goto err; | 8559 | goto err; |
7533 | tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL); | 8560 | tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); |
7534 | if (!tg->rt_se) | 8561 | if (!tg->rt_se) |
7535 | goto err; | 8562 | goto err; |
7536 | 8563 | ||
7537 | tg->rt_runtime = 0; | 8564 | init_rt_bandwidth(&tg->rt_bandwidth, |
8565 | ktime_to_ns(def_rt_bandwidth.rt_period), 0); | ||
7538 | 8566 | ||
7539 | for_each_possible_cpu(i) { | 8567 | for_each_possible_cpu(i) { |
7540 | rq = cpu_rq(i); | 8568 | rq = cpu_rq(i); |
@@ -7549,7 +8577,8 @@ static int alloc_rt_sched_group(struct task_group *tg) | |||
7549 | if (!rt_se) | 8577 | if (!rt_se) |
7550 | goto err; | 8578 | goto err; |
7551 | 8579 | ||
7552 | init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0); | 8580 | parent_se = parent ? parent->rt_se[i] : NULL; |
8581 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se); | ||
7553 | } | 8582 | } |
7554 | 8583 | ||
7555 | return 1; | 8584 | return 1; |
@@ -7573,7 +8602,8 @@ static inline void free_rt_sched_group(struct task_group *tg) | |||
7573 | { | 8602 | { |
7574 | } | 8603 | } |
7575 | 8604 | ||
7576 | static inline int alloc_rt_sched_group(struct task_group *tg) | 8605 | static inline |
8606 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | ||
7577 | { | 8607 | { |
7578 | return 1; | 8608 | return 1; |
7579 | } | 8609 | } |
@@ -7587,6 +8617,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) | |||
7587 | } | 8617 | } |
7588 | #endif | 8618 | #endif |
7589 | 8619 | ||
8620 | #ifdef CONFIG_GROUP_SCHED | ||
7590 | static void free_sched_group(struct task_group *tg) | 8621 | static void free_sched_group(struct task_group *tg) |
7591 | { | 8622 | { |
7592 | free_fair_sched_group(tg); | 8623 | free_fair_sched_group(tg); |
@@ -7595,7 +8626,7 @@ static void free_sched_group(struct task_group *tg) | |||
7595 | } | 8626 | } |
7596 | 8627 | ||
7597 | /* allocate runqueue etc for a new task group */ | 8628 | /* allocate runqueue etc for a new task group */ |
7598 | struct task_group *sched_create_group(void) | 8629 | struct task_group *sched_create_group(struct task_group *parent) |
7599 | { | 8630 | { |
7600 | struct task_group *tg; | 8631 | struct task_group *tg; |
7601 | unsigned long flags; | 8632 | unsigned long flags; |
@@ -7605,10 +8636,10 @@ struct task_group *sched_create_group(void) | |||
7605 | if (!tg) | 8636 | if (!tg) |
7606 | return ERR_PTR(-ENOMEM); | 8637 | return ERR_PTR(-ENOMEM); |
7607 | 8638 | ||
7608 | if (!alloc_fair_sched_group(tg)) | 8639 | if (!alloc_fair_sched_group(tg, parent)) |
7609 | goto err; | 8640 | goto err; |
7610 | 8641 | ||
7611 | if (!alloc_rt_sched_group(tg)) | 8642 | if (!alloc_rt_sched_group(tg, parent)) |
7612 | goto err; | 8643 | goto err; |
7613 | 8644 | ||
7614 | spin_lock_irqsave(&task_group_lock, flags); | 8645 | spin_lock_irqsave(&task_group_lock, flags); |
@@ -7617,6 +8648,12 @@ struct task_group *sched_create_group(void) | |||
7617 | register_rt_sched_group(tg, i); | 8648 | register_rt_sched_group(tg, i); |
7618 | } | 8649 | } |
7619 | list_add_rcu(&tg->list, &task_groups); | 8650 | list_add_rcu(&tg->list, &task_groups); |
8651 | |||
8652 | WARN_ON(!parent); /* root should already exist */ | ||
8653 | |||
8654 | tg->parent = parent; | ||
8655 | list_add_rcu(&tg->siblings, &parent->children); | ||
8656 | INIT_LIST_HEAD(&tg->children); | ||
7620 | spin_unlock_irqrestore(&task_group_lock, flags); | 8657 | spin_unlock_irqrestore(&task_group_lock, flags); |
7621 | 8658 | ||
7622 | return tg; | 8659 | return tg; |
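With the new signature, callers have to name the parent group explicitly; cpu_cgroup_create() later in this patch does so via cgroup_tg(cgrp->parent). A minimal illustrative call sequence (hypothetical helper, error handling trimmed):

#include <linux/err.h>

static int example_make_child_group(void)
{
	struct task_group *tg;

	/* create a child of init_task_group, then tear it down again */
	tg = sched_create_group(&init_task_group);
	if (IS_ERR(tg))
		return PTR_ERR(tg);

	sched_destroy_group(tg);
	return 0;
}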
@@ -7645,6 +8682,7 @@ void sched_destroy_group(struct task_group *tg) | |||
7645 | unregister_rt_sched_group(tg, i); | 8682 | unregister_rt_sched_group(tg, i); |
7646 | } | 8683 | } |
7647 | list_del_rcu(&tg->list); | 8684 | list_del_rcu(&tg->list); |
8685 | list_del_rcu(&tg->siblings); | ||
7648 | spin_unlock_irqrestore(&task_group_lock, flags); | 8686 | spin_unlock_irqrestore(&task_group_lock, flags); |
7649 | 8687 | ||
7650 | /* wait for possible concurrent references to cfs_rqs to complete */ | 8688 | /* wait for possible concurrent references to cfs_rqs to complete */
@@ -7688,16 +8726,14 @@ void sched_move_task(struct task_struct *tsk) | |||
7688 | 8726 | ||
7689 | task_rq_unlock(rq, &flags); | 8727 | task_rq_unlock(rq, &flags); |
7690 | } | 8728 | } |
8729 | #endif | ||
7691 | 8730 | ||
7692 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8731 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7693 | static void set_se_shares(struct sched_entity *se, unsigned long shares) | 8732 | static void __set_se_shares(struct sched_entity *se, unsigned long shares) |
7694 | { | 8733 | { |
7695 | struct cfs_rq *cfs_rq = se->cfs_rq; | 8734 | struct cfs_rq *cfs_rq = se->cfs_rq; |
7696 | struct rq *rq = cfs_rq->rq; | ||
7697 | int on_rq; | 8735 | int on_rq; |
7698 | 8736 | ||
7699 | spin_lock_irq(&rq->lock); | ||
7700 | |||
7701 | on_rq = se->on_rq; | 8737 | on_rq = se->on_rq; |
7702 | if (on_rq) | 8738 | if (on_rq) |
7703 | dequeue_entity(cfs_rq, se, 0); | 8739 | dequeue_entity(cfs_rq, se, 0); |
@@ -7707,8 +8743,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares) | |||
7707 | 8743 | ||
7708 | if (on_rq) | 8744 | if (on_rq) |
7709 | enqueue_entity(cfs_rq, se, 0); | 8745 | enqueue_entity(cfs_rq, se, 0); |
8746 | } | ||
7710 | 8747 | ||
7711 | spin_unlock_irq(&rq->lock); | 8748 | static void set_se_shares(struct sched_entity *se, unsigned long shares) |
8749 | { | ||
8750 | struct cfs_rq *cfs_rq = se->cfs_rq; | ||
8751 | struct rq *rq = cfs_rq->rq; | ||
8752 | unsigned long flags; | ||
8753 | |||
8754 | spin_lock_irqsave(&rq->lock, flags); | ||
8755 | __set_se_shares(se, shares); | ||
8756 | spin_unlock_irqrestore(&rq->lock, flags); | ||
7712 | } | 8757 | } |
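Splitting set_se_shares() into a bare __set_se_shares() plus an irqsave wrapper follows a common kernel idiom: the double-underscore variant assumes the caller already holds the relevant lock (here the per-cpu rq->lock), while the wrapper takes it itself. A generic sketch of the pattern, with hypothetical names:

#include <linux/spinlock.h>

struct example_counter {
	spinlock_t lock;
	unsigned long val;
};

/* caller must already hold c->lock */
static void __example_set(struct example_counter *c, unsigned long val)
{
	c->val = val;
}

/* takes the lock itself; safe regardless of the caller's irq state */
static void example_set(struct example_counter *c, unsigned long val)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	__example_set(c, val);
	spin_unlock_irqrestore(&c->lock, flags);
}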
7713 | 8758 | ||
7714 | static DEFINE_MUTEX(shares_mutex); | 8759 | static DEFINE_MUTEX(shares_mutex); |
@@ -7719,12 +8764,18 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) | |||
7719 | unsigned long flags; | 8764 | unsigned long flags; |
7720 | 8765 | ||
7721 | /* | 8766 | /* |
8767 | * We can't change the weight of the root cgroup. | ||
8768 | */ | ||
8769 | if (!tg->se[0]) | ||
8770 | return -EINVAL; | ||
8771 | |||
8772 | /* | ||
7722 | * A weight of 0 or 1 can cause arithmetic problems. | 8773 | * A weight of 0 or 1 can cause arithmetic problems.
7723 | * (The default weight is 1024 - so there's no practical | 8774 | * (The default weight is 1024 - so there's no practical |
7724 | * limitation from this.) | 8775 | * limitation from this.) |
7725 | */ | 8776 | */ |
7726 | if (shares < 2) | 8777 | if (shares < MIN_SHARES) |
7727 | shares = 2; | 8778 | shares = MIN_SHARES; |
7728 | 8779 | ||
7729 | mutex_lock(&shares_mutex); | 8780 | mutex_lock(&shares_mutex); |
7730 | if (tg->shares == shares) | 8781 | if (tg->shares == shares) |
@@ -7733,6 +8784,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) | |||
7733 | spin_lock_irqsave(&task_group_lock, flags); | 8784 | spin_lock_irqsave(&task_group_lock, flags); |
7734 | for_each_possible_cpu(i) | 8785 | for_each_possible_cpu(i) |
7735 | unregister_fair_sched_group(tg, i); | 8786 | unregister_fair_sched_group(tg, i); |
8787 | list_del_rcu(&tg->siblings); | ||
7736 | spin_unlock_irqrestore(&task_group_lock, flags); | 8788 | spin_unlock_irqrestore(&task_group_lock, flags); |
7737 | 8789 | ||
7738 | /* wait for any ongoing reference to this group to finish */ | 8790 | /* wait for any ongoing reference to this group to finish */ |
@@ -7743,8 +8795,13 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) | |||
7743 | * w/o tripping rebalance_share or load_balance_fair. | 8795 | * w/o tripping rebalance_share or load_balance_fair. |
7744 | */ | 8796 | */ |
7745 | tg->shares = shares; | 8797 | tg->shares = shares; |
7746 | for_each_possible_cpu(i) | 8798 | for_each_possible_cpu(i) { |
7747 | set_se_shares(tg->se[i], shares); | 8799 | /* |
8800 | * force a rebalance | ||
8801 | */ | ||
8802 | cfs_rq_set_shares(tg->cfs_rq[i], 0); | ||
8803 | set_se_shares(tg->se[i], shares/nr_cpu_ids); | ||
8804 | } | ||
7748 | 8805 | ||
7749 | /* | 8806 | /* |
7750 | * Enable load balance activity on this group, by inserting it back on | 8807 | * Enable load balance activity on this group, by inserting it back on |
@@ -7753,6 +8810,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) | |||
7753 | spin_lock_irqsave(&task_group_lock, flags); | 8810 | spin_lock_irqsave(&task_group_lock, flags); |
7754 | for_each_possible_cpu(i) | 8811 | for_each_possible_cpu(i) |
7755 | register_fair_sched_group(tg, i); | 8812 | register_fair_sched_group(tg, i); |
8813 | list_add_rcu(&tg->siblings, &tg->parent->children); | ||
7756 | spin_unlock_irqrestore(&task_group_lock, flags); | 8814 | spin_unlock_irqrestore(&task_group_lock, flags); |
7757 | done: | 8815 | done: |
7758 | mutex_unlock(&shares_mutex); | 8816 | mutex_unlock(&shares_mutex); |
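The loop above first zeroes each per-cpu cfs_rq's shares to force a rebalance and then seeds every per-cpu entity with an equal slice of the group total. A worked instance with hypothetical numbers:

static unsigned long example_initial_se_shares(unsigned long group_shares)
{
	/* e.g. 2048 group shares with nr_cpu_ids == 4 -> 512 per entity;
	 * the aggregation code later shifts weight toward busier cpus */
	return group_shares / nr_cpu_ids;
}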
@@ -7779,26 +8837,58 @@ static unsigned long to_ratio(u64 period, u64 runtime) | |||
7779 | return div64_64(runtime << 16, period); | 8837 | return div64_64(runtime << 16, period); |
7780 | } | 8838 | } |
7781 | 8839 | ||
8840 | #ifdef CONFIG_CGROUP_SCHED | ||
8841 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) | ||
8842 | { | ||
8843 | struct task_group *tgi, *parent = tg->parent; | ||
8844 | unsigned long total = 0; | ||
8845 | |||
8846 | if (!parent) { | ||
8847 | if (global_rt_period() < period) | ||
8848 | return 0; | ||
8849 | |||
8850 | return to_ratio(period, runtime) < | ||
8851 | to_ratio(global_rt_period(), global_rt_runtime()); | ||
8852 | } | ||
8853 | |||
8854 | if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period) | ||
8855 | return 0; | ||
8856 | |||
8857 | rcu_read_lock(); | ||
8858 | list_for_each_entry_rcu(tgi, &parent->children, siblings) { | ||
8859 | if (tgi == tg) | ||
8860 | continue; | ||
8861 | |||
8862 | total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), | ||
8863 | tgi->rt_bandwidth.rt_runtime); | ||
8864 | } | ||
8865 | rcu_read_unlock(); | ||
8866 | |||
8867 | return total + to_ratio(period, runtime) < | ||
8868 | to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period), | ||
8869 | parent->rt_bandwidth.rt_runtime); | ||
8870 | } | ||
8871 | #elif defined CONFIG_USER_SCHED | ||
7782 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) | 8872 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) |
7783 | { | 8873 | { |
7784 | struct task_group *tgi; | 8874 | struct task_group *tgi; |
7785 | unsigned long total = 0; | 8875 | unsigned long total = 0; |
7786 | unsigned long global_ratio = | 8876 | unsigned long global_ratio = |
7787 | to_ratio(sysctl_sched_rt_period, | 8877 | to_ratio(global_rt_period(), global_rt_runtime()); |
7788 | sysctl_sched_rt_runtime < 0 ? | ||
7789 | RUNTIME_INF : sysctl_sched_rt_runtime); | ||
7790 | 8878 | ||
7791 | rcu_read_lock(); | 8879 | rcu_read_lock(); |
7792 | list_for_each_entry_rcu(tgi, &task_groups, list) { | 8880 | list_for_each_entry_rcu(tgi, &task_groups, list) { |
7793 | if (tgi == tg) | 8881 | if (tgi == tg) |
7794 | continue; | 8882 | continue; |
7795 | 8883 | ||
7796 | total += to_ratio(period, tgi->rt_runtime); | 8884 | total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), |
8885 | tgi->rt_bandwidth.rt_runtime); | ||
7797 | } | 8886 | } |
7798 | rcu_read_unlock(); | 8887 | rcu_read_unlock(); |
7799 | 8888 | ||
7800 | return total + to_ratio(period, runtime) < global_ratio; | 8889 | return total + to_ratio(period, runtime) < global_ratio; |
7801 | } | 8890 | } |
8891 | #endif | ||
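A worked instance of the admission check above, with purely illustrative numbers: to_ratio() expresses runtime/period in 16.16 fixed point, so with one-second periods throughout, a parent runtime of 950ms gives a ratio of about 0.95, and siblings with 300ms and 400ms of runtime contribute 0.30 and 0.40. A further child asking for 300ms would bring the total to 0.30 + 0.40 + 0.30 = 1.00, which is not below the parent's 0.95, so __rt_schedulable() returns 0 and the requested bandwidth is rejected.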
7802 | 8892 | ||
7803 | /* Must be called with tasklist_lock held */ | 8893 | /* Must be called with tasklist_lock held */ |
7804 | static inline int tg_has_rt_tasks(struct task_group *tg) | 8894 | static inline int tg_has_rt_tasks(struct task_group *tg) |
@@ -7811,19 +8901,14 @@ static inline int tg_has_rt_tasks(struct task_group *tg) | |||
7811 | return 0; | 8901 | return 0; |
7812 | } | 8902 | } |
7813 | 8903 | ||
7814 | int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) | 8904 | static int tg_set_bandwidth(struct task_group *tg, |
8905 | u64 rt_period, u64 rt_runtime) | ||
7815 | { | 8906 | { |
7816 | u64 rt_runtime, rt_period; | 8907 | int i, err = 0; |
7817 | int err = 0; | ||
7818 | |||
7819 | rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC; | ||
7820 | rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; | ||
7821 | if (rt_runtime_us == -1) | ||
7822 | rt_runtime = RUNTIME_INF; | ||
7823 | 8908 | ||
7824 | mutex_lock(&rt_constraints_mutex); | 8909 | mutex_lock(&rt_constraints_mutex); |
7825 | read_lock(&tasklist_lock); | 8910 | read_lock(&tasklist_lock); |
7826 | if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) { | 8911 | if (rt_runtime == 0 && tg_has_rt_tasks(tg)) { |
7827 | err = -EBUSY; | 8912 | err = -EBUSY; |
7828 | goto unlock; | 8913 | goto unlock; |
7829 | } | 8914 | } |
@@ -7831,7 +8916,19 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) | |||
7831 | err = -EINVAL; | 8916 | err = -EINVAL; |
7832 | goto unlock; | 8917 | goto unlock; |
7833 | } | 8918 | } |
7834 | tg->rt_runtime = rt_runtime; | 8919 | |
8920 | spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); | ||
8921 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); | ||
8922 | tg->rt_bandwidth.rt_runtime = rt_runtime; | ||
8923 | |||
8924 | for_each_possible_cpu(i) { | ||
8925 | struct rt_rq *rt_rq = tg->rt_rq[i]; | ||
8926 | |||
8927 | spin_lock(&rt_rq->rt_runtime_lock); | ||
8928 | rt_rq->rt_runtime = rt_runtime; | ||
8929 | spin_unlock(&rt_rq->rt_runtime_lock); | ||
8930 | } | ||
8931 | spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); | ||
7835 | unlock: | 8932 | unlock: |
7836 | read_unlock(&tasklist_lock); | 8933 | read_unlock(&tasklist_lock); |
7837 | mutex_unlock(&rt_constraints_mutex); | 8934 | mutex_unlock(&rt_constraints_mutex); |
@@ -7839,19 +8936,109 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) | |||
7839 | return err; | 8936 | return err; |
7840 | } | 8937 | } |
7841 | 8938 | ||
8939 | int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) | ||
8940 | { | ||
8941 | u64 rt_runtime, rt_period; | ||
8942 | |||
8943 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); | ||
8944 | rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; | ||
8945 | if (rt_runtime_us < 0) | ||
8946 | rt_runtime = RUNTIME_INF; | ||
8947 | |||
8948 | return tg_set_bandwidth(tg, rt_period, rt_runtime); | ||
8949 | } | ||
8950 | |||
7842 | long sched_group_rt_runtime(struct task_group *tg) | 8951 | long sched_group_rt_runtime(struct task_group *tg) |
7843 | { | 8952 | { |
7844 | u64 rt_runtime_us; | 8953 | u64 rt_runtime_us; |
7845 | 8954 | ||
7846 | if (tg->rt_runtime == RUNTIME_INF) | 8955 | if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) |
7847 | return -1; | 8956 | return -1; |
7848 | 8957 | ||
7849 | rt_runtime_us = tg->rt_runtime; | 8958 | rt_runtime_us = tg->rt_bandwidth.rt_runtime; |
7850 | do_div(rt_runtime_us, NSEC_PER_USEC); | 8959 | do_div(rt_runtime_us, NSEC_PER_USEC); |
7851 | return rt_runtime_us; | 8960 | return rt_runtime_us; |
7852 | } | 8961 | } |
8962 | |||
8963 | int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) | ||
8964 | { | ||
8965 | u64 rt_runtime, rt_period; | ||
8966 | |||
8967 | rt_period = (u64)rt_period_us * NSEC_PER_USEC; | ||
8968 | rt_runtime = tg->rt_bandwidth.rt_runtime; | ||
8969 | |||
8970 | return tg_set_bandwidth(tg, rt_period, rt_runtime); | ||
8971 | } | ||
8972 | |||
8973 | long sched_group_rt_period(struct task_group *tg) | ||
8974 | { | ||
8975 | u64 rt_period_us; | ||
8976 | |||
8977 | rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); | ||
8978 | do_div(rt_period_us, NSEC_PER_USEC); | ||
8979 | return rt_period_us; | ||
8980 | } | ||
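Taken together, sched_group_set_rt_runtime() and sched_group_set_rt_period() are the kernel-side entry points behind the cgroup attributes added later in this patch; for instance, writing 500000 to a group's cpu.rt_runtime_us boils down to roughly the following (illustrative helper, error handling omitted):

static int example_set_half_second_runtime(struct task_group *tg)
{
	/* 500000us == 500ms of RT runtime per period for this group */
	return sched_group_set_rt_runtime(tg, 500000);
}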
8981 | |||
8982 | static int sched_rt_global_constraints(void) | ||
8983 | { | ||
8984 | int ret = 0; | ||
8985 | |||
8986 | mutex_lock(&rt_constraints_mutex); | ||
8987 | if (!__rt_schedulable(NULL, 1, 0)) | ||
8988 | ret = -EINVAL; | ||
8989 | mutex_unlock(&rt_constraints_mutex); | ||
8990 | |||
8991 | return ret; | ||
8992 | } | ||
8993 | #else | ||
8994 | static int sched_rt_global_constraints(void) | ||
8995 | { | ||
8996 | unsigned long flags; | ||
8997 | int i; | ||
8998 | |||
8999 | spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); | ||
9000 | for_each_possible_cpu(i) { | ||
9001 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; | ||
9002 | |||
9003 | spin_lock(&rt_rq->rt_runtime_lock); | ||
9004 | rt_rq->rt_runtime = global_rt_runtime(); | ||
9005 | spin_unlock(&rt_rq->rt_runtime_lock); | ||
9006 | } | ||
9007 | spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); | ||
9008 | |||
9009 | return 0; | ||
9010 | } | ||
7853 | #endif | 9011 | #endif |
7854 | #endif /* CONFIG_GROUP_SCHED */ | 9012 | |
9013 | int sched_rt_handler(struct ctl_table *table, int write, | ||
9014 | struct file *filp, void __user *buffer, size_t *lenp, | ||
9015 | loff_t *ppos) | ||
9016 | { | ||
9017 | int ret; | ||
9018 | int old_period, old_runtime; | ||
9019 | static DEFINE_MUTEX(mutex); | ||
9020 | |||
9021 | mutex_lock(&mutex); | ||
9022 | old_period = sysctl_sched_rt_period; | ||
9023 | old_runtime = sysctl_sched_rt_runtime; | ||
9024 | |||
9025 | ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); | ||
9026 | |||
9027 | if (!ret && write) { | ||
9028 | ret = sched_rt_global_constraints(); | ||
9029 | if (ret) { | ||
9030 | sysctl_sched_rt_period = old_period; | ||
9031 | sysctl_sched_rt_runtime = old_runtime; | ||
9032 | } else { | ||
9033 | def_rt_bandwidth.rt_runtime = global_rt_runtime(); | ||
9034 | def_rt_bandwidth.rt_period = | ||
9035 | ns_to_ktime(global_rt_period()); | ||
9036 | } | ||
9037 | } | ||
9038 | mutex_unlock(&mutex); | ||
9039 | |||
9040 | return ret; | ||
9041 | } | ||
7855 | 9042 | ||
7856 | #ifdef CONFIG_CGROUP_SCHED | 9043 | #ifdef CONFIG_CGROUP_SCHED |
7857 | 9044 | ||
@@ -7865,7 +9052,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp) | |||
7865 | static struct cgroup_subsys_state * | 9052 | static struct cgroup_subsys_state * |
7866 | cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) | 9053 | cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) |
7867 | { | 9054 | { |
7868 | struct task_group *tg; | 9055 | struct task_group *tg, *parent; |
7869 | 9056 | ||
7870 | if (!cgrp->parent) { | 9057 | if (!cgrp->parent) { |
7871 | /* This is early initialization for the top cgroup */ | 9058 | /* This is early initialization for the top cgroup */ |
@@ -7873,11 +9060,8 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
7873 | return &init_task_group.css; | 9060 | return &init_task_group.css; |
7874 | } | 9061 | } |
7875 | 9062 | ||
7876 | /* we support only 1-level deep hierarchical scheduler atm */ | 9063 | parent = cgroup_tg(cgrp->parent); |
7877 | if (cgrp->parent->parent) | 9064 | tg = sched_create_group(parent); |
7878 | return ERR_PTR(-EINVAL); | ||
7879 | |||
7880 | tg = sched_create_group(); | ||
7881 | if (IS_ERR(tg)) | 9065 | if (IS_ERR(tg)) |
7882 | return ERR_PTR(-ENOMEM); | 9066 | return ERR_PTR(-ENOMEM); |
7883 | 9067 | ||
@@ -7901,7 +9085,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | |||
7901 | { | 9085 | { |
7902 | #ifdef CONFIG_RT_GROUP_SCHED | 9086 | #ifdef CONFIG_RT_GROUP_SCHED |
7903 | /* Don't accept realtime tasks when there is no way for them to run */ | 9087 | /* Don't accept realtime tasks when there is no way for them to run */ |
7904 | if (rt_task(tsk) && cgroup_tg(cgrp)->rt_runtime == 0) | 9088 | if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0) |
7905 | return -EINVAL; | 9089 | return -EINVAL; |
7906 | #else | 9090 | #else |
7907 | /* We don't support RT-tasks being in separate groups */ | 9091 | /* We don't support RT-tasks being in separate groups */ |
@@ -7935,7 +9119,7 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) | |||
7935 | #endif | 9119 | #endif |
7936 | 9120 | ||
7937 | #ifdef CONFIG_RT_GROUP_SCHED | 9121 | #ifdef CONFIG_RT_GROUP_SCHED |
7938 | static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, | 9122 | static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, |
7939 | struct file *file, | 9123 | struct file *file, |
7940 | const char __user *userbuf, | 9124 | const char __user *userbuf, |
7941 | size_t nbytes, loff_t *unused_ppos) | 9125 | size_t nbytes, loff_t *unused_ppos) |
@@ -7979,6 +9163,17 @@ static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft, | |||
7979 | 9163 | ||
7980 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); | 9164 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); |
7981 | } | 9165 | } |
9166 | |||
9167 | static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, | ||
9168 | u64 rt_period_us) | ||
9169 | { | ||
9170 | return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us); | ||
9171 | } | ||
9172 | |||
9173 | static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft) | ||
9174 | { | ||
9175 | return sched_group_rt_period(cgroup_tg(cgrp)); | ||
9176 | } | ||
7982 | #endif | 9177 | #endif |
7983 | 9178 | ||
7984 | static struct cftype cpu_files[] = { | 9179 | static struct cftype cpu_files[] = { |
@@ -7995,6 +9190,11 @@ static struct cftype cpu_files[] = { | |||
7995 | .read = cpu_rt_runtime_read, | 9190 | .read = cpu_rt_runtime_read, |
7996 | .write = cpu_rt_runtime_write, | 9191 | .write = cpu_rt_runtime_write, |
7997 | }, | 9192 | }, |
9193 | { | ||
9194 | .name = "rt_period_us", | ||
9195 | .read_uint = cpu_rt_period_read_uint, | ||
9196 | .write_uint = cpu_rt_period_write_uint, | ||
9197 | }, | ||
7998 | #endif | 9198 | #endif |
7999 | }; | 9199 | }; |
8000 | 9200 | ||
@@ -8035,9 +9235,9 @@ struct cpuacct { | |||
8035 | struct cgroup_subsys cpuacct_subsys; | 9235 | struct cgroup_subsys cpuacct_subsys; |
8036 | 9236 | ||
8037 | /* return cpu accounting group corresponding to this container */ | 9237 | /* return cpu accounting group corresponding to this container */ |
8038 | static inline struct cpuacct *cgroup_ca(struct cgroup *cont) | 9238 | static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) |
8039 | { | 9239 | { |
8040 | return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id), | 9240 | return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id), |
8041 | struct cpuacct, css); | 9241 | struct cpuacct, css); |
8042 | } | 9242 | } |
8043 | 9243 | ||
@@ -8050,7 +9250,7 @@ static inline struct cpuacct *task_ca(struct task_struct *tsk) | |||
8050 | 9250 | ||
8051 | /* create a new cpu accounting group */ | 9251 | /* create a new cpu accounting group */ |
8052 | static struct cgroup_subsys_state *cpuacct_create( | 9252 | static struct cgroup_subsys_state *cpuacct_create( |
8053 | struct cgroup_subsys *ss, struct cgroup *cont) | 9253 | struct cgroup_subsys *ss, struct cgroup *cgrp) |
8054 | { | 9254 | { |
8055 | struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); | 9255 | struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); |
8056 | 9256 | ||
@@ -8068,18 +9268,18 @@ static struct cgroup_subsys_state *cpuacct_create( | |||
8068 | 9268 | ||
8069 | /* destroy an existing cpu accounting group */ | 9269 | /* destroy an existing cpu accounting group */ |
8070 | static void | 9270 | static void |
8071 | cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | 9271 | cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) |
8072 | { | 9272 | { |
8073 | struct cpuacct *ca = cgroup_ca(cont); | 9273 | struct cpuacct *ca = cgroup_ca(cgrp); |
8074 | 9274 | ||
8075 | free_percpu(ca->cpuusage); | 9275 | free_percpu(ca->cpuusage); |
8076 | kfree(ca); | 9276 | kfree(ca); |
8077 | } | 9277 | } |
8078 | 9278 | ||
8079 | /* return total cpu usage (in nanoseconds) of a group */ | 9279 | /* return total cpu usage (in nanoseconds) of a group */ |
8080 | static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft) | 9280 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) |
8081 | { | 9281 | { |
8082 | struct cpuacct *ca = cgroup_ca(cont); | 9282 | struct cpuacct *ca = cgroup_ca(cgrp); |
8083 | u64 totalcpuusage = 0; | 9283 | u64 totalcpuusage = 0; |
8084 | int i; | 9284 | int i; |
8085 | 9285 | ||
@@ -8098,16 +9298,40 @@ static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft) | |||
8098 | return totalcpuusage; | 9298 | return totalcpuusage; |
8099 | } | 9299 | } |
8100 | 9300 | ||
9301 | static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, | ||
9302 | u64 reset) | ||
9303 | { | ||
9304 | struct cpuacct *ca = cgroup_ca(cgrp); | ||
9305 | int err = 0; | ||
9306 | int i; | ||
9307 | |||
9308 | if (reset) { | ||
9309 | err = -EINVAL; | ||
9310 | goto out; | ||
9311 | } | ||
9312 | |||
9313 | for_each_possible_cpu(i) { | ||
9314 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | ||
9315 | |||
9316 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9317 | *cpuusage = 0; | ||
9318 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9319 | } | ||
9320 | out: | ||
9321 | return err; | ||
9322 | } | ||
9323 | |||
8101 | static struct cftype files[] = { | 9324 | static struct cftype files[] = { |
8102 | { | 9325 | { |
8103 | .name = "usage", | 9326 | .name = "usage", |
8104 | .read_uint = cpuusage_read, | 9327 | .read_uint = cpuusage_read, |
9328 | .write_uint = cpuusage_write, | ||
8105 | }, | 9329 | }, |
8106 | }; | 9330 | }; |
8107 | 9331 | ||
8108 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont) | 9332 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) |
8109 | { | 9333 | { |
8110 | return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); | 9334 | return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files)); |
8111 | } | 9335 | } |
8112 | 9336 | ||
8113 | /* | 9337 | /* |