path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 723
1 files changed, 491 insertions, 232 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 4e2f60335656..d16c8d9fbd8b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -74,6 +74,8 @@
74#include <asm/tlb.h> 74#include <asm/tlb.h>
75#include <asm/irq_regs.h> 75#include <asm/irq_regs.h>
76 76
77#include "sched_cpupri.h"
78
77/* 79/*
78 * Convert user-nice values [ -20 ... 0 ... 19 ] 80 * Convert user-nice values [ -20 ... 0 ... 19 ]
79 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], 81 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -289,15 +291,15 @@ struct task_group root_task_group;
289static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); 291static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
290/* Default task group's cfs_rq on each cpu */ 292/* Default task group's cfs_rq on each cpu */
291static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; 293static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
292#endif 294#endif /* CONFIG_FAIR_GROUP_SCHED */
293 295
294#ifdef CONFIG_RT_GROUP_SCHED 296#ifdef CONFIG_RT_GROUP_SCHED
295static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); 297static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
296static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; 298static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
297#endif 299#endif /* CONFIG_RT_GROUP_SCHED */
298#else 300#else /* !CONFIG_FAIR_GROUP_SCHED */
299#define root_task_group init_task_group 301#define root_task_group init_task_group
300#endif 302#endif /* CONFIG_FAIR_GROUP_SCHED */
301 303
302/* task_group_lock serializes add/remove of task groups and also changes to 304/* task_group_lock serializes add/remove of task groups and also changes to
303 * a task group's cpu shares. 305 * a task group's cpu shares.
@@ -307,9 +309,9 @@ static DEFINE_SPINLOCK(task_group_lock);
307#ifdef CONFIG_FAIR_GROUP_SCHED 309#ifdef CONFIG_FAIR_GROUP_SCHED
308#ifdef CONFIG_USER_SCHED 310#ifdef CONFIG_USER_SCHED
309# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) 311# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
310#else 312#else /* !CONFIG_USER_SCHED */
311# define INIT_TASK_GROUP_LOAD NICE_0_LOAD 313# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
312#endif 314#endif /* CONFIG_USER_SCHED */
313 315
314/* 316/*
315 * A weight of 0 or 1 can cause arithmetics problems. 317 * A weight of 0 or 1 can cause arithmetics problems.
@@ -363,6 +365,10 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
363#else 365#else
364 366
365static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 367static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
368static inline struct task_group *task_group(struct task_struct *p)
369{
370 return NULL;
371}
366 372
367#endif /* CONFIG_GROUP_SCHED */ 373#endif /* CONFIG_GROUP_SCHED */
368 374
@@ -373,6 +379,7 @@ struct cfs_rq {
373 379
374 u64 exec_clock; 380 u64 exec_clock;
375 u64 min_vruntime; 381 u64 min_vruntime;
382 u64 pair_start;
376 383
377 struct rb_root tasks_timeline; 384 struct rb_root tasks_timeline;
378 struct rb_node *rb_leftmost; 385 struct rb_node *rb_leftmost;
@@ -401,6 +408,31 @@ struct cfs_rq {
401 */ 408 */
402 struct list_head leaf_cfs_rq_list; 409 struct list_head leaf_cfs_rq_list;
403 struct task_group *tg; /* group that "owns" this runqueue */ 410 struct task_group *tg; /* group that "owns" this runqueue */
411
412#ifdef CONFIG_SMP
413 /*
414 * the part of load.weight contributed by tasks
415 */
416 unsigned long task_weight;
417
418 /*
419 * h_load = weight * f(tg)
420 *
421 * Where f(tg) is the recursive weight fraction assigned to
422 * this group.
423 */
424 unsigned long h_load;
425
426 /*
427 * this cpu's part of tg->shares
428 */
429 unsigned long shares;
430
431 /*
432 * load.weight at the time we set shares
433 */
434 unsigned long rq_weight;
435#endif
404#endif 436#endif
405}; 437};
406 438
@@ -452,6 +484,9 @@ struct root_domain {
452 */ 484 */
453 cpumask_t rto_mask; 485 cpumask_t rto_mask;
454 atomic_t rto_count; 486 atomic_t rto_count;
487#ifdef CONFIG_SMP
488 struct cpupri cpupri;
489#endif
455}; 490};
456 491
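Note on the new cpupri member above: the root domain now carries a CPU-priority map (see the new sched_cpupri.h include) that lets the RT scheduler locate a CPU running at lower priority without scanning every runqueue. A minimal standalone sketch of the idea; the kernel's two-level bitmap and its API are simplified away and all names here are illustrative, not the cpupri implementation:

#define NR_CPUS_SKETCH   8
#define NR_PRI_LEVELS    102    /* assumed: invalid, idle, normal, RT0..RT99 */

/* one cpu mask per priority level: which cpus currently run at that level */
struct cpupri_sketch {
        unsigned long pri_to_cpumask[NR_PRI_LEVELS];
        int cpu_to_pri[NR_CPUS_SKETCH];
};

/* mask of cpus running at strictly lower priority than task_pri;
 * 0 means no suitable target for pushing an RT task */
static unsigned long find_lower_pri_cpus(const struct cpupri_sketch *cp,
                                         int task_pri)
{
        unsigned long mask = 0;
        int lvl;

        for (lvl = 0; lvl < task_pri; lvl++)
                mask |= cp->pri_to_cpumask[lvl];
        return mask;
}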
457/* 492/*
@@ -526,6 +561,9 @@ struct rq {
526 int push_cpu; 561 int push_cpu;
527 /* cpu of this runqueue: */ 562 /* cpu of this runqueue: */
528 int cpu; 563 int cpu;
564 int online;
565
566 unsigned long avg_load_per_task;
529 567
530 struct task_struct *migration_thread; 568 struct task_struct *migration_thread;
531 struct list_head migration_queue; 569 struct list_head migration_queue;
@@ -749,6 +787,12 @@ late_initcall(sched_init_debug);
749const_debug unsigned int sysctl_sched_nr_migrate = 32; 787const_debug unsigned int sysctl_sched_nr_migrate = 32;
750 788
751/* 789/*
790 * ratelimit for updating the group shares.
791 * default: 0.5ms
792 */
793const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
794
795/*
752 * period over which we measure -rt task cpu usage in us. 796 * period over which we measure -rt task cpu usage in us.
753 * default: 1s 797 * default: 1s
754 */ 798 */
@@ -775,82 +819,6 @@ static inline u64 global_rt_runtime(void)
775 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 819 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
776} 820}
777 821
778unsigned long long time_sync_thresh = 100000;
779
780static DEFINE_PER_CPU(unsigned long long, time_offset);
781static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
782
783/*
784 * Global lock which we take every now and then to synchronize
785 * the CPUs time. This method is not warp-safe, but it's good
786 * enough to synchronize slowly diverging time sources and thus
787 * it's good enough for tracing:
788 */
789static DEFINE_SPINLOCK(time_sync_lock);
790static unsigned long long prev_global_time;
791
792static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
793{
794 /*
795 * We want this inlined, to not get tracer function calls
796 * in this critical section:
797 */
798 spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
799 __raw_spin_lock(&time_sync_lock.raw_lock);
800
801 if (time < prev_global_time) {
802 per_cpu(time_offset, cpu) += prev_global_time - time;
803 time = prev_global_time;
804 } else {
805 prev_global_time = time;
806 }
807
808 __raw_spin_unlock(&time_sync_lock.raw_lock);
809 spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
810
811 return time;
812}
813
814static unsigned long long __cpu_clock(int cpu)
815{
816 unsigned long long now;
817
818 /*
819 * Only call sched_clock() if the scheduler has already been
820 * initialized (some code might call cpu_clock() very early):
821 */
822 if (unlikely(!scheduler_running))
823 return 0;
824
825 now = sched_clock_cpu(cpu);
826
827 return now;
828}
829
830/*
831 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
832 * clock constructed from sched_clock():
833 */
834unsigned long long cpu_clock(int cpu)
835{
836 unsigned long long prev_cpu_time, time, delta_time;
837 unsigned long flags;
838
839 local_irq_save(flags);
840 prev_cpu_time = per_cpu(prev_cpu_time, cpu);
841 time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
842 delta_time = time-prev_cpu_time;
843
844 if (unlikely(delta_time > time_sync_thresh)) {
845 time = __sync_cpu_clock(time, cpu);
846 per_cpu(prev_cpu_time, cpu) = time;
847 }
848 local_irq_restore(flags);
849
850 return time;
851}
852EXPORT_SYMBOL_GPL(cpu_clock);
853
854#ifndef prepare_arch_switch 822#ifndef prepare_arch_switch
855# define prepare_arch_switch(next) do { } while (0) 823# define prepare_arch_switch(next) do { } while (0)
856#endif 824#endif
@@ -1313,15 +1281,15 @@ void wake_up_idle_cpu(int cpu)
1313 if (!tsk_is_polling(rq->idle)) 1281 if (!tsk_is_polling(rq->idle))
1314 smp_send_reschedule(cpu); 1282 smp_send_reschedule(cpu);
1315} 1283}
1316#endif 1284#endif /* CONFIG_NO_HZ */
1317 1285
1318#else 1286#else /* !CONFIG_SMP */
1319static void __resched_task(struct task_struct *p, int tif_bit) 1287static void __resched_task(struct task_struct *p, int tif_bit)
1320{ 1288{
1321 assert_spin_locked(&task_rq(p)->lock); 1289 assert_spin_locked(&task_rq(p)->lock);
1322 set_tsk_thread_flag(p, tif_bit); 1290 set_tsk_thread_flag(p, tif_bit);
1323} 1291}
1324#endif 1292#endif /* CONFIG_SMP */
1325 1293
1326#if BITS_PER_LONG == 32 1294#if BITS_PER_LONG == 32
1327# define WMULT_CONST (~0UL) 1295# define WMULT_CONST (~0UL)
@@ -1336,6 +1304,9 @@ static void __resched_task(struct task_struct *p, int tif_bit)
1336 */ 1304 */
1337#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) 1305#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
1338 1306
1307/*
1308 * delta *= weight / lw
1309 */
1339static unsigned long 1310static unsigned long
1340calc_delta_mine(unsigned long delta_exec, unsigned long weight, 1311calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1341 struct load_weight *lw) 1312 struct load_weight *lw)
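The new comment documents what calc_delta_mine() computes. Below is a standalone sketch of the fixed-point form using the SRR() rounding idea from the surrounding code; WMULT_SHIFT, the overflow guard value and inv_weight (roughly 2^32 / lw->weight) mirror the kernel's conventions but the function itself is illustrative only:

#include <stdint.h>

#define WMULT_SHIFT 32

/* shift right with rounding, as in the SRR() macro */
static inline uint64_t srr(uint64_t x, unsigned int y)
{
        return (x + (1ULL << (y - 1))) >> y;
}

/* delta *= weight / lw, using the precomputed reciprocal inv_weight ~= 2^32 / lw */
static uint64_t scale_delta(uint64_t delta_exec, unsigned long weight,
                            uint32_t inv_weight)
{
        uint64_t tmp = delta_exec * (uint64_t)weight;

        if (tmp > 0xffffffffULL)        /* split the shift so the product cannot overflow */
                return srr(srr(tmp, WMULT_SHIFT / 2) * inv_weight, WMULT_SHIFT / 2);

        return srr(tmp * inv_weight, WMULT_SHIFT);
}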
@@ -1363,12 +1334,6 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1363 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); 1334 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
1364} 1335}
1365 1336
1366static inline unsigned long
1367calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
1368{
1369 return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
1370}
1371
1372static inline void update_load_add(struct load_weight *lw, unsigned long inc) 1337static inline void update_load_add(struct load_weight *lw, unsigned long inc)
1373{ 1338{
1374 lw->weight += inc; 1339 lw->weight += inc;
@@ -1479,17 +1444,211 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1479#ifdef CONFIG_SMP 1444#ifdef CONFIG_SMP
1480static unsigned long source_load(int cpu, int type); 1445static unsigned long source_load(int cpu, int type);
1481static unsigned long target_load(int cpu, int type); 1446static unsigned long target_load(int cpu, int type);
1482static unsigned long cpu_avg_load_per_task(int cpu);
1483static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); 1447static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1484#else /* CONFIG_SMP */ 1448
1449static unsigned long cpu_avg_load_per_task(int cpu)
1450{
1451 struct rq *rq = cpu_rq(cpu);
1452
1453 if (rq->nr_running)
1454 rq->avg_load_per_task = rq->load.weight / rq->nr_running;
1455
1456 return rq->avg_load_per_task;
1457}
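cpu_avg_load_per_task() now caches its result in the new rq->avg_load_per_task field and keeps the last value while the runqueue is momentarily empty (the old version, removed further down, fell back to SCHED_LOAD_SCALE). A tiny standalone model of that behaviour with field names local to the sketch:

struct rq_sketch {
        unsigned long load_weight;        /* sum of runnable task weights */
        unsigned long nr_running;
        unsigned long avg_load_per_task;  /* cached, survives empty periods */
};

static unsigned long avg_load_per_task(struct rq_sketch *rq)
{
        if (rq->nr_running)
                rq->avg_load_per_task = rq->load_weight / rq->nr_running;
        return rq->avg_load_per_task;
}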
1485 1458
1486#ifdef CONFIG_FAIR_GROUP_SCHED 1459#ifdef CONFIG_FAIR_GROUP_SCHED
1487static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) 1460
1461typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
1462
1463/*
1464 * Iterate the full tree, calling @down when first entering a node and @up when
1465 * leaving it for the final time.
1466 */
1467static void
1468walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
1469{
1470 struct task_group *parent, *child;
1471
1472 rcu_read_lock();
1473 parent = &root_task_group;
1474down:
1475 (*down)(parent, cpu, sd);
1476 list_for_each_entry_rcu(child, &parent->children, siblings) {
1477 parent = child;
1478 goto down;
1479
1480up:
1481 continue;
1482 }
1483 (*up)(parent, cpu, sd);
1484
1485 child = parent;
1486 parent = parent->parent;
1487 if (parent)
1488 goto up;
1489 rcu_read_unlock();
1490}
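walk_tg_tree() flattens a depth-first traversal into gotos to avoid recursion inside the scheduler. A recursive standalone sketch of the same visiting order, with @down called on first entry and @up when a node is left for the final time; the RCU list iteration and the sched_domain argument are dropped and the node type is simplified:

struct tg_sketch {
        struct tg_sketch *children;     /* first child */
        struct tg_sketch *sibling;      /* next child of the same parent */
};

typedef void (*tg_visitor_sketch)(struct tg_sketch *tg, int cpu);

static void walk_tg_tree_sketch(struct tg_sketch *tg, int cpu,
                                tg_visitor_sketch down, tg_visitor_sketch up)
{
        struct tg_sketch *child;

        down(tg, cpu);                          /* first entry into this node */
        for (child = tg->children; child; child = child->sibling)
                walk_tg_tree_sketch(child, cpu, down, up);
        up(tg, cpu);                            /* leaving it for the final time */
}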
1491
1492static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1493
1494/*
1495 * Calculate and set the cpu's group shares.
1496 */
1497static void
1498__update_group_shares_cpu(struct task_group *tg, int cpu,
1499 unsigned long sd_shares, unsigned long sd_rq_weight)
1488{ 1500{
1501 int boost = 0;
1502 unsigned long shares;
1503 unsigned long rq_weight;
1504
1505 if (!tg->se[cpu])
1506 return;
1507
1508 rq_weight = tg->cfs_rq[cpu]->load.weight;
1509
1510 /*
1511 * If there are currently no tasks on the cpu pretend there is one of
1512 * average load so that when a new task gets to run here it will not
1513 * get delayed by group starvation.
1514 */
1515 if (!rq_weight) {
1516 boost = 1;
1517 rq_weight = NICE_0_LOAD;
1518 }
1519
1520 if (unlikely(rq_weight > sd_rq_weight))
1521 rq_weight = sd_rq_weight;
1522
1523 /*
1524 * \Sum shares * rq_weight
1525 * shares = -----------------------
1526 * \Sum rq_weight
1527 *
1528 */
1529 shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
1530
1531 /*
1532 * record the actual number of shares, not the boosted amount.
1533 */
1534 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
1535 tg->cfs_rq[cpu]->rq_weight = rq_weight;
1536
1537 if (shares < MIN_SHARES)
1538 shares = MIN_SHARES;
1539 else if (shares > MAX_SHARES)
1540 shares = MAX_SHARES;
1541
1542 __set_se_shares(tg->se[cpu], shares);
1489} 1543}
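Worked through, __update_group_shares_cpu() hands each cpu the slice of tg->shares proportional to its runqueue weight, pretends an empty cpu carries one average task so a newly woken task is not starved there, and clamps the result to [MIN_SHARES, MAX_SHARES]. A standalone arithmetic sketch; the constant values are assumptions, and the real code records the unboosted value in cfs_rq->shares and applies the result via __set_se_shares():

#define NICE_0_LOAD_SK   1024UL
#define MIN_SHARES_SK    2UL
#define MAX_SHARES_SK    (1UL << 18)

/* shares = sd_shares * rq_weight / sd_rq_weight, with the empty-cpu boost */
static unsigned long group_shares_cpu(unsigned long rq_weight,
                                      unsigned long sd_shares,
                                      unsigned long sd_rq_weight)
{
        unsigned long shares;

        if (!rq_weight)                 /* idle cpu: pretend one average task */
                rq_weight = NICE_0_LOAD_SK;
        if (rq_weight > sd_rq_weight)
                rq_weight = sd_rq_weight;

        shares = sd_shares * rq_weight / (sd_rq_weight + 1);

        if (shares < MIN_SHARES_SK)
                shares = MIN_SHARES_SK;
        else if (shares > MAX_SHARES_SK)
                shares = MAX_SHARES_SK;
        return shares;
}

With tg->shares = 1024 spread over two cpus whose runqueues weigh 3072 and 1024, this yields roughly 768 and 256 shares respectively.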
1544
1545/*
1546 * Re-compute a task group's per-cpu shares over the given domain.
1547 * This needs to be done in a bottom-up fashion because the rq weight of a
1548 * parent group depends on the shares of its child groups.
1549 */
1550static void
1551tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
1552{
1553 unsigned long rq_weight = 0;
1554 unsigned long shares = 0;
1555 int i;
1556
1557 for_each_cpu_mask(i, sd->span) {
1558 rq_weight += tg->cfs_rq[i]->load.weight;
1559 shares += tg->cfs_rq[i]->shares;
1560 }
1561
1562 if ((!shares && rq_weight) || shares > tg->shares)
1563 shares = tg->shares;
1564
1565 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
1566 shares = tg->shares;
1567
1568 if (!rq_weight)
1569 rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
1570
1571 for_each_cpu_mask(i, sd->span) {
1572 struct rq *rq = cpu_rq(i);
1573 unsigned long flags;
1574
1575 spin_lock_irqsave(&rq->lock, flags);
1576 __update_group_shares_cpu(tg, i, shares, rq_weight);
1577 spin_unlock_irqrestore(&rq->lock, flags);
1578 }
1579}
1580
1581/*
1582 * Compute the cpu's hierarchical load factor for each task group.
1583 * This needs to be done in a top-down fashion because the load of a child
1584 * group is a fraction of its parents load.
1585 */
1586static void
1587tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
1588{
1589 unsigned long load;
1590
1591 if (!tg->parent) {
1592 load = cpu_rq(cpu)->load.weight;
1593 } else {
1594 load = tg->parent->cfs_rq[cpu]->h_load;
1595 load *= tg->cfs_rq[cpu]->shares;
1596 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1597 }
1598
1599 tg->cfs_rq[cpu]->h_load = load;
1600}
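tg_load_down() propagates load top-down: a group's hierarchical load on a cpu is its parent's h_load scaled by the fraction of the parent runqueue's weight this group contributes (the root simply uses the raw rq weight). A standalone sketch of the recurrence, with the +1 mirroring the divide-by-zero guard in the patch:

/* child: parent's h_load scaled by this group's share of the parent's weight */
static unsigned long h_load_sketch(unsigned long parent_h_load,
                                   unsigned long my_shares,
                                   unsigned long parent_rq_weight)
{
        return parent_h_load * my_shares / (parent_rq_weight + 1);
}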
1601
1602static void
1603tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
1604{
1605}
1606
1607static void update_shares(struct sched_domain *sd)
1608{
1609 u64 now = cpu_clock(raw_smp_processor_id());
1610 s64 elapsed = now - sd->last_update;
1611
1612 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
1613 sd->last_update = now;
1614 walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
1615 }
1616}
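update_shares() is rate limited by the new sysctl: a domain's group shares are recomputed at most once per sysctl_sched_shares_ratelimit, compared against cpu_clock() deltas, i.e. nanoseconds, so the default 500000 is 0.5 ms. A trivial standalone sketch of the gate (per-domain state in the real code, a single static here):

#include <stdint.h>

static uint64_t last_update_ns;                 /* sd->last_update in the patch */
static const uint64_t ratelimit_ns = 500000;    /* 0.5 ms */

static int shares_update_due(uint64_t now_ns)
{
        if ((int64_t)(now_ns - last_update_ns) < (int64_t)ratelimit_ns)
                return 0;
        last_update_ns = now_ns;
        return 1;
}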
1617
1618static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1619{
1620 spin_unlock(&rq->lock);
1621 update_shares(sd);
1622 spin_lock(&rq->lock);
1623}
1624
1625static void update_h_load(int cpu)
1626{
1627 walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
1628}
1629
1630#else
1631
1632static inline void update_shares(struct sched_domain *sd)
1633{
1634}
1635
1636static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1637{
1638}
1639
1490#endif 1640#endif
1491 1641
1492#endif /* CONFIG_SMP */ 1642#endif
1643
1644#ifdef CONFIG_FAIR_GROUP_SCHED
1645static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1646{
1647#ifdef CONFIG_SMP
1648 cfs_rq->shares = shares;
1649#endif
1650}
1651#endif
1493 1652
1494#include "sched_stats.h" 1653#include "sched_stats.h"
1495#include "sched_idletask.c" 1654#include "sched_idletask.c"
@@ -1500,27 +1659,17 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1500#endif 1659#endif
1501 1660
1502#define sched_class_highest (&rt_sched_class) 1661#define sched_class_highest (&rt_sched_class)
1662#define for_each_class(class) \
1663 for (class = sched_class_highest; class; class = class->next)
1503 1664
1504static inline void inc_load(struct rq *rq, const struct task_struct *p) 1665static void inc_nr_running(struct rq *rq)
1505{
1506 update_load_add(&rq->load, p->se.load.weight);
1507}
1508
1509static inline void dec_load(struct rq *rq, const struct task_struct *p)
1510{
1511 update_load_sub(&rq->load, p->se.load.weight);
1512}
1513
1514static void inc_nr_running(struct task_struct *p, struct rq *rq)
1515{ 1666{
1516 rq->nr_running++; 1667 rq->nr_running++;
1517 inc_load(rq, p);
1518} 1668}
1519 1669
1520static void dec_nr_running(struct task_struct *p, struct rq *rq) 1670static void dec_nr_running(struct rq *rq)
1521{ 1671{
1522 rq->nr_running--; 1672 rq->nr_running--;
1523 dec_load(rq, p);
1524} 1673}
1525 1674
1526static void set_load_weight(struct task_struct *p) 1675static void set_load_weight(struct task_struct *p)
@@ -1544,6 +1693,12 @@ static void set_load_weight(struct task_struct *p)
1544 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; 1693 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
1545} 1694}
1546 1695
1696static void update_avg(u64 *avg, u64 sample)
1697{
1698 s64 diff = sample - *avg;
1699 *avg += diff >> 3;
1700}
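update_avg() is an exponentially weighted moving average with weight 1/8: each sample pulls the average one eighth of the way toward it (the >> 3). A standalone sketch, signed here for simplicity where the kernel helper takes a u64 pointer:

#include <stdio.h>
#include <stdint.h>

static void update_avg_sketch(int64_t *avg, int64_t sample)
{
        *avg += (sample - *avg) >> 3;   /* avg := avg + (sample - avg)/8 */
}

int main(void)
{
        int64_t avg = 0;
        int64_t samples[] = { 800, 800, 800, 800 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                update_avg_sketch(&avg, samples[i]);
                printf("after sample %u: avg=%lld\n", i, (long long)avg);
        }
        return 0;
}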
1701
1547static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) 1702static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1548{ 1703{
1549 sched_info_queued(p); 1704 sched_info_queued(p);
@@ -1553,6 +1708,13 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1553 1708
1554static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1709static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1555{ 1710{
1711 if (sleep && p->se.last_wakeup) {
1712 update_avg(&p->se.avg_overlap,
1713 p->se.sum_exec_runtime - p->se.last_wakeup);
1714 p->se.last_wakeup = 0;
1715 }
1716
1717 sched_info_dequeued(p);
1556 p->sched_class->dequeue_task(rq, p, sleep); 1718 p->sched_class->dequeue_task(rq, p, sleep);
1557 p->se.on_rq = 0; 1719 p->se.on_rq = 0;
1558} 1720}
@@ -1612,7 +1774,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
1612 rq->nr_uninterruptible--; 1774 rq->nr_uninterruptible--;
1613 1775
1614 enqueue_task(rq, p, wakeup); 1776 enqueue_task(rq, p, wakeup);
1615 inc_nr_running(p, rq); 1777 inc_nr_running(rq);
1616} 1778}
1617 1779
1618/* 1780/*
@@ -1624,7 +1786,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1624 rq->nr_uninterruptible++; 1786 rq->nr_uninterruptible++;
1625 1787
1626 dequeue_task(rq, p, sleep); 1788 dequeue_task(rq, p, sleep);
1627 dec_nr_running(p, rq); 1789 dec_nr_running(rq);
1628} 1790}
1629 1791
1630/** 1792/**
@@ -1636,12 +1798,6 @@ inline int task_curr(const struct task_struct *p)
1636 return cpu_curr(task_cpu(p)) == p; 1798 return cpu_curr(task_cpu(p)) == p;
1637} 1799}
1638 1800
1639/* Used instead of source_load when we know the type == 0 */
1640unsigned long weighted_cpuload(const int cpu)
1641{
1642 return cpu_rq(cpu)->load.weight;
1643}
1644
1645static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1801static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1646{ 1802{
1647 set_task_rq(p, cpu); 1803 set_task_rq(p, cpu);
@@ -1670,6 +1826,12 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1670 1826
1671#ifdef CONFIG_SMP 1827#ifdef CONFIG_SMP
1672 1828
1829/* Used instead of source_load when we know the type == 0 */
1830static unsigned long weighted_cpuload(const int cpu)
1831{
1832 return cpu_rq(cpu)->load.weight;
1833}
1834
1673/* 1835/*
1674 * Is this task likely cache-hot: 1836 * Is this task likely cache-hot:
1675 */ 1837 */
@@ -1880,7 +2042,7 @@ static unsigned long source_load(int cpu, int type)
1880 struct rq *rq = cpu_rq(cpu); 2042 struct rq *rq = cpu_rq(cpu);
1881 unsigned long total = weighted_cpuload(cpu); 2043 unsigned long total = weighted_cpuload(cpu);
1882 2044
1883 if (type == 0) 2045 if (type == 0 || !sched_feat(LB_BIAS))
1884 return total; 2046 return total;
1885 2047
1886 return min(rq->cpu_load[type-1], total); 2048 return min(rq->cpu_load[type-1], total);
@@ -1895,25 +2057,13 @@ static unsigned long target_load(int cpu, int type)
1895 struct rq *rq = cpu_rq(cpu); 2057 struct rq *rq = cpu_rq(cpu);
1896 unsigned long total = weighted_cpuload(cpu); 2058 unsigned long total = weighted_cpuload(cpu);
1897 2059
1898 if (type == 0) 2060 if (type == 0 || !sched_feat(LB_BIAS))
1899 return total; 2061 return total;
1900 2062
1901 return max(rq->cpu_load[type-1], total); 2063 return max(rq->cpu_load[type-1], total);
1902} 2064}
1903 2065
1904/* 2066/*
1905 * Return the average load per task on the cpu's run queue
1906 */
1907static unsigned long cpu_avg_load_per_task(int cpu)
1908{
1909 struct rq *rq = cpu_rq(cpu);
1910 unsigned long total = weighted_cpuload(cpu);
1911 unsigned long n = rq->nr_running;
1912
1913 return n ? total / n : SCHED_LOAD_SCALE;
1914}
1915
1916/*
1917 * find_idlest_group finds and returns the least busy CPU group within the 2067 * find_idlest_group finds and returns the least busy CPU group within the
1918 * domain. 2068 * domain.
1919 */ 2069 */
@@ -2019,6 +2169,9 @@ static int sched_balance_self(int cpu, int flag)
2019 sd = tmp; 2169 sd = tmp;
2020 } 2170 }
2021 2171
2172 if (sd)
2173 update_shares(sd);
2174
2022 while (sd) { 2175 while (sd) {
2023 cpumask_t span, tmpmask; 2176 cpumask_t span, tmpmask;
2024 struct sched_group *group; 2177 struct sched_group *group;
@@ -2085,6 +2238,22 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2085 if (!sched_feat(SYNC_WAKEUPS)) 2238 if (!sched_feat(SYNC_WAKEUPS))
2086 sync = 0; 2239 sync = 0;
2087 2240
2241#ifdef CONFIG_SMP
2242 if (sched_feat(LB_WAKEUP_UPDATE)) {
2243 struct sched_domain *sd;
2244
2245 this_cpu = raw_smp_processor_id();
2246 cpu = task_cpu(p);
2247
2248 for_each_domain(this_cpu, sd) {
2249 if (cpu_isset(cpu, sd->span)) {
2250 update_shares(sd);
2251 break;
2252 }
2253 }
2254 }
2255#endif
2256
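The LB_WAKEUP_UPDATE path above refreshes group shares at wakeup time: it walks the waking cpu's sched domains from the bottom up and updates the first one whose span covers the cpu the task last ran on, so the wake-balance decision that follows sees fresh per-cpu shares. A standalone sketch of that selection, with the domain chain and cpu mask modelled as a plain linked list and bitmask (names illustrative):

struct sd_sketch {
        struct sd_sketch *parent;       /* next, wider domain */
        unsigned long span;             /* bitmask of cpus covered */
};

static void update_shares_sketch(struct sd_sketch *sd)
{
        (void)sd;                       /* stand-in for update_shares() */
}

/* walk this cpu's domain hierarchy, update the first domain spanning task_cpu */
static void wakeup_share_update(struct sd_sketch *base, int task_cpu)
{
        struct sd_sketch *sd;

        for (sd = base; sd; sd = sd->parent) {
                if (sd->span & (1UL << task_cpu)) {
                        update_shares_sketch(sd);
                        break;
                }
        }
}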
2088 smp_wmb(); 2257 smp_wmb();
2089 rq = task_rq_lock(p, &flags); 2258 rq = task_rq_lock(p, &flags);
2090 old_state = p->state; 2259 old_state = p->state;
@@ -2131,7 +2300,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2131 } 2300 }
2132 } 2301 }
2133 } 2302 }
2134#endif 2303#endif /* CONFIG_SCHEDSTATS */
2135 2304
2136out_activate: 2305out_activate:
2137#endif /* CONFIG_SMP */ 2306#endif /* CONFIG_SMP */
@@ -2157,6 +2326,8 @@ out_running:
2157 p->sched_class->task_wake_up(rq, p); 2326 p->sched_class->task_wake_up(rq, p);
2158#endif 2327#endif
2159out: 2328out:
2329 current->se.last_wakeup = current->se.sum_exec_runtime;
2330
2160 task_rq_unlock(rq, &flags); 2331 task_rq_unlock(rq, &flags);
2161 2332
2162 return success; 2333 return success;
@@ -2277,7 +2448,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2277 * management (if any): 2448 * management (if any):
2278 */ 2449 */
2279 p->sched_class->task_new(rq, p); 2450 p->sched_class->task_new(rq, p);
2280 inc_nr_running(p, rq); 2451 inc_nr_running(rq);
2281 } 2452 }
2282 check_preempt_curr(rq, p); 2453 check_preempt_curr(rq, p);
2283#ifdef CONFIG_SMP 2454#ifdef CONFIG_SMP
@@ -2331,7 +2502,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
2331 notifier->ops->sched_out(notifier, next); 2502 notifier->ops->sched_out(notifier, next);
2332} 2503}
2333 2504
2334#else 2505#else /* !CONFIG_PREEMPT_NOTIFIERS */
2335 2506
2336static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2507static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2337{ 2508{
@@ -2343,7 +2514,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
2343{ 2514{
2344} 2515}
2345 2516
2346#endif 2517#endif /* CONFIG_PREEMPT_NOTIFIERS */
2347 2518
2348/** 2519/**
2349 * prepare_task_switch - prepare to switch tasks 2520 * prepare_task_switch - prepare to switch tasks
@@ -2785,7 +2956,7 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2785 enum cpu_idle_type idle, int *all_pinned, 2956 enum cpu_idle_type idle, int *all_pinned,
2786 int *this_best_prio, struct rq_iterator *iterator) 2957 int *this_best_prio, struct rq_iterator *iterator)
2787{ 2958{
2788 int loops = 0, pulled = 0, pinned = 0, skip_for_load; 2959 int loops = 0, pulled = 0, pinned = 0;
2789 struct task_struct *p; 2960 struct task_struct *p;
2790 long rem_load_move = max_load_move; 2961 long rem_load_move = max_load_move;
2791 2962
@@ -2801,14 +2972,8 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2801next: 2972next:
2802 if (!p || loops++ > sysctl_sched_nr_migrate) 2973 if (!p || loops++ > sysctl_sched_nr_migrate)
2803 goto out; 2974 goto out;
2804 /* 2975
2805 * To help distribute high priority tasks across CPUs we don't 2976 if ((p->se.load.weight >> 1) > rem_load_move ||
2806 * skip a task if it will be the highest priority task (i.e. smallest
2807 * prio value) on its new queue regardless of its load weight
2808 */
2809 skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
2810 SCHED_LOAD_SCALE_FUZZ;
2811 if ((skip_for_load && p->prio >= *this_best_prio) ||
2812 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { 2977 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
2813 p = iterator->next(iterator->arg); 2978 p = iterator->next(iterator->arg);
2814 goto next; 2979 goto next;
@@ -2863,6 +3028,10 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2863 max_load_move - total_load_moved, 3028 max_load_move - total_load_moved,
2864 sd, idle, all_pinned, &this_best_prio); 3029 sd, idle, all_pinned, &this_best_prio);
2865 class = class->next; 3030 class = class->next;
3031
3032 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3033 break;
3034
2866 } while (class && max_load_move > total_load_moved); 3035 } while (class && max_load_move > total_load_moved);
2867 3036
2868 return total_load_moved > 0; 3037 return total_load_moved > 0;
@@ -2939,6 +3108,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2939 max_load = this_load = total_load = total_pwr = 0; 3108 max_load = this_load = total_load = total_pwr = 0;
2940 busiest_load_per_task = busiest_nr_running = 0; 3109 busiest_load_per_task = busiest_nr_running = 0;
2941 this_load_per_task = this_nr_running = 0; 3110 this_load_per_task = this_nr_running = 0;
3111
2942 if (idle == CPU_NOT_IDLE) 3112 if (idle == CPU_NOT_IDLE)
2943 load_idx = sd->busy_idx; 3113 load_idx = sd->busy_idx;
2944 else if (idle == CPU_NEWLY_IDLE) 3114 else if (idle == CPU_NEWLY_IDLE)
@@ -2953,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2953 int __group_imb = 0; 3123 int __group_imb = 0;
2954 unsigned int balance_cpu = -1, first_idle_cpu = 0; 3124 unsigned int balance_cpu = -1, first_idle_cpu = 0;
2955 unsigned long sum_nr_running, sum_weighted_load; 3125 unsigned long sum_nr_running, sum_weighted_load;
3126 unsigned long sum_avg_load_per_task;
3127 unsigned long avg_load_per_task;
2956 3128
2957 local_group = cpu_isset(this_cpu, group->cpumask); 3129 local_group = cpu_isset(this_cpu, group->cpumask);
2958 3130
@@ -2961,6 +3133,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2961 3133
2962 /* Tally up the load of all CPUs in the group */ 3134 /* Tally up the load of all CPUs in the group */
2963 sum_weighted_load = sum_nr_running = avg_load = 0; 3135 sum_weighted_load = sum_nr_running = avg_load = 0;
3136 sum_avg_load_per_task = avg_load_per_task = 0;
3137
2964 max_cpu_load = 0; 3138 max_cpu_load = 0;
2965 min_cpu_load = ~0UL; 3139 min_cpu_load = ~0UL;
2966 3140
@@ -2994,6 +3168,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2994 avg_load += load; 3168 avg_load += load;
2995 sum_nr_running += rq->nr_running; 3169 sum_nr_running += rq->nr_running;
2996 sum_weighted_load += weighted_cpuload(i); 3170 sum_weighted_load += weighted_cpuload(i);
3171
3172 sum_avg_load_per_task += cpu_avg_load_per_task(i);
2997 } 3173 }
2998 3174
2999 /* 3175 /*
@@ -3015,7 +3191,20 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3015 avg_load = sg_div_cpu_power(group, 3191 avg_load = sg_div_cpu_power(group,
3016 avg_load * SCHED_LOAD_SCALE); 3192 avg_load * SCHED_LOAD_SCALE);
3017 3193
3018 if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE) 3194
3195 /*
3196 * Consider the group unbalanced when the imbalance is larger
3197 * than the average weight of two tasks.
3198 *
3199 * APZ: with cgroup the avg task weight can vary wildly and
3200 * might not be a suitable number - should we keep a
3201 * normalized nr_running number somewhere that negates
3202 * the hierarchy?
3203 */
3204 avg_load_per_task = sg_div_cpu_power(group,
3205 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3206
3207 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3019 __group_imb = 1; 3208 __group_imb = 1;
3020 3209
3021 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; 3210 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
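find_busiest_group() now derives its intra-group imbalance threshold from the observed average task weight instead of the fixed SCHED_LOAD_SCALE: a group is flagged imbalanced when the spread between its most and least loaded cpu exceeds twice the average load per task, which keeps the test meaningful when group scheduling makes task weights vary widely. A standalone restatement of the test:

/* spread larger than ~two average tasks => intra-group imbalance */
static int group_imbalanced(unsigned long max_cpu_load,
                            unsigned long min_cpu_load,
                            unsigned long avg_load_per_task)
{
        return (max_cpu_load - min_cpu_load) > 2 * avg_load_per_task;
}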
@@ -3156,9 +3345,9 @@ small_imbalance:
3156 if (busiest_load_per_task > this_load_per_task) 3345 if (busiest_load_per_task > this_load_per_task)
3157 imbn = 1; 3346 imbn = 1;
3158 } else 3347 } else
3159 this_load_per_task = SCHED_LOAD_SCALE; 3348 this_load_per_task = cpu_avg_load_per_task(this_cpu);
3160 3349
3161 if (max_load - this_load + SCHED_LOAD_SCALE_FUZZ >= 3350 if (max_load - this_load + 2*busiest_load_per_task >=
3162 busiest_load_per_task * imbn) { 3351 busiest_load_per_task * imbn) {
3163 *imbalance = busiest_load_per_task; 3352 *imbalance = busiest_load_per_task;
3164 return busiest; 3353 return busiest;
@@ -3284,6 +3473,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
3284 schedstat_inc(sd, lb_count[idle]); 3473 schedstat_inc(sd, lb_count[idle]);
3285 3474
3286redo: 3475redo:
3476 update_shares(sd);
3287 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, 3477 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
3288 cpus, balance); 3478 cpus, balance);
3289 3479
@@ -3386,8 +3576,9 @@ redo:
3386 3576
3387 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && 3577 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3388 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) 3578 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3389 return -1; 3579 ld_moved = -1;
3390 return ld_moved; 3580
3581 goto out;
3391 3582
3392out_balanced: 3583out_balanced:
3393 schedstat_inc(sd, lb_balanced[idle]); 3584 schedstat_inc(sd, lb_balanced[idle]);
@@ -3402,8 +3593,13 @@ out_one_pinned:
3402 3593
3403 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && 3594 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3404 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) 3595 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3405 return -1; 3596 ld_moved = -1;
3406 return 0; 3597 else
3598 ld_moved = 0;
3599out:
3600 if (ld_moved)
3601 update_shares(sd);
3602 return ld_moved;
3407} 3603}
3408 3604
3409/* 3605/*
@@ -3438,6 +3634,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
3438 3634
3439 schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]); 3635 schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
3440redo: 3636redo:
3637 update_shares_locked(this_rq, sd);
3441 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE, 3638 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
3442 &sd_idle, cpus, NULL); 3639 &sd_idle, cpus, NULL);
3443 if (!group) { 3640 if (!group) {
@@ -3481,6 +3678,7 @@ redo:
3481 } else 3678 } else
3482 sd->nr_balance_failed = 0; 3679 sd->nr_balance_failed = 0;
3483 3680
3681 update_shares_locked(this_rq, sd);
3484 return ld_moved; 3682 return ld_moved;
3485 3683
3486out_balanced: 3684out_balanced:
@@ -3672,6 +3870,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3672 /* Earliest time when we have to do rebalance again */ 3870 /* Earliest time when we have to do rebalance again */
3673 unsigned long next_balance = jiffies + 60*HZ; 3871 unsigned long next_balance = jiffies + 60*HZ;
3674 int update_next_balance = 0; 3872 int update_next_balance = 0;
3873 int need_serialize;
3675 cpumask_t tmp; 3874 cpumask_t tmp;
3676 3875
3677 for_each_domain(cpu, sd) { 3876 for_each_domain(cpu, sd) {
@@ -3689,8 +3888,9 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3689 if (interval > HZ*NR_CPUS/10) 3888 if (interval > HZ*NR_CPUS/10)
3690 interval = HZ*NR_CPUS/10; 3889 interval = HZ*NR_CPUS/10;
3691 3890
3891 need_serialize = sd->flags & SD_SERIALIZE;
3692 3892
3693 if (sd->flags & SD_SERIALIZE) { 3893 if (need_serialize) {
3694 if (!spin_trylock(&balancing)) 3894 if (!spin_trylock(&balancing))
3695 goto out; 3895 goto out;
3696 } 3896 }
@@ -3706,7 +3906,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3706 } 3906 }
3707 sd->last_balance = jiffies; 3907 sd->last_balance = jiffies;
3708 } 3908 }
3709 if (sd->flags & SD_SERIALIZE) 3909 if (need_serialize)
3710 spin_unlock(&balancing); 3910 spin_unlock(&balancing);
3711out: 3911out:
3712 if (time_after(next_balance, sd->last_balance + interval)) { 3912 if (time_after(next_balance, sd->last_balance + interval)) {
@@ -4070,6 +4270,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
4070 prev->comm, prev->pid, preempt_count()); 4270 prev->comm, prev->pid, preempt_count());
4071 4271
4072 debug_show_held_locks(prev); 4272 debug_show_held_locks(prev);
4273 print_modules();
4073 if (irqs_disabled()) 4274 if (irqs_disabled())
4074 print_irqtrace_events(prev); 4275 print_irqtrace_events(prev);
4075 4276
@@ -4143,7 +4344,7 @@ asmlinkage void __sched schedule(void)
4143 struct task_struct *prev, *next; 4344 struct task_struct *prev, *next;
4144 unsigned long *switch_count; 4345 unsigned long *switch_count;
4145 struct rq *rq; 4346 struct rq *rq;
4146 int cpu; 4347 int cpu, hrtick = sched_feat(HRTICK);
4147 4348
4148need_resched: 4349need_resched:
4149 preempt_disable(); 4350 preempt_disable();
@@ -4158,7 +4359,8 @@ need_resched_nonpreemptible:
4158 4359
4159 schedule_debug(prev); 4360 schedule_debug(prev);
4160 4361
4161 hrtick_clear(rq); 4362 if (hrtick)
4363 hrtick_clear(rq);
4162 4364
4163 /* 4365 /*
4164 * Do the rq-clock update outside the rq lock: 4366 * Do the rq-clock update outside the rq lock:
@@ -4204,7 +4406,8 @@ need_resched_nonpreemptible:
4204 } else 4406 } else
4205 spin_unlock_irq(&rq->lock); 4407 spin_unlock_irq(&rq->lock);
4206 4408
4207 hrtick_set(rq); 4409 if (hrtick)
4410 hrtick_set(rq);
4208 4411
4209 if (unlikely(reacquire_kernel_lock(current) < 0)) 4412 if (unlikely(reacquire_kernel_lock(current) < 0))
4210 goto need_resched_nonpreemptible; 4413 goto need_resched_nonpreemptible;
@@ -4586,10 +4789,8 @@ void set_user_nice(struct task_struct *p, long nice)
4586 goto out_unlock; 4789 goto out_unlock;
4587 } 4790 }
4588 on_rq = p->se.on_rq; 4791 on_rq = p->se.on_rq;
4589 if (on_rq) { 4792 if (on_rq)
4590 dequeue_task(rq, p, 0); 4793 dequeue_task(rq, p, 0);
4591 dec_load(rq, p);
4592 }
4593 4794
4594 p->static_prio = NICE_TO_PRIO(nice); 4795 p->static_prio = NICE_TO_PRIO(nice);
4595 set_load_weight(p); 4796 set_load_weight(p);
@@ -4599,7 +4800,6 @@ void set_user_nice(struct task_struct *p, long nice)
4599 4800
4600 if (on_rq) { 4801 if (on_rq) {
4601 enqueue_task(rq, p, 0); 4802 enqueue_task(rq, p, 0);
4602 inc_load(rq, p);
4603 /* 4803 /*
4604 * If the task increased its priority or is running and 4804 * If the task increased its priority or is running and
4605 * lowered its priority, then reschedule its CPU: 4805 * lowered its priority, then reschedule its CPU:
@@ -5070,24 +5270,6 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
5070 return sched_setaffinity(pid, &new_mask); 5270 return sched_setaffinity(pid, &new_mask);
5071} 5271}
5072 5272
5073/*
5074 * Represents all cpu's present in the system
5075 * In systems capable of hotplug, this map could dynamically grow
5076 * as new cpu's are detected in the system via any platform specific
5077 * method, such as ACPI for e.g.
5078 */
5079
5080cpumask_t cpu_present_map __read_mostly;
5081EXPORT_SYMBOL(cpu_present_map);
5082
5083#ifndef CONFIG_SMP
5084cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
5085EXPORT_SYMBOL(cpu_online_map);
5086
5087cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
5088EXPORT_SYMBOL(cpu_possible_map);
5089#endif
5090
5091long sched_getaffinity(pid_t pid, cpumask_t *mask) 5273long sched_getaffinity(pid_t pid, cpumask_t *mask)
5092{ 5274{
5093 struct task_struct *p; 5275 struct task_struct *p;
@@ -5571,6 +5753,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
5571 goto out; 5753 goto out;
5572 } 5754 }
5573 5755
5756 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
5757 !cpus_equal(p->cpus_allowed, *new_mask))) {
5758 ret = -EINVAL;
5759 goto out;
5760 }
5761
5574 if (p->sched_class->set_cpus_allowed) 5762 if (p->sched_class->set_cpus_allowed)
5575 p->sched_class->set_cpus_allowed(p, new_mask); 5763 p->sched_class->set_cpus_allowed(p, new_mask);
5576 else { 5764 else {
@@ -6060,6 +6248,36 @@ static void unregister_sched_domain_sysctl(void)
6060} 6248}
6061#endif 6249#endif
6062 6250
6251static void set_rq_online(struct rq *rq)
6252{
6253 if (!rq->online) {
6254 const struct sched_class *class;
6255
6256 cpu_set(rq->cpu, rq->rd->online);
6257 rq->online = 1;
6258
6259 for_each_class(class) {
6260 if (class->rq_online)
6261 class->rq_online(rq);
6262 }
6263 }
6264}
6265
6266static void set_rq_offline(struct rq *rq)
6267{
6268 if (rq->online) {
6269 const struct sched_class *class;
6270
6271 for_each_class(class) {
6272 if (class->rq_offline)
6273 class->rq_offline(rq);
6274 }
6275
6276 cpu_clear(rq->cpu, rq->rd->online);
6277 rq->online = 0;
6278 }
6279}
6280
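set_rq_online()/set_rq_offline() replace the old per-class join_domain/leave_domain hooks used in rq_attach_root() further down: the runqueue records its state in the root domain's online mask and lets every scheduling class react through the new rq_online/rq_offline callbacks, iterated with the for_each_class() macro added earlier in this patch. A standalone sketch of the callback fan-out; the class list head lives on the rq here only to keep the sketch self-contained, whereas the kernel starts from the global sched_class_highest:

struct rq_sketch;

struct sched_class_sketch {
        const struct sched_class_sketch *next;
        void (*rq_online)(struct rq_sketch *rq);
        void (*rq_offline)(struct rq_sketch *rq);
};

struct rq_sketch {
        int online;
        const struct sched_class_sketch *highest_class;
};

static void rq_set_online_sketch(struct rq_sketch *rq)
{
        const struct sched_class_sketch *class;

        if (rq->online)
                return;
        rq->online = 1;
        for (class = rq->highest_class; class; class = class->next)
                if (class->rq_online)
                        class->rq_online(rq);
}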
6063/* 6281/*
6064 * migration_call - callback that gets triggered when a CPU is added. 6282 * migration_call - callback that gets triggered when a CPU is added.
6065 * Here we can start up the necessary migration thread for the new CPU. 6283 * Here we can start up the necessary migration thread for the new CPU.
@@ -6097,7 +6315,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6097 spin_lock_irqsave(&rq->lock, flags); 6315 spin_lock_irqsave(&rq->lock, flags);
6098 if (rq->rd) { 6316 if (rq->rd) {
6099 BUG_ON(!cpu_isset(cpu, rq->rd->span)); 6317 BUG_ON(!cpu_isset(cpu, rq->rd->span));
6100 cpu_set(cpu, rq->rd->online); 6318
6319 set_rq_online(rq);
6101 } 6320 }
6102 spin_unlock_irqrestore(&rq->lock, flags); 6321 spin_unlock_irqrestore(&rq->lock, flags);
6103 break; 6322 break;
@@ -6158,7 +6377,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6158 spin_lock_irqsave(&rq->lock, flags); 6377 spin_lock_irqsave(&rq->lock, flags);
6159 if (rq->rd) { 6378 if (rq->rd) {
6160 BUG_ON(!cpu_isset(cpu, rq->rd->span)); 6379 BUG_ON(!cpu_isset(cpu, rq->rd->span));
6161 cpu_clear(cpu, rq->rd->online); 6380 set_rq_offline(rq);
6162 } 6381 }
6163 spin_unlock_irqrestore(&rq->lock, flags); 6382 spin_unlock_irqrestore(&rq->lock, flags);
6164 break; 6383 break;
@@ -6192,6 +6411,28 @@ void __init migration_init(void)
6192 6411
6193#ifdef CONFIG_SCHED_DEBUG 6412#ifdef CONFIG_SCHED_DEBUG
6194 6413
6414static inline const char *sd_level_to_string(enum sched_domain_level lvl)
6415{
6416 switch (lvl) {
6417 case SD_LV_NONE:
6418 return "NONE";
6419 case SD_LV_SIBLING:
6420 return "SIBLING";
6421 case SD_LV_MC:
6422 return "MC";
6423 case SD_LV_CPU:
6424 return "CPU";
6425 case SD_LV_NODE:
6426 return "NODE";
6427 case SD_LV_ALLNODES:
6428 return "ALLNODES";
6429 case SD_LV_MAX:
6430 return "MAX";
6431
6432 }
6433 return "MAX";
6434}
6435
6195static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 6436static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6196 cpumask_t *groupmask) 6437 cpumask_t *groupmask)
6197{ 6438{
@@ -6211,7 +6452,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6211 return -1; 6452 return -1;
6212 } 6453 }
6213 6454
6214 printk(KERN_CONT "span %s\n", str); 6455 printk(KERN_CONT "span %s level %s\n",
6456 str, sd_level_to_string(sd->level));
6215 6457
6216 if (!cpu_isset(cpu, sd->span)) { 6458 if (!cpu_isset(cpu, sd->span)) {
6217 printk(KERN_ERR "ERROR: domain->span does not contain " 6459 printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -6295,9 +6537,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
6295 } 6537 }
6296 kfree(groupmask); 6538 kfree(groupmask);
6297} 6539}
6298#else 6540#else /* !CONFIG_SCHED_DEBUG */
6299# define sched_domain_debug(sd, cpu) do { } while (0) 6541# define sched_domain_debug(sd, cpu) do { } while (0)
6300#endif 6542#endif /* CONFIG_SCHED_DEBUG */
6301 6543
6302static int sd_degenerate(struct sched_domain *sd) 6544static int sd_degenerate(struct sched_domain *sd)
6303{ 6545{
@@ -6357,20 +6599,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
6357static void rq_attach_root(struct rq *rq, struct root_domain *rd) 6599static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6358{ 6600{
6359 unsigned long flags; 6601 unsigned long flags;
6360 const struct sched_class *class;
6361 6602
6362 spin_lock_irqsave(&rq->lock, flags); 6603 spin_lock_irqsave(&rq->lock, flags);
6363 6604
6364 if (rq->rd) { 6605 if (rq->rd) {
6365 struct root_domain *old_rd = rq->rd; 6606 struct root_domain *old_rd = rq->rd;
6366 6607
6367 for (class = sched_class_highest; class; class = class->next) { 6608 if (cpu_isset(rq->cpu, old_rd->online))
6368 if (class->leave_domain) 6609 set_rq_offline(rq);
6369 class->leave_domain(rq);
6370 }
6371 6610
6372 cpu_clear(rq->cpu, old_rd->span); 6611 cpu_clear(rq->cpu, old_rd->span);
6373 cpu_clear(rq->cpu, old_rd->online);
6374 6612
6375 if (atomic_dec_and_test(&old_rd->refcount)) 6613 if (atomic_dec_and_test(&old_rd->refcount))
6376 kfree(old_rd); 6614 kfree(old_rd);
@@ -6381,12 +6619,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6381 6619
6382 cpu_set(rq->cpu, rd->span); 6620 cpu_set(rq->cpu, rd->span);
6383 if (cpu_isset(rq->cpu, cpu_online_map)) 6621 if (cpu_isset(rq->cpu, cpu_online_map))
6384 cpu_set(rq->cpu, rd->online); 6622 set_rq_online(rq);
6385
6386 for (class = sched_class_highest; class; class = class->next) {
6387 if (class->join_domain)
6388 class->join_domain(rq);
6389 }
6390 6623
6391 spin_unlock_irqrestore(&rq->lock, flags); 6624 spin_unlock_irqrestore(&rq->lock, flags);
6392} 6625}
@@ -6397,6 +6630,8 @@ static void init_rootdomain(struct root_domain *rd)
6397 6630
6398 cpus_clear(rd->span); 6631 cpus_clear(rd->span);
6399 cpus_clear(rd->online); 6632 cpus_clear(rd->online);
6633
6634 cpupri_init(&rd->cpupri);
6400} 6635}
6401 6636
6402static void init_defrootdomain(void) 6637static void init_defrootdomain(void)
@@ -6591,7 +6826,7 @@ static void sched_domain_node_span(int node, cpumask_t *span)
6591 cpus_or(*span, *span, *nodemask); 6826 cpus_or(*span, *span, *nodemask);
6592 } 6827 }
6593} 6828}
6594#endif 6829#endif /* CONFIG_NUMA */
6595 6830
6596int sched_smt_power_savings = 0, sched_mc_power_savings = 0; 6831int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
6597 6832
@@ -6610,7 +6845,7 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
6610 *sg = &per_cpu(sched_group_cpus, cpu); 6845 *sg = &per_cpu(sched_group_cpus, cpu);
6611 return cpu; 6846 return cpu;
6612} 6847}
6613#endif 6848#endif /* CONFIG_SCHED_SMT */
6614 6849
6615/* 6850/*
6616 * multi-core sched-domains: 6851 * multi-core sched-domains:
@@ -6618,7 +6853,7 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
6618#ifdef CONFIG_SCHED_MC 6853#ifdef CONFIG_SCHED_MC
6619static DEFINE_PER_CPU(struct sched_domain, core_domains); 6854static DEFINE_PER_CPU(struct sched_domain, core_domains);
6620static DEFINE_PER_CPU(struct sched_group, sched_group_core); 6855static DEFINE_PER_CPU(struct sched_group, sched_group_core);
6621#endif 6856#endif /* CONFIG_SCHED_MC */
6622 6857
6623#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) 6858#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
6624static int 6859static int
@@ -6720,7 +6955,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
6720 sg = sg->next; 6955 sg = sg->next;
6721 } while (sg != group_head); 6956 } while (sg != group_head);
6722} 6957}
6723#endif 6958#endif /* CONFIG_NUMA */
6724 6959
6725#ifdef CONFIG_NUMA 6960#ifdef CONFIG_NUMA
6726/* Free memory allocated for various sched_group structures */ 6961/* Free memory allocated for various sched_group structures */
@@ -6757,11 +6992,11 @@ next_sg:
6757 sched_group_nodes_bycpu[cpu] = NULL; 6992 sched_group_nodes_bycpu[cpu] = NULL;
6758 } 6993 }
6759} 6994}
6760#else 6995#else /* !CONFIG_NUMA */
6761static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) 6996static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
6762{ 6997{
6763} 6998}
6764#endif 6999#endif /* CONFIG_NUMA */
6765 7000
6766/* 7001/*
6767 * Initialize sched groups cpu_power. 7002 * Initialize sched groups cpu_power.
@@ -7470,7 +7705,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
7470#endif 7705#endif
7471 return err; 7706 return err;
7472} 7707}
7473#endif 7708#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
7474 7709
7475/* 7710/*
7476 * Force a reinitialization of the sched domains hierarchy. The domains 7711 * Force a reinitialization of the sched domains hierarchy. The domains
@@ -7481,21 +7716,28 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
7481static int update_sched_domains(struct notifier_block *nfb, 7716static int update_sched_domains(struct notifier_block *nfb,
7482 unsigned long action, void *hcpu) 7717 unsigned long action, void *hcpu)
7483{ 7718{
7719 int cpu = (int)(long)hcpu;
7720
7484 switch (action) { 7721 switch (action) {
7485 case CPU_UP_PREPARE:
7486 case CPU_UP_PREPARE_FROZEN:
7487 case CPU_DOWN_PREPARE: 7722 case CPU_DOWN_PREPARE:
7488 case CPU_DOWN_PREPARE_FROZEN: 7723 case CPU_DOWN_PREPARE_FROZEN:
7724 disable_runtime(cpu_rq(cpu));
7725 /* fall-through */
7726 case CPU_UP_PREPARE:
7727 case CPU_UP_PREPARE_FROZEN:
7489 detach_destroy_domains(&cpu_online_map); 7728 detach_destroy_domains(&cpu_online_map);
7490 free_sched_domains(); 7729 free_sched_domains();
7491 return NOTIFY_OK; 7730 return NOTIFY_OK;
7492 7731
7493 case CPU_UP_CANCELED: 7732
7494 case CPU_UP_CANCELED_FROZEN:
7495 case CPU_DOWN_FAILED: 7733 case CPU_DOWN_FAILED:
7496 case CPU_DOWN_FAILED_FROZEN: 7734 case CPU_DOWN_FAILED_FROZEN:
7497 case CPU_ONLINE: 7735 case CPU_ONLINE:
7498 case CPU_ONLINE_FROZEN: 7736 case CPU_ONLINE_FROZEN:
7737 enable_runtime(cpu_rq(cpu));
7738 /* fall-through */
7739 case CPU_UP_CANCELED:
7740 case CPU_UP_CANCELED_FROZEN:
7499 case CPU_DEAD: 7741 case CPU_DEAD:
7500 case CPU_DEAD_FROZEN: 7742 case CPU_DEAD_FROZEN:
7501 /* 7743 /*
@@ -7695,8 +7937,8 @@ void __init sched_init(void)
7695 7937
7696 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 7938 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
7697 ptr += nr_cpu_ids * sizeof(void **); 7939 ptr += nr_cpu_ids * sizeof(void **);
7698#endif 7940#endif /* CONFIG_USER_SCHED */
7699#endif 7941#endif /* CONFIG_FAIR_GROUP_SCHED */
7700#ifdef CONFIG_RT_GROUP_SCHED 7942#ifdef CONFIG_RT_GROUP_SCHED
7701 init_task_group.rt_se = (struct sched_rt_entity **)ptr; 7943 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
7702 ptr += nr_cpu_ids * sizeof(void **); 7944 ptr += nr_cpu_ids * sizeof(void **);
@@ -7710,8 +7952,8 @@ void __init sched_init(void)
7710 7952
7711 root_task_group.rt_rq = (struct rt_rq **)ptr; 7953 root_task_group.rt_rq = (struct rt_rq **)ptr;
7712 ptr += nr_cpu_ids * sizeof(void **); 7954 ptr += nr_cpu_ids * sizeof(void **);
7713#endif 7955#endif /* CONFIG_USER_SCHED */
7714#endif 7956#endif /* CONFIG_RT_GROUP_SCHED */
7715 } 7957 }
7716 7958
7717#ifdef CONFIG_SMP 7959#ifdef CONFIG_SMP
@@ -7727,8 +7969,8 @@ void __init sched_init(void)
7727#ifdef CONFIG_USER_SCHED 7969#ifdef CONFIG_USER_SCHED
7728 init_rt_bandwidth(&root_task_group.rt_bandwidth, 7970 init_rt_bandwidth(&root_task_group.rt_bandwidth,
7729 global_rt_period(), RUNTIME_INF); 7971 global_rt_period(), RUNTIME_INF);
7730#endif 7972#endif /* CONFIG_USER_SCHED */
7731#endif 7973#endif /* CONFIG_RT_GROUP_SCHED */
7732 7974
7733#ifdef CONFIG_GROUP_SCHED 7975#ifdef CONFIG_GROUP_SCHED
7734 list_add(&init_task_group.list, &task_groups); 7976 list_add(&init_task_group.list, &task_groups);
@@ -7738,8 +7980,8 @@ void __init sched_init(void)
7738 INIT_LIST_HEAD(&root_task_group.children); 7980 INIT_LIST_HEAD(&root_task_group.children);
7739 init_task_group.parent = &root_task_group; 7981 init_task_group.parent = &root_task_group;
7740 list_add(&init_task_group.siblings, &root_task_group.children); 7982 list_add(&init_task_group.siblings, &root_task_group.children);
7741#endif 7983#endif /* CONFIG_USER_SCHED */
7742#endif 7984#endif /* CONFIG_GROUP_SCHED */
7743 7985
7744 for_each_possible_cpu(i) { 7986 for_each_possible_cpu(i) {
7745 struct rq *rq; 7987 struct rq *rq;
@@ -7819,6 +8061,7 @@ void __init sched_init(void)
7819 rq->next_balance = jiffies; 8061 rq->next_balance = jiffies;
7820 rq->push_cpu = 0; 8062 rq->push_cpu = 0;
7821 rq->cpu = i; 8063 rq->cpu = i;
8064 rq->online = 0;
7822 rq->migration_thread = NULL; 8065 rq->migration_thread = NULL;
7823 INIT_LIST_HEAD(&rq->migration_queue); 8066 INIT_LIST_HEAD(&rq->migration_queue);
7824 rq_attach_root(rq, &def_root_domain); 8067 rq_attach_root(rq, &def_root_domain);
@@ -8058,7 +8301,7 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8058{ 8301{
8059 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list); 8302 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
8060} 8303}
8061#else 8304#else /* !CONFIG_FAIR_GROUP_SCHED */
8062static inline void free_fair_sched_group(struct task_group *tg) 8305static inline void free_fair_sched_group(struct task_group *tg)
8063{ 8306{
8064} 8307}
@@ -8076,7 +8319,7 @@ static inline void register_fair_sched_group(struct task_group *tg, int cpu)
8076static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) 8319static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8077{ 8320{
8078} 8321}
8079#endif 8322#endif /* CONFIG_FAIR_GROUP_SCHED */
8080 8323
8081#ifdef CONFIG_RT_GROUP_SCHED 8324#ifdef CONFIG_RT_GROUP_SCHED
8082static void free_rt_sched_group(struct task_group *tg) 8325static void free_rt_sched_group(struct task_group *tg)
@@ -8147,7 +8390,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
8147{ 8390{
8148 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list); 8391 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
8149} 8392}
8150#else 8393#else /* !CONFIG_RT_GROUP_SCHED */
8151static inline void free_rt_sched_group(struct task_group *tg) 8394static inline void free_rt_sched_group(struct task_group *tg)
8152{ 8395{
8153} 8396}
@@ -8165,7 +8408,7 @@ static inline void register_rt_sched_group(struct task_group *tg, int cpu)
8165static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) 8408static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
8166{ 8409{
8167} 8410}
8168#endif 8411#endif /* CONFIG_RT_GROUP_SCHED */
8169 8412
8170#ifdef CONFIG_GROUP_SCHED 8413#ifdef CONFIG_GROUP_SCHED
8171static void free_sched_group(struct task_group *tg) 8414static void free_sched_group(struct task_group *tg)
@@ -8276,17 +8519,14 @@ void sched_move_task(struct task_struct *tsk)
8276 8519
8277 task_rq_unlock(rq, &flags); 8520 task_rq_unlock(rq, &flags);
8278} 8521}
8279#endif 8522#endif /* CONFIG_GROUP_SCHED */
8280 8523
8281#ifdef CONFIG_FAIR_GROUP_SCHED 8524#ifdef CONFIG_FAIR_GROUP_SCHED
8282static void set_se_shares(struct sched_entity *se, unsigned long shares) 8525static void __set_se_shares(struct sched_entity *se, unsigned long shares)
8283{ 8526{
8284 struct cfs_rq *cfs_rq = se->cfs_rq; 8527 struct cfs_rq *cfs_rq = se->cfs_rq;
8285 struct rq *rq = cfs_rq->rq;
8286 int on_rq; 8528 int on_rq;
8287 8529
8288 spin_lock_irq(&rq->lock);
8289
8290 on_rq = se->on_rq; 8530 on_rq = se->on_rq;
8291 if (on_rq) 8531 if (on_rq)
8292 dequeue_entity(cfs_rq, se, 0); 8532 dequeue_entity(cfs_rq, se, 0);
@@ -8296,8 +8536,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
8296 8536
8297 if (on_rq) 8537 if (on_rq)
8298 enqueue_entity(cfs_rq, se, 0); 8538 enqueue_entity(cfs_rq, se, 0);
8539}
8299 8540
8300 spin_unlock_irq(&rq->lock); 8541static void set_se_shares(struct sched_entity *se, unsigned long shares)
8542{
8543 struct cfs_rq *cfs_rq = se->cfs_rq;
8544 struct rq *rq = cfs_rq->rq;
8545 unsigned long flags;
8546
8547 spin_lock_irqsave(&rq->lock, flags);
8548 __set_se_shares(se, shares);
8549 spin_unlock_irqrestore(&rq->lock, flags);
8301} 8550}
8302 8551
8303static DEFINE_MUTEX(shares_mutex); 8552static DEFINE_MUTEX(shares_mutex);
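set_se_shares() is split into a lockless helper plus a locking wrapper so that tg_shares_up(), which already holds rq->lock, can call __set_se_shares() directly, while other callers go through the wrapper and now take the lock with irqsave rather than plain irq disabling. A standalone sketch of the lock-split pattern, using a pthread mutex purely as a stand-in for rq->lock:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long shares_value;

/* caller must already hold 'lock' (the analogue of rq->lock) */
static void __set_shares_locked(unsigned long shares)
{
        shares_value = shares;
}

/* convenience wrapper for callers that do not hold the lock yet */
static void set_shares(unsigned long shares)
{
        pthread_mutex_lock(&lock);
        __set_shares_locked(shares);
        pthread_mutex_unlock(&lock);
}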
@@ -8336,8 +8585,13 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8336 * w/o tripping rebalance_share or load_balance_fair. 8585 * w/o tripping rebalance_share or load_balance_fair.
8337 */ 8586 */
8338 tg->shares = shares; 8587 tg->shares = shares;
8339 for_each_possible_cpu(i) 8588 for_each_possible_cpu(i) {
8589 /*
8590 * force a rebalance
8591 */
8592 cfs_rq_set_shares(tg->cfs_rq[i], 0);
8340 set_se_shares(tg->se[i], shares); 8593 set_se_shares(tg->se[i], shares);
8594 }
8341 8595
8342 /* 8596 /*
8343 * Enable load balance activity on this group, by inserting it back on 8597 * Enable load balance activity on this group, by inserting it back on
@@ -8376,7 +8630,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
8376#ifdef CONFIG_CGROUP_SCHED 8630#ifdef CONFIG_CGROUP_SCHED
8377static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 8631static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8378{ 8632{
8379 struct task_group *tgi, *parent = tg ? tg->parent : NULL; 8633 struct task_group *tgi, *parent = tg->parent;
8380 unsigned long total = 0; 8634 unsigned long total = 0;
8381 8635
8382 if (!parent) { 8636 if (!parent) {
@@ -8400,7 +8654,7 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8400 } 8654 }
8401 rcu_read_unlock(); 8655 rcu_read_unlock();
8402 8656
8403 return total + to_ratio(period, runtime) < 8657 return total + to_ratio(period, runtime) <=
8404 to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period), 8658 to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period),
8405 parent->rt_bandwidth.rt_runtime); 8659 parent->rt_bandwidth.rt_runtime);
8406} 8660}
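__rt_schedulable() admits a group only if the bandwidth ratios of a parent's children, including the proposed setting, do not exceed the parent's own ratio; the comparison is relaxed from < to <=, so a child may use exactly its parent's allocation, and sched_rt_global_constraints() in the next hunk now feeds the root group's real period and runtime into this check instead of the old dummy (NULL, 1, 0) call. A standalone sketch of the test, with to_ratio() modelled as runtime/period in fixed point (the scale factor is an assumption):

#include <stdint.h>

#define RATIO_SHIFT 16          /* assumed fixed-point scale */

static uint64_t to_ratio_sketch(uint64_t period_ns, uint64_t runtime_ns)
{
        return (runtime_ns << RATIO_SHIFT) / period_ns;
}

/* may the new (period, runtime) be admitted next to its siblings? */
static int rt_admissible(uint64_t siblings_total_ratio,
                         uint64_t period_ns, uint64_t runtime_ns,
                         uint64_t parent_period_ns, uint64_t parent_runtime_ns)
{
        return siblings_total_ratio + to_ratio_sketch(period_ns, runtime_ns) <=
               to_ratio_sketch(parent_period_ns, parent_runtime_ns);
}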
@@ -8520,16 +8774,21 @@ long sched_group_rt_period(struct task_group *tg)
8520 8774
8521static int sched_rt_global_constraints(void) 8775static int sched_rt_global_constraints(void)
8522{ 8776{
8777 struct task_group *tg = &root_task_group;
8778 u64 rt_runtime, rt_period;
8523 int ret = 0; 8779 int ret = 0;
8524 8780
8781 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8782 rt_runtime = tg->rt_bandwidth.rt_runtime;
8783
8525 mutex_lock(&rt_constraints_mutex); 8784 mutex_lock(&rt_constraints_mutex);
8526 if (!__rt_schedulable(NULL, 1, 0)) 8785 if (!__rt_schedulable(tg, rt_period, rt_runtime))
8527 ret = -EINVAL; 8786 ret = -EINVAL;
8528 mutex_unlock(&rt_constraints_mutex); 8787 mutex_unlock(&rt_constraints_mutex);
8529 8788
8530 return ret; 8789 return ret;
8531} 8790}
8532#else 8791#else /* !CONFIG_RT_GROUP_SCHED */
8533static int sched_rt_global_constraints(void) 8792static int sched_rt_global_constraints(void)
8534{ 8793{
8535 unsigned long flags; 8794 unsigned long flags;
@@ -8547,7 +8806,7 @@ static int sched_rt_global_constraints(void)
8547 8806
8548 return 0; 8807 return 0;
8549} 8808}
8550#endif 8809#endif /* CONFIG_RT_GROUP_SCHED */
8551 8810
8552int sched_rt_handler(struct ctl_table *table, int write, 8811int sched_rt_handler(struct ctl_table *table, int write,
8553 struct file *filp, void __user *buffer, size_t *lenp, 8812 struct file *filp, void __user *buffer, size_t *lenp,
@@ -8655,7 +8914,7 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
8655 8914
8656 return (u64) tg->shares; 8915 return (u64) tg->shares;
8657} 8916}
8658#endif 8917#endif /* CONFIG_FAIR_GROUP_SCHED */
8659 8918
8660#ifdef CONFIG_RT_GROUP_SCHED 8919#ifdef CONFIG_RT_GROUP_SCHED
8661static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, 8920static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
@@ -8679,7 +8938,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
8679{ 8938{
8680 return sched_group_rt_period(cgroup_tg(cgrp)); 8939 return sched_group_rt_period(cgroup_tg(cgrp));
8681} 8940}
8682#endif 8941#endif /* CONFIG_RT_GROUP_SCHED */
8683 8942
8684static struct cftype cpu_files[] = { 8943static struct cftype cpu_files[] = {
8685#ifdef CONFIG_FAIR_GROUP_SCHED 8944#ifdef CONFIG_FAIR_GROUP_SCHED