Diffstat (limited to 'kernel')
 kernel/events/core.c | 23
 kernel/sched/fair.c  | 42
 2 files changed, 42 insertions(+), 23 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 85cd41878a74..43d43a2d5811 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1678,12 +1678,33 @@ static bool is_orphaned_event(struct perf_event *event)
 	return event->state == PERF_EVENT_STATE_DEAD;
 }
 
-static inline int pmu_filter_match(struct perf_event *event)
+static inline int __pmu_filter_match(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;
 	return pmu->filter_match ? pmu->filter_match(event) : 1;
 }
 
+/*
+ * Check whether we should attempt to schedule an event group based on
+ * PMU-specific filtering. An event group can consist of HW and SW events,
+ * potentially with a SW leader, so we must check all the filters, to
+ * determine whether a group is schedulable:
+ */
+static inline int pmu_filter_match(struct perf_event *event)
+{
+	struct perf_event *child;
+
+	if (!__pmu_filter_match(event))
+		return 0;
+
+	list_for_each_entry(child, &event->sibling_list, group_entry) {
+		if (!__pmu_filter_match(child))
+			return 0;
+	}
+
+	return 1;
+}
+
 static inline int
 event_filter_match(struct perf_event *event)
 {
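
The new pmu_filter_match() above only declares a group schedulable when the group leader and every one of its siblings pass their PMU's filter. The following is a minimal standalone sketch of that walk, using made-up toy structs rather than the kernel's perf_event and list_for_each_entry(); it illustrates only the leader-plus-siblings check, not the real perf API.

/*
 * Standalone sketch (not kernel code): a group passes only if the
 * leader and every sibling pass their PMU's filter, mirroring the
 * new pmu_filter_match() above. All types here are hypothetical.
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_event {
	/* NULL filter means "always matches", like a missing pmu->filter_match */
	bool (*filter_match)(const struct toy_event *ev);
};

struct toy_group {
	struct toy_event leader;
	struct toy_event siblings[4];
	int nr_siblings;
};

static bool toy_event_matches(const struct toy_event *ev)
{
	return ev->filter_match ? ev->filter_match(ev) : true;
}

/* Schedulable only if the leader and all siblings match. */
static bool toy_group_matches(const struct toy_group *grp)
{
	if (!toy_event_matches(&grp->leader))
		return false;
	for (int i = 0; i < grp->nr_siblings; i++) {
		if (!toy_event_matches(&grp->siblings[i]))
			return false;
	}
	return true;
}

static bool never_matches(const struct toy_event *ev)
{
	(void)ev;
	return false;
}

int main(void)
{
	struct toy_group grp = { .nr_siblings = 2 };

	printf("all default filters: %d\n", toy_group_matches(&grp));  /* 1 */
	grp.siblings[1].filter_match = never_matches;
	printf("one sibling filtered: %d\n", toy_group_matches(&grp)); /* 0 */
	return 0;
}

With default (NULL) filters everything matches; marking a single sibling as filtered is enough to make the whole group unschedulable, which is the property the patch restores for SW-led groups that carry HW siblings.
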
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bdcbeea90c95..c8c5d2d48424 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	}
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long tg_weight;
+	long tg_weight, load, shares;
 
 	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
 	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
+	load = scale_load_down(cfs_rq->load.weight);
 
-	return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-	long tg_weight, load, shares;
+	tg_weight = atomic_long_read(&tg->load_avg);
 
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
 		/*
 		 * wl = S * s'_i; see (2)
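
Both fair.c changes above lean on the same approximation: take the group-wide tg->load_avg, subtract this cfs_rq's possibly stale tg_load_avg_contrib, and add a freshly read local value, which guarantees the group total is at least the local part. The sketch below is a standalone illustration of that arithmetic plus the shares = tg->shares * load / tg_weight step; the helper name and sample numbers are invented, and the clamping done by the real calc_cfs_shares() is omitted since it is not part of this hunk.

/*
 * Standalone arithmetic sketch (not kernel code) of the approximation
 * used in calc_cfs_shares() and effective_load() above: replace this
 * cfs_rq's stale contribution to the group-wide sum with its current
 * local value, which also guarantees total >= local.
 */
#include <stdio.h>

static long approx_group_weight(long tg_load_avg, long stale_contrib, long local_load)
{
	long total = tg_load_avg;

	total -= stale_contrib;	/* drop this cfs_rq's old contribution */
	total += local_load;	/* ...and add its up-to-date load */
	return total;		/* ensures total >= local_load */
}

int main(void)
{
	long tg_shares = 1024;		/* group's configured shares */
	long tg_load_avg = 3000;	/* group-wide sum, possibly stale */
	long stale_contrib = 500;	/* what this cfs_rq last added to it */
	long local_load = 800;		/* this cfs_rq's current load/weight */

	long tg_weight = approx_group_weight(tg_load_avg, stale_contrib, local_load);
	long shares = tg_weight ? tg_shares * local_load / tg_weight : tg_shares;

	printf("tg_weight = %ld, this cfs_rq's share = %ld\n", tg_weight, shares);
	return 0;
}

With the sample numbers this prints tg_weight = 3300 and a share of 248 out of the group's 1024, i.e. roughly local_load/tg_weight of the configured shares.
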