Diffstat (limited to 'kernel')

 kernel/Makefile       |   7
 kernel/bounds.c       |   2
 kernel/events/core.c  |  21
 kernel/fork.c         |   1
 kernel/kexec.c        |   1
 kernel/reboot.c       |   2
 kernel/sched/core.c   |   6
 kernel/sched/fair.c   | 151
 kernel/sched/rt.c     |  14
 kernel/trace/ftrace.c |   2
 kernel/user.c         |   6

 11 files changed, 119 insertions(+), 94 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index bbaf7d59c1bb..bc010ee272b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
 ###############################################################################
 ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
 X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
-X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509
-X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
+X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
+X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
                                 $(or $(realpath $(CERT)),$(CERT))))
+X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
 
 ifeq ($(X509_CERTIFICATES),)
 $(warning *** No X.509 certificates found ***)
@@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
 targets += $(obj)/.x509.list
 $(obj)/.x509.list:
         @echo $(X509_CERTIFICATES) >$@
+endif
 
 clean-files := x509_certificate_list .x509.list
-endif
 
 ifeq ($(CONFIG_MODULE_SIG),y)
 ###############################################################################
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 5253204afdca..9fd4246b04b8 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -22,6 +22,6 @@ void foo(void)
 #ifdef CONFIG_SMP
         DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
 #endif
-        DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
+        DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
         /* End of constants */
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 72348dc192c1..f5744010a8d2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
         if (event->state != PERF_EVENT_STATE_ACTIVE)
                 return;
 
+        perf_pmu_disable(event->pmu);
+
         event->state = PERF_EVENT_STATE_INACTIVE;
         if (event->pending_disable) {
                 event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
                 ctx->nr_freq--;
         if (event->attr.exclusive || !cpuctx->active_oncpu)
                 cpuctx->exclusive = 0;
+
+        perf_pmu_enable(event->pmu);
 }
 
 static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
                  struct perf_event_context *ctx)
 {
         u64 tstamp = perf_event_time(event);
+        int ret = 0;
 
         if (event->state <= PERF_EVENT_STATE_OFF)
                 return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
          */
         smp_wmb();
 
+        perf_pmu_disable(event->pmu);
+
         if (event->pmu->add(event, PERF_EF_START)) {
                 event->state = PERF_EVENT_STATE_INACTIVE;
                 event->oncpu = -1;
-                return -EAGAIN;
+                ret = -EAGAIN;
+                goto out;
         }
 
         event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
         if (event->attr.exclusive)
                 cpuctx->exclusive = 1;
 
-        return 0;
+out:
+        perf_pmu_enable(event->pmu);
+
+        return ret;
 }
 
 static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                 if (!event_filter_match(event))
                         continue;
 
+                perf_pmu_disable(event->pmu);
+
                 hwc = &event->hw;
 
                 if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                 }
 
                 if (!event->attr.freq || !event->attr.sample_freq)
-                        continue;
+                        goto next;
 
                 /*
                  * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                         perf_adjust_period(event, period, delta, false);
 
                 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+        next:
+                perf_pmu_enable(event->pmu);
         }
 
         perf_pmu_enable(ctx->pmu);
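The perf hunks above all apply one pattern: bracket every multi-step event-state change with perf_pmu_disable()/perf_pmu_enable() on the event's own PMU, and funnel early exits through a single label so the enable side can never be skipped. Below is a minimal userspace sketch of that shape, assuming invented stand-ins (pmu_disable, pmu_enable, try_add_event) rather than the real kernel APIs.

#include <stdio.h>

/* Toy illustration only: a nesting counter stands in for the PMU state. */
static int pmu_depth;

static void pmu_disable(void) { pmu_depth++; }
static void pmu_enable(void)  { pmu_depth--; }

/* Stand-in for event->pmu->add(); nonzero means the add failed. */
static int try_add_event(int should_fail)
{
        return should_fail ? -1 : 0;
}

static int event_sched_in_demo(int should_fail)
{
        int ret = 0;

        pmu_disable();

        if (try_add_event(should_fail)) {
                ret = -1;       /* mirrors "ret = -EAGAIN; goto out;" above */
                goto out;
        }

        /* ... further bookkeeping would happen here ... */

out:
        pmu_enable();           /* runs on both the success and error paths */
        return ret;
}

int main(void)
{
        event_sched_in_demo(0);
        event_sched_in_demo(1);
        printf("pmu_depth after both paths: %d\n", pmu_depth); /* expect 0 */
        return 0;
}

Both calls leave the nesting count balanced at zero, which is the invariant the patch restores for the real PMU on the unthrottling and rescheduling paths.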
diff --git a/kernel/fork.c b/kernel/fork.c
index 728d5be9548c..5721f0e3f2da 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
         spin_lock_init(&mm->page_table_lock);
         mm_init_aio(mm);
         mm_init_owner(mm, p);
+        clear_tlb_flush_pending(mm);
 
         if (likely(!mm_alloc_pgd(mm))) {
                 mm->def_flags = 0;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index d0d8fca54065..9c970167e402 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1680,6 +1680,7 @@ int kernel_kexec(void)
         {
                 kexec_in_progress = true;
                 kernel_restart_prepare(NULL);
+                migrate_to_reboot_cpu();
                 printk(KERN_EMERG "Starting new kernel\n");
                 machine_shutdown();
         }
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f813b3474646..662c83fc16b7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_reboot_notifier);
 
-static void migrate_to_reboot_cpu(void)
+void migrate_to_reboot_cpu(void)
 {
         /* The boot cpu is always logical cpu 0 */
         int cpu = reboot_cpu;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e85cda20ab2b..a88f4a485c5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 static void update_top_cache_domain(int cpu)
 {
         struct sched_domain *sd;
+        struct sched_domain *busy_sd = NULL;
         int id = cpu;
         int size = 1;
 
@@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu)
         if (sd) {
                 id = cpumask_first(sched_domain_span(sd));
                 size = cpumask_weight(sched_domain_span(sd));
-                sd = sd->parent; /* sd_busy */
+                busy_sd = sd->parent; /* sd_busy */
         }
-        rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
+        rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
 
         rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
         per_cpu(sd_llc_size, cpu) = size;
@@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
          * die on a /0 trap.
          */
         sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+        sg->sgp->power_orig = sg->sgp->power;
 
         /*
          * Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fd773ade1a31..c7395d97e4cb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -178,59 +178,61 @@ void sched_init_granularity(void)
         update_sysctl();
 }
 
-#if BITS_PER_LONG == 32
-# define WMULT_CONST (~0UL)
-#else
-# define WMULT_CONST (1UL << 32)
-#endif
-
+#define WMULT_CONST (~0U)
 #define WMULT_SHIFT 32
 
-/*
- * Shift right and round:
- */
-#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+static void __update_inv_weight(struct load_weight *lw)
+{
+        unsigned long w;
+
+        if (likely(lw->inv_weight))
+                return;
+
+        w = scale_load_down(lw->weight);
+
+        if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+                lw->inv_weight = 1;
+        else if (unlikely(!w))
+                lw->inv_weight = WMULT_CONST;
+        else
+                lw->inv_weight = WMULT_CONST / w;
+}
 
 /*
- * delta *= weight / lw
+ * delta_exec * weight / lw.weight
+ *   OR
+ * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
+ *
+ * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
+ * we're guaranteed shift stays positive because inv_weight is guaranteed to
+ * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
+ *
+ * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
+ * weight/lw.weight <= 1, and therefore our shift will also be positive.
  */
-static unsigned long
-calc_delta_mine(unsigned long delta_exec, unsigned long weight,
-                struct load_weight *lw)
+static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
 {
-        u64 tmp;
-
-        /*
-         * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
-         * entities since MIN_SHARES = 2. Treat weight as 1 if less than
-         * 2^SCHED_LOAD_RESOLUTION.
-         */
-        if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
-                tmp = (u64)delta_exec * scale_load_down(weight);
-        else
-                tmp = (u64)delta_exec;
+        u64 fact = scale_load_down(weight);
+        int shift = WMULT_SHIFT;
 
-        if (!lw->inv_weight) {
-                unsigned long w = scale_load_down(lw->weight);
+        __update_inv_weight(lw);
 
-                if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
-                        lw->inv_weight = 1;
-                else if (unlikely(!w))
-                        lw->inv_weight = WMULT_CONST;
-                else
-                        lw->inv_weight = WMULT_CONST / w;
+        if (unlikely(fact >> 32)) {
+                while (fact >> 32) {
+                        fact >>= 1;
+                        shift--;
+                }
         }
 
-        /*
-         * Check whether we'd overflow the 64-bit multiplication:
-         */
-        if (unlikely(tmp > WMULT_CONST))
-                tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
-                        WMULT_SHIFT/2);
-        else
-                tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
+        /* hint to use a 32x32->64 mul */
+        fact = (u64)(u32)fact * lw->inv_weight;
+
+        while (fact >> 32) {
+                fact >>= 1;
+                shift--;
+        }
 
-        return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
+        return mul_u64_u32_shr(delta_exec, fact, shift);
 }
 
 
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
 /*
  * delta /= w
  */
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
+static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
 {
         if (unlikely(se->load.weight != NICE_0_LOAD))
-                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
+                delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
 
         return delta;
 }
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
                         update_load_add(&lw, se->load.weight);
                         load = &lw;
                 }
-                slice = calc_delta_mine(slice, se->load.weight, load);
+                slice = __calc_delta(slice, se->load.weight, load);
         }
         return slice;
 }
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p)
 #endif
 
 /*
- * Update the current task's runtime statistics. Skip current tasks that
- * are not in our scheduling class.
+ * Update the current task's runtime statistics.
  */
-static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
-              unsigned long delta_exec)
-{
-        unsigned long delta_exec_weighted;
-
-        schedstat_set(curr->statistics.exec_max,
-                        max((u64)delta_exec, curr->statistics.exec_max));
-
-        curr->sum_exec_runtime += delta_exec;
-        schedstat_add(cfs_rq, exec_clock, delta_exec);
-        delta_exec_weighted = calc_delta_fair(delta_exec, curr);
-
-        curr->vruntime += delta_exec_weighted;
-        update_min_vruntime(cfs_rq);
-}
-
 static void update_curr(struct cfs_rq *cfs_rq)
 {
         struct sched_entity *curr = cfs_rq->curr;
         u64 now = rq_clock_task(rq_of(cfs_rq));
-        unsigned long delta_exec;
+        u64 delta_exec;
 
         if (unlikely(!curr))
                 return;
 
-        /*
-         * Get the amount of time the current task was running
-         * since the last time we changed load (this cannot
-         * overflow on 32 bits):
-         */
-        delta_exec = (unsigned long)(now - curr->exec_start);
-        if (!delta_exec)
+        delta_exec = now - curr->exec_start;
+        if (unlikely((s64)delta_exec <= 0))
                 return;
 
-        __update_curr(cfs_rq, curr, delta_exec);
         curr->exec_start = now;
 
+        schedstat_set(curr->statistics.exec_max,
+                      max(delta_exec, curr->statistics.exec_max));
+
+        curr->sum_exec_runtime += delta_exec;
+        schedstat_add(cfs_rq, exec_clock, delta_exec);
+
+        curr->vruntime += calc_delta_fair(delta_exec, curr);
+        update_min_vruntime(cfs_rq);
+
         if (entity_is_task(curr)) {
                 struct task_struct *curtask = task_of(curr);
 
@@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work)
                     (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
                         continue;
 
+                /*
+                 * Skip inaccessible VMAs to avoid any confusion between
+                 * PROT_NONE and NUMA hinting ptes
+                 */
+                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+                        continue;
+
                 do {
                         start = max(start, vma->vm_start);
                         end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
@@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         }
 }
 
-static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                     unsigned long delta_exec)
+static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 {
         /* dock delta_exec before expiring quota (as it could span periods) */
         cfs_rq->runtime_remaining -= delta_exec;
@@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 }
 
 static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 {
         if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
                 return;
@@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
         return rq_clock_task(rq_of(cfs_rq));
 }
 
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                     unsigned long delta_exec) {}
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
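For readers following the __calc_delta() rework above, here is a standalone sketch of the fixed-point scheme it adopts: delta_exec * weight / lw.weight is approximated as (delta_exec * fact) >> shift, with inv_weight = WMULT_CONST / lw.weight and fact renormalized until it fits in 32 bits. This is a userspace illustration under simplifying assumptions only: it ignores scale_load_down() and the inv_weight caching in struct load_weight, and it reimplements the kernel's mul_u64_u32_shr() with a 128-bit multiply.

#include <stdio.h>
#include <stdint.h>

#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32

/* Stand-in for the kernel helper of the same name, using __int128. */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

static uint64_t calc_delta_demo(uint64_t delta_exec, unsigned long weight,
                                unsigned long lw_weight)
{
        uint32_t inv_weight = lw_weight ? WMULT_CONST / lw_weight : WMULT_CONST;
        uint64_t fact = weight;
        int shift = WMULT_SHIFT;

        /* Keep fact within 32 bits so the final multiply cannot overflow. */
        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }

        fact = (uint64_t)(uint32_t)fact * inv_weight;

        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }

        return mul_u64_u32_shr(delta_exec, (uint32_t)fact, shift);
}

int main(void)
{
        /* NICE_0-style weight 1024 against a runqueue weight of 3072: ~1/3. */
        uint64_t d = calc_delta_demo(3000000ULL, 1024, 3072);

        printf("3000000 * 1024 / 3072 ~= %llu\n", (unsigned long long)d);
        return 0;
}

The result comes out just under the exact 1000000, a few parts per million low, which is the precision/overflow trade-off the new comment block in the hunk describes.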
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7d57275fc396..1c4065575fa2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
         struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+        /*
+         * Change rq's cpupri only if rt_rq is the top queue.
+         */
+        if (&rq->rt != rt_rq)
+                return;
+#endif
         if (rq->online && prio < prev_prio)
                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
         struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+        /*
+         * Change rq's cpupri only if rt_rq is the top queue.
+         */
+        if (&rq->rt != rt_rq)
+                return;
+#endif
         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0e9f9eaade2f..72a0f81dc5a8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -775,7 +775,7 @@ static int ftrace_profile_init(void)
         int cpu;
         int ret = 0;
 
-        for_each_online_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 ret = ftrace_profile_init_cpu(cpu);
                 if (ret)
                         break;
diff --git a/kernel/user.c b/kernel/user.c
index a3a0dbfda329..c006131beb77 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,9 +51,9 @@ struct user_namespace init_user_ns = {
         .owner = GLOBAL_ROOT_UID,
         .group = GLOBAL_ROOT_GID,
         .proc_inum = PROC_USER_INIT_INO,
-#ifdef CONFIG_KEYS_KERBEROS_CACHE
-        .krb_cache_register_sem =
-        __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem),
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+        .persistent_keyring_register_sem =
+        __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
 #endif
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
