Diffstat (limited to 'kernel')

-rw-r--r--  kernel/.gitignore            |   1
-rw-r--r--  kernel/Makefile              |   7
-rw-r--r--  kernel/bounds.c              |   2
-rw-r--r--  kernel/events/core.c         |  21
-rw-r--r--  kernel/fork.c                |   1
-rw-r--r--  kernel/futex.c               |   7
-rw-r--r--  kernel/kexec.c               |   5
-rw-r--r--  kernel/reboot.c              |   2
-rw-r--r--  kernel/sched/core.c          |   6
-rw-r--r--  kernel/sched/fair.c          | 151
-rw-r--r--  kernel/sched/rt.c            |  14
-rw-r--r--  kernel/system_certificates.S |  14
-rw-r--r--  kernel/system_keyring.c      |   4
-rw-r--r--  kernel/trace/ftrace.c        |   2
-rw-r--r--  kernel/user.c                |   6
-rw-r--r--  kernel/workqueue.c           |  32

16 files changed, 152 insertions(+), 123 deletions(-)
diff --git a/kernel/.gitignore b/kernel/.gitignore
index b3097bde4e9c..790d83c7d160 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -5,3 +5,4 @@ config_data.h
 config_data.gz
 timeconst.h
 hz.bc
+x509_certificate_list
diff --git a/kernel/Makefile b/kernel/Makefile
index bbaf7d59c1bb..bc010ee272b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
 ###############################################################################
 ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
 X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
-X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509
-X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
+X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
+X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
 				$(or $(realpath $(CERT)),$(CERT))))
+X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
 
 ifeq ($(X509_CERTIFICATES),)
 $(warning *** No X.509 certificates found ***)
@@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
 targets += $(obj)/.x509.list
 $(obj)/.x509.list:
 	@echo $(X509_CERTIFICATES) >$@
+endif
 
 clean-files := x509_certificate_list .x509.list
-endif
 
 ifeq ($(CONFIG_MODULE_SIG),y)
 ###############################################################################
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 5253204afdca..9fd4246b04b8 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -22,6 +22,6 @@ void foo(void)
 #ifdef CONFIG_SMP
 	DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
 #endif
-	DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
+	DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
 	/* End of constants */
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 72348dc192c1..f5744010a8d2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;
 
+	perf_pmu_disable(event->pmu);
+
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	if (event->pending_disable) {
 		event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
 		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
+
+	perf_pmu_enable(event->pmu);
 }
 
 static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
 		 struct perf_event_context *ctx)
 {
 	u64 tstamp = perf_event_time(event);
+	int ret = 0;
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
 	 */
 	smp_wmb();
 
+	perf_pmu_disable(event->pmu);
+
 	if (event->pmu->add(event, PERF_EF_START)) {
 		event->state = PERF_EVENT_STATE_INACTIVE;
 		event->oncpu = -1;
-		return -EAGAIN;
+		ret = -EAGAIN;
+		goto out;
 	}
 
 	event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
 	if (event->attr.exclusive)
 		cpuctx->exclusive = 1;
 
-	return 0;
+out:
+	perf_pmu_enable(event->pmu);
+
+	return ret;
 }
 
 static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		if (!event_filter_match(event))
 			continue;
 
+		perf_pmu_disable(event->pmu);
+
 		hwc = &event->hw;
 
 		if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
-			continue;
+			goto next;
 
 		/*
 		 * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+	next:
+		perf_pmu_enable(event->pmu);
 	}
 
 	perf_pmu_enable(ctx->pmu);
diff --git a/kernel/fork.c b/kernel/fork.c
index 728d5be9548c..5721f0e3f2da 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 	spin_lock_init(&mm->page_table_lock);
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
+	clear_tlb_flush_pending(mm);
 
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
diff --git a/kernel/futex.c b/kernel/futex.c
index 80ba086f021d..f6ff0191ecf7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 		return -EINVAL;
 	address -= key->both.offset;
 
+	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+		return -EFAULT;
+
 	/*
 	 * PROCESS_PRIVATE futexes are fast.
 	 * As the mm cannot disappear under us and the 'key' only needs
@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 	 * but access_ok() should be faster than find_vma()
 	 */
 	if (!fshared) {
-		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
-			return -EFAULT;
 		key->private.mm = mm;
 		key->private.address = address;
 		get_futex_key_refs(key);
@@ -288,7 +289,7 @@ again:
 	put_page(page);
 	/* serialize against __split_huge_page_splitting() */
 	local_irq_disable();
-	if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+	if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
 		page_head = compound_head(page);
 		/*
 		 * page_head is valid pointer but we must pin
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..9c970167e402 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 size_t vmcoreinfo_size;
 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
 
+/* Flag to indicate we are going to kexec a new kernel */
+bool kexec_in_progress = false;
+
 /* Location of the reserved area for the crash kernel */
 struct resource crashk_res = {
 	.name  = "Crash kernel",
@@ -1675,7 +1678,9 @@ int kernel_kexec(void)
 	} else
 #endif
 	{
+		kexec_in_progress = true;
 		kernel_restart_prepare(NULL);
+		migrate_to_reboot_cpu();
 		printk(KERN_EMERG "Starting new kernel\n");
 		machine_shutdown();
 	}
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f813b3474646..662c83fc16b7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_reboot_notifier);
 
-static void migrate_to_reboot_cpu(void)
+void migrate_to_reboot_cpu(void)
 {
 	/* The boot cpu is always logical cpu 0 */
 	int cpu = reboot_cpu;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e85cda20ab2b..a88f4a485c5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 static void update_top_cache_domain(int cpu)
 {
 	struct sched_domain *sd;
+	struct sched_domain *busy_sd = NULL;
 	int id = cpu;
 	int size = 1;
 
@@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
-		sd = sd->parent; /* sd_busy */
+		busy_sd = sd->parent; /* sd_busy */
 	}
-	rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
+	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_size, cpu) = size;
@@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		 * die on a /0 trap.
 		 */
 		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+		sg->sgp->power_orig = sg->sgp->power;
 
 		/*
 		 * Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fd773ade1a31..c7395d97e4cb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -178,59 +178,61 @@ void sched_init_granularity(void)
 	update_sysctl();
 }
 
-#if BITS_PER_LONG == 32
-# define WMULT_CONST	(~0UL)
-#else
-# define WMULT_CONST	(1UL << 32)
-#endif
-
+#define WMULT_CONST	(~0U)
 #define WMULT_SHIFT	32
 
-/*
- * Shift right and round:
- */
-#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+static void __update_inv_weight(struct load_weight *lw)
+{
+	unsigned long w;
+
+	if (likely(lw->inv_weight))
+		return;
+
+	w = scale_load_down(lw->weight);
+
+	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+		lw->inv_weight = 1;
+	else if (unlikely(!w))
+		lw->inv_weight = WMULT_CONST;
+	else
+		lw->inv_weight = WMULT_CONST / w;
+}
 
 /*
- * delta *= weight / lw
+ * delta_exec * weight / lw.weight
+ *   OR
+ * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
+ *
+ * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
+ * we're guaranteed shift stays positive because inv_weight is guaranteed to
+ * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
+ *
+ * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
+ * weight/lw.weight <= 1, and therefore our shift will also be positive.
  */
-static unsigned long
-calc_delta_mine(unsigned long delta_exec, unsigned long weight,
-		struct load_weight *lw)
+static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
 {
-	u64 tmp;
-
-	/*
-	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
-	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
-	 * 2^SCHED_LOAD_RESOLUTION.
-	 */
-	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
-		tmp = (u64)delta_exec * scale_load_down(weight);
-	else
-		tmp = (u64)delta_exec;
-
-	if (!lw->inv_weight) {
-		unsigned long w = scale_load_down(lw->weight);
+	u64 fact = scale_load_down(weight);
+	int shift = WMULT_SHIFT;
 
-		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
-			lw->inv_weight = 1;
-		else if (unlikely(!w))
-			lw->inv_weight = WMULT_CONST;
-		else
-			lw->inv_weight = WMULT_CONST / w;
-	}
+	__update_inv_weight(lw);
 
-	/*
-	 * Check whether we'd overflow the 64-bit multiplication:
-	 */
-	if (unlikely(tmp > WMULT_CONST))
-		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
-			WMULT_SHIFT/2);
-	else
-		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
+	if (unlikely(fact >> 32)) {
+		while (fact >> 32) {
+			fact >>= 1;
+			shift--;
+		}
+	}
+
+	/* hint to use a 32x32->64 mul */
+	fact = (u64)(u32)fact * lw->inv_weight;
+
+	while (fact >> 32) {
+		fact >>= 1;
+		shift--;
+	}
 
-	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
+	return mul_u64_u32_shr(delta_exec, fact, shift);
 }
 
 
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
 static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
 /*
  * delta /= w
  */
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
+static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
 {
 	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
+		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
 
 	return delta;
 }
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			update_load_add(&lw, se->load.weight);
 			load = &lw;
 		}
-		slice = calc_delta_mine(slice, se->load.weight, load);
+		slice = __calc_delta(slice, se->load.weight, load);
 	}
 	return slice;
 }
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p)
 #endif
 
 /*
- * Update the current task's runtime statistics. Skip current tasks that
- * are not in our scheduling class.
+ * Update the current task's runtime statistics.
  */
-static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
-	      unsigned long delta_exec)
-{
-	unsigned long delta_exec_weighted;
-
-	schedstat_set(curr->statistics.exec_max,
-		      max((u64)delta_exec, curr->statistics.exec_max));
-
-	curr->sum_exec_runtime += delta_exec;
-	schedstat_add(cfs_rq, exec_clock, delta_exec);
-	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
-
-	curr->vruntime += delta_exec_weighted;
-	update_min_vruntime(cfs_rq);
-}
-
 static void update_curr(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq->curr;
 	u64 now = rq_clock_task(rq_of(cfs_rq));
-	unsigned long delta_exec;
+	u64 delta_exec;
 
 	if (unlikely(!curr))
 		return;
 
-	/*
-	 * Get the amount of time the current task was running
-	 * since the last time we changed load (this cannot
-	 * overflow on 32 bits):
-	 */
-	delta_exec = (unsigned long)(now - curr->exec_start);
-	if (!delta_exec)
+	delta_exec = now - curr->exec_start;
+	if (unlikely((s64)delta_exec <= 0))
 		return;
 
-	__update_curr(cfs_rq, curr, delta_exec);
 	curr->exec_start = now;
 
+	schedstat_set(curr->statistics.exec_max,
+		      max(delta_exec, curr->statistics.exec_max));
+
+	curr->sum_exec_runtime += delta_exec;
+	schedstat_add(cfs_rq, exec_clock, delta_exec);
+
+	curr->vruntime += calc_delta_fair(delta_exec, curr);
+	update_min_vruntime(cfs_rq);
+
 	if (entity_is_task(curr)) {
 		struct task_struct *curtask = task_of(curr);
 
@@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work)
 		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
 			continue;
 
+		/*
+		 * Skip inaccessible VMAs to avoid any confusion between
+		 * PROT_NONE and NUMA hinting ptes
+		 */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
@@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	}
 }
 
-static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-				     unsigned long delta_exec)
+static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 {
 	/* dock delta_exec before expiring quota (as it could span periods) */
 	cfs_rq->runtime_remaining -= delta_exec;
@@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 }
 
 static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 {
 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
 		return;
@@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 	return rq_clock_task(rq_of(cfs_rq));
 }
 
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-				     unsigned long delta_exec) {}
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7d57275fc396..1c4065575fa2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Change rq's cpupri only if rt_rq is the top queue.
+	 */
+	if (&rq->rt != rt_rq)
+		return;
+#endif
 	if (rq->online && prio < prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Change rq's cpupri only if rt_rq is the top queue.
+	 */
+	if (&rq->rt != rt_rq)
+		return;
+#endif
 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
index 4aef390671cb..3e9868d47535 100644
--- a/kernel/system_certificates.S
+++ b/kernel/system_certificates.S
@@ -3,8 +3,18 @@
 
 	__INITRODATA
 
+	.align 8
 	.globl VMLINUX_SYMBOL(system_certificate_list)
 VMLINUX_SYMBOL(system_certificate_list):
+__cert_list_start:
 	.incbin "kernel/x509_certificate_list"
-	.globl VMLINUX_SYMBOL(system_certificate_list_end)
-VMLINUX_SYMBOL(system_certificate_list_end):
+__cert_list_end:
+
+	.align 8
+	.globl VMLINUX_SYMBOL(system_certificate_list_size)
+VMLINUX_SYMBOL(system_certificate_list_size):
+#ifdef CONFIG_64BIT
+	.quad __cert_list_end - __cert_list_start
+#else
+	.long __cert_list_end - __cert_list_start
+#endif
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
index 564dd93430a2..52ebc70263f4 100644
--- a/kernel/system_keyring.c
+++ b/kernel/system_keyring.c
@@ -22,7 +22,7 @@ struct key *system_trusted_keyring;
 EXPORT_SYMBOL_GPL(system_trusted_keyring);
 
 extern __initconst const u8 system_certificate_list[];
-extern __initconst const u8 system_certificate_list_end[];
+extern __initconst const unsigned long system_certificate_list_size;
 
 /*
  * Load the compiled-in keys
@@ -60,8 +60,8 @@ static __init int load_system_certificate_list(void)
 
 	pr_notice("Loading compiled-in X.509 certificates\n");
 
-	end = system_certificate_list_end;
 	p = system_certificate_list;
+	end = p + system_certificate_list_size;
 	while (p < end) {
 		/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
 		 * than 256 bytes in size.
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0e9f9eaade2f..72a0f81dc5a8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -775,7 +775,7 @@ static int ftrace_profile_init(void)
 	int cpu;
 	int ret = 0;
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ret = ftrace_profile_init_cpu(cpu);
 		if (ret)
 			break;
diff --git a/kernel/user.c b/kernel/user.c
index a3a0dbfda329..c006131beb77 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,9 +51,9 @@ struct user_namespace init_user_ns = {
 	.owner = GLOBAL_ROOT_UID,
 	.group = GLOBAL_ROOT_GID,
 	.proc_inum = PROC_USER_INIT_INO,
-#ifdef CONFIG_KEYS_KERBEROS_CACHE
-	.krb_cache_register_sem =
-	__RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem),
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+	.persistent_keyring_register_sem =
+	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
 #endif
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c66912be990f..b010eac595d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2851,19 +2851,6 @@ already_gone:
 	return false;
 }
 
-static bool __flush_work(struct work_struct *work)
-{
-	struct wq_barrier barr;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
-}
-
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2877,10 +2864,18 @@ static bool __flush_work(struct work_struct *work)
  */
 bool flush_work(struct work_struct *work)
 {
+	struct wq_barrier barr;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	return __flush_work(work);
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -4832,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-
-	/*
-	 * The work item is on-stack and can't lead to deadlock through
-	 * flushing. Use __flush_work() to avoid spurious lockdep warnings
-	 * when work_on_cpu()s are nested.
-	 */
-	__flush_work(&wfc.work);
-
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);