Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile        6
-rw-r--r--  kernel/bpf/core.c     12
-rw-r--r--  kernel/kexec.c         2
-rw-r--r--  kernel/sched/core.c   15
-rw-r--r--  kernel/sched/idle.c   16
5 files changed, 12 insertions(+), 39 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 0f8f8b0bc1bf..60c302cfb4d3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -197,9 +197,9 @@ x509.genkey:
 	@echo >>x509.genkey "x509_extensions = myexts"
 	@echo >>x509.genkey
 	@echo >>x509.genkey "[ req_distinguished_name ]"
-	@echo >>x509.genkey "O = Magrathea"
-	@echo >>x509.genkey "CN = Glacier signing key"
-	@echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2"
+	@echo >>x509.genkey "#O = Unspecified company"
+	@echo >>x509.genkey "CN = Build time autogenerated kernel key"
+	@echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company"
 	@echo >>x509.genkey
 	@echo >>x509.genkey "[ myexts ]"
 	@echo >>x509.genkey "basicConstraints=critical,CA:FALSE"
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4139a0f8b558..54f0e7fcd0e2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -357,8 +357,8 @@ select_insn:
 	ALU64_MOD_X:
 		if (unlikely(SRC == 0))
 			return 0;
-		tmp = DST;
-		DST = do_div(tmp, SRC);
+		div64_u64_rem(DST, SRC, &tmp);
+		DST = tmp;
 		CONT;
 	ALU_MOD_X:
 		if (unlikely(SRC == 0))
@@ -367,8 +367,8 @@ select_insn:
 		DST = do_div(tmp, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		tmp = DST;
-		DST = do_div(tmp, IMM);
+		div64_u64_rem(DST, IMM, &tmp);
+		DST = tmp;
 		CONT;
 	ALU_MOD_K:
 		tmp = (u32) DST;
@@ -377,7 +377,7 @@ select_insn:
 	ALU64_DIV_X:
 		if (unlikely(SRC == 0))
 			return 0;
-		do_div(DST, SRC);
+		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
 		if (unlikely(SRC == 0))
@@ -387,7 +387,7 @@ select_insn:
 		DST = (u32) tmp;
 		CONT;
 	ALU64_DIV_K:
-		do_div(DST, IMM);
+		DST = div64_u64(DST, IMM);
 		CONT;
 	ALU_DIV_K:
 		tmp = (u32) DST;
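Note: the four hunks above switch the interpreter's 64-bit BPF divide and modulo paths from do_div() to div64_u64()/div64_u64_rem(). do_div() only takes a 32-bit divisor, so a 64-bit SRC or IMM was silently truncated; the 32-bit ALU_* cases keep do_div() because their operands really are u32. A minimal userspace sketch of the difference follows; the model_* functions are illustrative stand-ins, not the kernel helpers themselves.

#include <stdint.h>
#include <stdio.h>

/* Illustrative userspace models of the two kernel helpers involved above. */
static uint64_t model_div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;               /* honours all 64 divisor bits */
}

static uint32_t model_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);    /* divisor is only 32 bits wide */
	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t dst = 1ULL << 32;               /* DST register value */
	uint64_t src = (1ULL << 32) | 1;         /* divisor with high bits set */

	uint64_t old_style = dst;
	model_do_div(&old_style, (uint32_t)src); /* src truncated to 1 -> 2^32 */

	uint64_t new_style = model_div64_u64(dst, src); /* full divide -> 0 */

	printf("truncated do_div: %llu, div64_u64: %llu\n",
	       (unsigned long long)old_style, (unsigned long long)new_style);
	return 0;
}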
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 38c25b1f2fd5..7a36fdcca5bf 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 	do {
 		unsigned long pfn, epfn, addr, eaddr;
 
-		pages = kimage_alloc_pages(GFP_KERNEL, order);
+		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
 		if (!pages)
 			break;
 		pfn = page_to_pfn(pages);
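Note: the control pages are now allocated with KEXEC_CONTROL_MEMORY_GFP instead of a hard-coded GFP_KERNEL, so an architecture can steer them into memory it can actually use at kexec time (for example below a DMA limit). A sketch of the usual override pattern this implies; the default value and its location are assumptions, not taken from this diff.

#include <linux/gfp.h>

/*
 * Assumed fallback, normally provided by a generic header such as
 * <linux/kexec.h>: an architecture that needs something stricter defines
 * the macro in its own <asm/kexec.h> before this default is seen.
 */
#ifndef KEXEC_CONTROL_MEMORY_GFP
#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
#endif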
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9123a82cbb6..fe22f7510bce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq_clock_skip_update(rq, true);
 }
 
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
-		struct task_migration_notifier tmn;
-
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-
-		tmn.task = p;
-		tmn.from_cpu = task_cpu(p);
-		tmn.to_cpu = new_cpu;
-
-		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
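Note: both hunks above delete the task-migration notifier chain: the chain head, register_task_migration_notifier(), and the atomic_notifier_call_chain() call in set_task_cpu(). For reference, a hypothetical consumer of the removed interface, reconstructed only from the fields visible in the deleted lines; the callback and variable names are made up for illustration.

#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/printk.h>

/* Hypothetical callback a user of the removed chain would have provided. */
static int example_migration_cb(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct task_migration_notifier *tmn = data;

	pr_debug("task %d migrating cpu%d -> cpu%d\n",
		 task_pid_nr(tmn->task), tmn->from_cpu, tmn->to_cpu);
	return NOTIFY_OK;
}

static struct notifier_block example_migration_nb = {
	.notifier_call = example_migration_cb,
};

/* Registered once at init time, while the hook still existed: */
/*	register_task_migration_notifier(&example_migration_nb); */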
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index deef1caa94c6..fefcb1fa5160 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -81,7 +81,6 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
-	unsigned int broadcast;
 	bool reflect;
 
 	/*
@@ -150,17 +149,6 @@ static void cpuidle_idle_call(void)
 		goto exit_idle;
 	}
 
-	broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;
-
-	/*
-	 * Tell the time framework to switch to a broadcast timer
-	 * because our local timer will be shutdown. If a local timer
-	 * is used from another cpu as a broadcast timer, this call may
-	 * fail if it is not available
-	 */
-	if (broadcast && tick_broadcast_enter())
-		goto use_default;
-
 	/* Take note of the planned idle state. */
 	idle_set_state(this_rq(), &drv->states[next_state]);
 
@@ -174,8 +162,8 @@ static void cpuidle_idle_call(void)
 	/* The cpu is no longer idle or about to enter idle. */
 	idle_set_state(this_rq(), NULL);
 
-	if (broadcast)
-		tick_broadcast_exit();
+	if (entered_state == -EBUSY)
+		goto use_default;
 
 	/*
 	 * Give the governor an opportunity to reflect on the outcome
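Note: the idle loop no longer wraps the state entry in tick_broadcast_enter()/tick_broadcast_exit(); instead it treats an entered_state of -EBUSY as "could not enter, fall back to the default idle handler". The implication, which is an assumption about code outside this diff, is that the cpuidle core now owns the broadcast-timer handling, roughly along these lines.

#include <linux/cpuidle.h>
#include <linux/tick.h>

/*
 * Assumed shape of the logic that migrates out of cpuidle_idle_call() here;
 * a sketch only, not the actual cpuidle core function.
 */
static int enter_state_sketch(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev, int index)
{
	bool broadcast = drv->states[index].flags & CPUIDLE_FLAG_TIMER_STOP;

	if (broadcast && tick_broadcast_enter())
		return -EBUSY;	/* idle loop above then falls back to use_default */

	/* ... enter the hardware idle state via the driver ... */

	if (broadcast)
		tick_broadcast_exit();

	return index;		/* on success: the state that was entered */
}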