Diffstat (limited to 'kernel')

-rw-r--r--  kernel/cpu.c                 | 11 ++++++-----
-rw-r--r--  kernel/fork.c                |  3 +--
-rw-r--r--  kernel/printk/printk_safe.c  |  4 ++--
-rw-r--r--  kernel/time/clocksource.c    | 40 ++++++++++++++++++++++++++++++----------
4 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index aa7fe85ad62e..0097acec1c71 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -607,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
         bool bringup = st->bringup;
         enum cpuhp_state state;
 
+        if (WARN_ON_ONCE(!st->should_run))
+                return;
+
         /*
          * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
          * that if we see ->should_run we also see the rest of the state.
          */
         smp_mb();
 
-        if (WARN_ON_ONCE(!st->should_run))
-                return;
-
         cpuhp_lock_acquire(bringup);
 
         if (st->single) {
@@ -916,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
                 if (ret) {
                         st->target = prev_state;
-                        undo_cpu_down(cpu, st);
+                        if (st->state < prev_state)
+                                undo_cpu_down(cpu, st);
                         break;
                 }
         }
@@ -969,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
          * to do the further cleanups.
          */
         ret = cpuhp_down_callbacks(cpu, st, target);
-        if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+        if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                 cpuhp_reset_state(st, prev_state);
                 __cpuhp_kick_ap(st);
         }
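
The cpuhp_down_callbacks() and _cpu_down() hunks above enforce the same invariant: a failed teardown may only be rolled back if at least one state was actually torn down (st->state < prev_state); otherwise undo_cpu_down() would re-run bringup callbacks for states that never went down and corrupt the hotplug state machine. A minimal userspace sketch of that guarded rollback, with hypothetical teardown_step()/bringup_step() helpers standing in for the real hotplug callbacks:

/* Guarded rollback sketch; helper names are hypothetical, not kernel API. */
#include <stdio.h>

static int teardown_step(int state)
{
        return state == 3 ? -1 : 0;     /* pretend the very first step fails */
}

static void bringup_step(int state)
{
        printf("re-bringup of state %d\n", state);
}

static void undo_down(int *state, int prev_state)
{
        while (*state < prev_state)     /* redo only what was torn down */
                bringup_step(++*state);
}

int main(void)
{
        int state = 3, prev_state = 3, target = 0, ret = 0;

        for (; state > target; state--) {
                ret = teardown_step(state);
                if (ret) {
                        /*
                         * The guard added by the patch: if the very first
                         * teardown step failed, state == prev_state and
                         * there is nothing to undo; rolling back anyway
                         * would re-run bringup callbacks for states that
                         * never went down.
                         */
                        if (state < prev_state)
                                undo_down(&state, prev_state);
                        break;
                }
        }
        printf("stopped at state %d, ret %d\n", state, ret);
        return 0;
}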
diff --git a/kernel/fork.c b/kernel/fork.c
index d896e9ca38b0..f0b58479534f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                         goto out;
         }
         /* a new mm has just been created */
-        arch_dup_mmap(oldmm, mm);
-        retval = 0;
+        retval = arch_dup_mmap(oldmm, mm);
 out:
         up_write(&mm->mmap_sem);
         flush_tlb_mm(oldmm);
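
The dup_mmap() hunk stops masking failures: arch_dup_mmap() can return an error, and unconditionally setting retval = 0 afterwards reported success regardless. A compilable sketch of that goto-out exit path with the error now propagated (the stub functions are illustrative):

/* Error propagation through a common exit path; stubs are illustrative. */
#include <stdio.h>

static int step(void)      { return 0; }        /* stub: succeeds */
static int arch_hook(void) { return -1; }       /* stub: pretend the hook fails */

static int dup_thing(void)
{
        int retval;

        retval = step();
        if (retval)
                goto out;

        /* Before the fix: arch_hook(); retval = 0;  -- failure discarded. */
        retval = arch_hook();
out:
        /* unlock/flush would run here on success and failure alike */
        return retval;
}

int main(void)
{
        printf("dup_thing() = %d\n", dup_thing());
        return 0;
}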
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index a0a74c533e4b..0913b4d385de 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
         return printk_safe_log_store(s, fmt, args);
 }
 
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
 {
         this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
 }
 
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
 {
         this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
 }
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f74fb00d8064..0e6e97a01942 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
         spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+        /*
+         * We cannot directly run clocksource_watchdog_kthread() here, because
+         * clocksource_select() calls timekeeping_notify() which uses
+         * stop_machine(). One cannot use stop_machine() from a workqueue() due
+         * lock inversions wrt CPU hotplug.
+         *
+         * Also, we only ever run this work once or twice during the lifetime
+         * of the kernel, so there is no point in creating a more permanent
+         * kthread for this.
+         *
+         * If kthread_run fails the next watchdog scan over the
+         * watchdog_list will find the unstable clock again.
+         */
+        kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
         cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
         cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
         /*
-         * If the clocksource is registered clocksource_watchdog_work() will
+         * If the clocksource is registered clocksource_watchdog_kthread() will
          * re-rate and re-select.
          */
         if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
         if (cs->mark_unstable)
                 cs->mark_unstable(cs);
 
-        /* kick clocksource_watchdog_work() */
+        /* kick clocksource_watchdog_kthread() */
         if (finished_booting)
                 schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs: clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
         }
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
         struct clocksource *cs, *tmp;
         unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
         return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
         mutex_lock(&clocksource_mutex);
-        if (__clocksource_watchdog_work())
+        if (__clocksource_watchdog_kthread())
                 clocksource_select();
         mutex_unlock(&clocksource_mutex);
+        return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
         /*
          * Run the watchdog first to eliminate unstable clock sources
          */
-        __clocksource_watchdog_work();
+        __clocksource_watchdog_kthread();
         clocksource_select();
         mutex_unlock(&clocksource_mutex);
         return 0;
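
The comment added in clocksource_watchdog_work() above documents the general pattern this change restores: timer/atomic context may only schedule_work(), and the work item in turn spawns a one-shot kthread, because stop_machine() (reached via clocksource_select() -> timekeeping_notify()) must not be called from a workqueue worker. A minimal module-style sketch of that bounce pattern; the module scaffolding, names, and the kthread body are illustrative, not from this patch:

/* Workqueue -> one-shot kthread bounce; names are illustrative. */
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static int heavy_kthread(void *data)
{
        /*
         * A plain kthread may block and may reach stop_machine()-based
         * facilities; a workqueue worker must not.
         */
        pr_info("heavy work done\n");
        return 0;
}

static void bounce_work(struct work_struct *work)
{
        /* If kthread_run() fails, the next trigger simply retries. */
        kthread_run(heavy_kthread, NULL, "kheavy");
}

static DECLARE_WORK(bounce, bounce_work);

static int __init bounce_init(void)
{
        schedule_work(&bounce);         /* safe even from atomic context */
        return 0;
}

static void __exit bounce_exit(void)
{
        flush_work(&bounce);            /* don't leave the work pending */
}

module_init(bounce_init);
module_exit(bounce_exit);
MODULE_LICENSE("GPL");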