Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c           | 123
-rw-r--r--  kernel/events/hw_breakpoint.c  |   4
-rw-r--r--  kernel/exit.c                  |  16
-rw-r--r--  kernel/fork.c                  |  27
-rw-r--r--  kernel/irq/autoprobe.c         |   4
-rw-r--r--  kernel/irq/chip.c              |  42
-rw-r--r--  kernel/irq/internals.h         |   2
-rw-r--r--  kernel/irq/manage.c            |   2
-rw-r--r--  kernel/kprobes.c               |   6
-rw-r--r--  kernel/params.c                |   3
-rw-r--r--  kernel/pid.c                   |   4
-rw-r--r--  kernel/relay.c                 |  10
-rw-r--r--  kernel/sched/core.c            |  24
-rw-r--r--  kernel/sched/fair.c            |  36
-rw-r--r--  kernel/sched/rt.c              |   5
-rw-r--r--  kernel/watchdog.c              |   2
16 files changed, 223 insertions(+), 87 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 32b48c88971..1b5c081d8b9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2300,7 +2300,10 @@ do { \
 	return div64_u64(dividend, divisor);
 }
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static DEFINE_PER_CPU(int, perf_throttled_count);
+static DEFINE_PER_CPU(u64, perf_throttled_seq);
+
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2319,22 +2322,40 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
+/*
+ * combine freq adjustment with unthrottling to avoid two passes over the
+ * events. At the same time, make sure having freq events does not change
+ * the rate of unthrottling as that would introduce bias.
+ */
+static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
+					   int needs_unthr)
 {
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
-	u64 interrupts, now;
+	u64 now, period = TICK_NSEC;
 	s64 delta;
 
-	if (!ctx->nr_freq)
+	/*
+	 * only need to iterate over all events iff:
+	 * - context has events in frequency mode (needs freq adjust)
+	 * - there are events to unthrottle on this cpu
+	 */
+	if (!(ctx->nr_freq || needs_unthr))
 		return;
 
+	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
+
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -2344,13 +2365,8 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 
 		hwc = &event->hw;
 
-		interrupts = hwc->interrupts;
-		hwc->interrupts = 0;
-
-		/*
-		 * unthrottle events on the tick
-		 */
-		if (interrupts == MAX_INTERRUPTS) {
+		if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
+			hwc->interrupts = 0;
 			perf_log_throttle(event, 1);
 			event->pmu->start(event, 0);
 		}
@@ -2358,14 +2374,30 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
-		event->pmu->read(event);
+		/*
+		 * stop the event and update event->count
+		 */
+		event->pmu->stop(event, PERF_EF_UPDATE);
+
 		now = local64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
 		hwc->freq_count_stamp = now;
 
+		/*
+		 * restart the event
+		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
+		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
+
+		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
+
+	perf_pmu_enable(ctx->pmu);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -2388,16 +2420,13 @@ static void rotate_ctx(struct perf_event_context *ctx)
  */
 static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
-	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
 	struct perf_event_context *ctx = NULL;
-	int rotate = 0, remove = 1, freq = 0;
+	int rotate = 0, remove = 1;
 
 	if (cpuctx->ctx.nr_events) {
 		remove = 0;
 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
 			rotate = 1;
-		if (cpuctx->ctx.nr_freq)
-			freq = 1;
 	}
 
 	ctx = cpuctx->task_ctx;
@@ -2405,37 +2434,26 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 		remove = 0;
 		if (ctx->nr_events != ctx->nr_active)
 			rotate = 1;
-		if (ctx->nr_freq)
-			freq = 1;
 	}
 
-	if (!rotate && !freq)
+	if (!rotate)
 		goto done;
 
 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
 
-	if (freq) {
-		perf_ctx_adjust_freq(&cpuctx->ctx, interval);
-		if (ctx)
-			perf_ctx_adjust_freq(ctx, interval);
-	}
-
-	if (rotate) {
-		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-		if (ctx)
-			ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+	if (ctx)
+		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
-		rotate_ctx(&cpuctx->ctx);
-		if (ctx)
-			rotate_ctx(ctx);
+	rotate_ctx(&cpuctx->ctx);
+	if (ctx)
+		rotate_ctx(ctx);
 
-		perf_event_sched_in(cpuctx, ctx, current);
-	}
+	perf_event_sched_in(cpuctx, ctx, current);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
-
 done:
 	if (remove)
 		list_del_init(&cpuctx->rotation_list);
@@ -2445,10 +2463,22 @@ void perf_event_task_tick(void)
 {
 	struct list_head *head = &__get_cpu_var(rotation_list);
 	struct perf_cpu_context *cpuctx, *tmp;
+	struct perf_event_context *ctx;
+	int throttled;
 
 	WARN_ON(!irqs_disabled());
 
+	__this_cpu_inc(perf_throttled_seq);
+	throttled = __this_cpu_xchg(perf_throttled_count, 0);
+
 	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+		ctx = &cpuctx->ctx;
+		perf_adjust_freq_unthr_context(ctx, throttled);
+
+		ctx = cpuctx->task_ctx;
+		if (ctx)
+			perf_adjust_freq_unthr_context(ctx, throttled);
+
 		if (cpuctx->jiffies_interval == 1 ||
 				!(jiffies % cpuctx->jiffies_interval))
 			perf_rotate_context(cpuctx);
@@ -4509,6 +4539,7 @@ static int __perf_event_overflow(struct perf_event *event,
 {
 	int events = atomic_read(&event->event_limit);
 	struct hw_perf_event *hwc = &event->hw;
+	u64 seq;
 	int ret = 0;
 
 	/*
@@ -4518,14 +4549,20 @@ static int __perf_event_overflow(struct perf_event *event,
 	if (unlikely(!is_sampling_event(event)))
 		return 0;
 
-	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
-		if (throttle) {
+	seq = __this_cpu_read(perf_throttled_seq);
+	if (seq != hwc->interrupts_seq) {
+		hwc->interrupts_seq = seq;
+		hwc->interrupts = 1;
+	} else {
+		hwc->interrupts++;
+		if (unlikely(throttle
+			     && hwc->interrupts >= max_samples_per_tick)) {
+			__this_cpu_inc(perf_throttled_count);
 			hwc->interrupts = MAX_INTERRUPTS;
 			perf_log_throttle(event, 0);
 			ret = 1;
 		}
-	} else
-		hwc->interrupts++;
+	}
 
 	if (event->attr.freq) {
 		u64 now = perf_clock();
@@ -4534,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*
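The throttling rework above can be condensed into a short, self-contained sketch. This is illustrative only; the names (sample_state, should_throttle, tick_unthrottle) are made up and are not part of the patch:

#include <linux/percpu.h>
#include <linux/types.h>

/* Per-event state for the "count samples per tick window" scheme. */
struct sample_state {
	u64 seq;		/* tick window this event was last seen in */
	u64 interrupts;		/* samples taken within that window */
};

static DEFINE_PER_CPU(int, throttled_count);	/* bumped by the overflow path */
static DEFINE_PER_CPU(u64, throttled_seq);	/* bumped once per timer tick */

/* Overflow path: cheap per-event bookkeeping, no list walk. */
static bool should_throttle(struct sample_state *s, u64 max_per_tick)
{
	u64 seq = __this_cpu_read(throttled_seq);

	if (seq != s->seq) {			/* first sample in this window */
		s->seq = seq;
		s->interrupts = 1;
		return false;
	}
	if (++s->interrupts < max_per_tick)
		return false;

	__this_cpu_inc(throttled_count);	/* ask the tick to unthrottle */
	return true;
}

/* Tick path: one pass that both unthrottles and re-adjusts frequencies. */
static void tick_unthrottle(void)
{
	int throttled;

	__this_cpu_inc(throttled_seq);			/* open a new window */
	throttled = __this_cpu_xchg(throttled_count, 0);

	/* walk the active contexts only if (ctx->nr_freq || throttled) */
	(void)throttled;
}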
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b7971d6f38b..ee706ce44aa 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -651,10 +651,10 @@ int __init init_hw_breakpoint(void)
 
  err_alloc:
 	for_each_possible_cpu(err_cpu) {
-		if (err_cpu == cpu)
-			break;
 		for (i = 0; i < TYPE_MAX; i++)
 			kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+		if (err_cpu == cpu)
+			break;
 	}
 
 	return -ENOMEM;
diff --git a/kernel/exit.c b/kernel/exit.c
index fd0af05e063..8e6b0e626b9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1038,6 +1038,22 @@ void do_exit(long code)
 	if (tsk->nr_dirtied)
 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
 	exit_rcu();
+
+	/*
+	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
+	 * when the following two conditions become true:
+	 *   - there is a race condition on mmap_sem (it is acquired by
+	 *     exit_mm()), and
+	 *   - an SMI occurs before TASK_RUNNING is set
+	 *     (or the hypervisor of a virtual machine switches to another guest).
+	 * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
+	 *
+	 * To avoid this, we have to wait for tsk->pi_lock, which is held by
+	 * try_to_wake_up(), to be released.
+	 */
+	smp_mb();
+	raw_spin_unlock_wait(&tsk->pi_lock);
+
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
 	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
diff --git a/kernel/fork.c b/kernel/fork.c
index 051f090d40c..e2cd3e2a5ae 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -66,6 +66,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/oom.h>
 #include <linux/khugepaged.h>
+#include <linux/signalfd.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -647,6 +648,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+	struct mm_struct *mm;
+	int err;
+
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, mode)) {
+		mmput(mm);
+		mm = ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -890,7 +911,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 			return -ENOMEM;
 
 		new_ioc->ioprio = ioc->ioprio;
-		put_io_context(new_ioc, NULL);
+		put_io_context(new_ioc);
 	}
 #endif
 	return 0;
@@ -915,8 +936,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 
 void __cleanup_sighand(struct sighand_struct *sighand)
 {
-	if (atomic_dec_and_test(&sighand->count))
+	if (atomic_dec_and_test(&sighand->count)) {
+		signalfd_cleanup(sighand);
 		kmem_cache_free(sighand_cachep, sighand);
+	}
 }
 
 
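As a usage note, the new mm_access() helper is meant for callers outside kernel/ that are not shown in this diffstat. A hypothetical caller would follow roughly this pattern (the function name and filter are illustrative, and the declaration from this series is assumed to be visible via sched.h):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

/* Hypothetical caller: take a reference on another task's mm with a
 * ptrace permission check, the way mm_access() is intended to be used. */
static struct mm_struct *example_grab_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = mm_access(task, PTRACE_MODE_READ);
	if (IS_ERR_OR_NULL(mm))	/* NULL: task has no mm; ERR_PTR: no permission or interrupted */
		return mm;

	/* ... inspect mm here ... */

	mmput(mm);		/* drop the reference mm_access() took */
	return NULL;
}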
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 342d8f44e40..0119b9d467a 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
 			if (desc->irq_data.chip->irq_set_type)
 				desc->irq_data.chip->irq_set_type(&desc->irq_data,
 							 IRQ_TYPE_PROBE);
-			irq_startup(desc);
+			irq_startup(desc, false);
 		}
 		raw_spin_unlock_irq(&desc->lock);
 	}
@@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
 		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && irq_settings_can_probe(desc)) {
 			desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
-			if (irq_startup(desc))
+			if (irq_startup(desc, false))
 				desc->istate |= IRQS_PENDING;
 		}
 		raw_spin_unlock_irq(&desc->lock);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f7c543a801d..fb7db75ee0c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -157,19 +157,22 @@ static void irq_state_set_masked(struct irq_desc *desc)
 	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 }
 
-int irq_startup(struct irq_desc *desc)
+int irq_startup(struct irq_desc *desc, bool resend)
 {
+	int ret = 0;
+
 	irq_state_clr_disabled(desc);
 	desc->depth = 0;
 
 	if (desc->irq_data.chip->irq_startup) {
-		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
 		irq_state_clr_masked(desc);
-		return ret;
+	} else {
+		irq_enable(desc);
 	}
-
-	irq_enable(desc);
-	return 0;
+	if (resend)
+		check_irq_resend(desc, desc->irq_data.irq);
+	return ret;
 }
 
 void irq_shutdown(struct irq_desc *desc)
@@ -330,6 +333,24 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(handle_simple_irq);
 
+/*
+ * Called unconditionally from handle_level_irq() and only for oneshot
+ * interrupts from handle_fasteoi_irq()
+ */
+static void cond_unmask_irq(struct irq_desc *desc)
+{
+	/*
+	 * We need to unmask in the following cases:
+	 * - Standard level irq (IRQF_ONESHOT is not set)
+	 * - Oneshot irq which did not wake the thread (caused by a
+	 *   spurious interrupt or a primary handler handling it
+	 *   completely).
+	 */
+	if (!irqd_irq_disabled(&desc->irq_data) &&
+	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
+		unmask_irq(desc);
+}
+
 /**
  * handle_level_irq - Level type irq handler
  * @irq: the interrupt number
@@ -362,8 +383,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
 	handle_irq_event(desc);
 
-	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
-		unmask_irq(desc);
+	cond_unmask_irq(desc);
+
 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
@@ -417,6 +438,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	preflow_handler(desc);
 	handle_irq_event(desc);
 
+	if (desc->istate & IRQS_ONESHOT)
+		cond_unmask_irq(desc);
+
 out_eoi:
 	desc->irq_data.chip->irq_eoi(&desc->irq_data);
 out_unlock:
@@ -625,7 +649,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
-		irq_startup(desc);
+		irq_startup(desc, true);
 	}
 out:
 	irq_put_desc_busunlock(desc, flags);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b7952316016..40378ff877e 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
-extern int irq_startup(struct irq_desc *desc);
+extern int irq_startup(struct irq_desc *desc, bool resend);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a9a9dbe49fe..32313c08444 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1027,7 +1027,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			desc->istate |= IRQS_ONESHOT;
 
 		if (irq_settings_can_autoenable(desc))
-			irq_startup(desc);
+			irq_startup(desc, true);
 		else
 			/* Undo nested disables: */
 			desc->depth = 1;
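Taken together, the irq_startup() call sites changed above follow one rule: the auto-probe paths pass resend=false, while __setup_irq() and chained-handler setup pass true so that an interrupt that arrived while the line was shut down can be replayed via check_irq_resend(). A sketch of that rule, meaningful only inside kernel/irq/ where irq_startup() is declared (the wrapper name is made up):

#include "internals.h"

/* Illustrative wrapper only; mirrors the call sites in this patch:
 *   probe_irq_on()               -> irq_startup(desc, false)
 *   __setup_irq()                -> irq_startup(desc, true)
 *   __irq_set_handler() (chained)-> irq_startup(desc, true)
 */
static int example_start(struct irq_desc *desc, bool probing)
{
	return irq_startup(desc, !probing);
}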
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 29f5b65bee2..9788c0ec6f4 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1673,8 +1673,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 		ri->rp = rp;
 		ri->task = current;
 
-		if (rp->entry_handler && rp->entry_handler(ri, regs))
+		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+			raw_spin_lock_irqsave(&rp->lock, flags);
+			hlist_add_head(&ri->hlist, &rp->free_instances);
+			raw_spin_unlock_irqrestore(&rp->lock, flags);
 			return 0;
+		}
 
 		arch_prepare_kretprobe(ri, regs);
 
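The hunk above matters for kretprobes whose entry_handler can decline a call by returning non-zero; a minimal, hypothetical user looks like this (the probed symbol and the filter are examples only, not from the patch):

#include <linux/kprobes.h>
#include <linux/sched.h>

static int my_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	if (!current->mm)	/* e.g. skip kernel threads */
		return 1;	/* decline: no return handler for this call */
	return 0;
}

static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	return 0;
}

static struct kretprobe my_rp = {
	.kp.symbol_name	= "do_fork",	/* hypothetical target */
	.entry_handler	= my_entry,
	.handler	= my_ret,
	.maxactive	= 16,
};
/* register_kretprobe(&my_rp) in module init. Before this fix, every
 * declined call leaked one instance from rp->free_instances. */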
diff --git a/kernel/params.c b/kernel/params.c
index 32ee0430828..4bc965d8a1f 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -97,7 +97,8 @@ static int parse_one(char *param,
 	for (i = 0; i < num_params; i++) {
 		if (parameq(param, params[i].name)) {
 			/* No one handled NULL, so do it here. */
-			if (!val && params[i].ops->set != param_set_bool)
+			if (!val && params[i].ops->set != param_set_bool
+			    && params[i].ops->set != param_set_bint)
 				return -EINVAL;
 			pr_debug("They are equal! Calling %p\n",
 				 params[i].ops->set);
diff --git a/kernel/pid.c b/kernel/pid.c
index ce8e00deacc..9f08dfabaf1 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -543,12 +543,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
  */
 void __init pidhash_init(void)
 {
-	int i, pidhash_size;
+	unsigned int i, pidhash_size;
 
 	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
 					   HASH_EARLY | HASH_SMALL,
 					   &pidhash_shift, NULL, 4096);
-	pidhash_size = 1 << pidhash_shift;
+	pidhash_size = 1U << pidhash_shift;
 
 	for (i = 0; i < pidhash_size; i++)
 		INIT_HLIST_HEAD(&pid_hash[i]);
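The signedness change is defensive; the pattern it enforces is shown below in an illustrative helper (the name is made up):

#include <linux/list.h>

/* With a signed int, 1 << 31 is undefined behaviour and the i < size
 * comparison mixes signedness; keeping both unsigned, as the hunk above
 * does, is well-defined for any shift up to 31. */
static void example_init_table(struct hlist_head *table, unsigned int shift)
{
	unsigned int i, size = 1U << shift;

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&table[i]);
}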
diff --git a/kernel/relay.c b/kernel/relay.c
index 4335e1d7ee2..ab56a1764d4 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -164,10 +164,14 @@ depopulate:
  */
 static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
-	struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
-	if (!buf)
+	struct rchan_buf *buf;
+
+	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
 		return NULL;
 
+	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
 	buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
 	if (!buf->padding)
 		goto free_buf;
@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
 
 	if (!(subbuf_size && n_subbufs))
 		return NULL;
+	if (subbuf_size > UINT_MAX / n_subbufs)
+		return NULL;
 
 	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
 	if (!chan)
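Both guards above are instances of the usual check-before-multiply idiom: prove that n * size cannot wrap before handing the product to an allocator. A generic form (hypothetical helper, not part of the patch):

#include <linux/kernel.h>
#include <linux/types.h>

static inline bool example_product_fits(unsigned int n, unsigned int size)
{
	return size == 0 || n <= UINT_MAX / size;
}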
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df00cb09263..33a0676ea74 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
@@ -723,9 +724,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -734,9 +732,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -1937,7 +1932,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 		local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
-	trace_sched_stat_sleeptime(current, rq->clock);
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
@@ -4134,7 +4128,7 @@ recheck:
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
 
@@ -4147,7 +4141,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4992,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		deactivate_task(rq_src, p, 0);
+		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		enqueue_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -6734,7 +6728,7 @@ int __init sched_create_sysfs_power_savings_entries(struct device *dev)
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 			     void *hcpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
+	switch (action) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
 		cpuset_update_active_cpus();
@@ -6747,7 +6741,7 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 			       void *hcpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
+	switch (action) {
 	case CPU_DOWN_PREPARE:
 		cpuset_update_active_cpus();
 		return NOTIFY_OK;
@@ -7032,10 +7026,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
 	on_rq = p->on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
 
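For orientation: activate_task()/deactivate_task() also touch rq->nr_uninterruptible (see the context lines earlier in this file), while the raw dequeue_task()/enqueue_task() pair does not; the substitutions above switch the setscheduler, migration and normalize paths to the raw pair. The snippet below is a paraphrase of that surrounding context, not new code:

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;	/* load-average bookkeeping */

	enqueue_task(rq, p, flags);		/* the part the changed callers use directly */
}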
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 84adb2d66cb..aca16b843b7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1003,6 +1003,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (unlikely(delta > se->statistics.sleep_max))
 			se->statistics.sleep_max = delta;
 
+		se->statistics.sleep_start = 0;
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
@@ -1019,6 +1020,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (unlikely(delta > se->statistics.block_max))
 			se->statistics.block_max = delta;
 
+		se->statistics.block_start = 0;
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
@@ -4866,6 +4868,15 @@ static void nohz_balancer_kick(int cpu)
 	return;
 }
 
+static inline void clear_nohz_tick_stopped(int cpu)
+{
+	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+		atomic_dec(&nohz.nr_cpus);
+		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+	}
+}
+
 static inline void set_cpu_sd_state_busy(void)
 {
 	struct sched_domain *sd;
@@ -4904,6 +4915,12 @@ void select_nohz_load_balancer(int stop_tick)
 {
 	int cpu = smp_processor_id();
 
+	/*
+	 * If this cpu is going down, then nothing needs to be done.
+	 */
+	if (!cpu_active(cpu))
+		return;
+
 	if (stop_tick) {
 		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 			return;
@@ -4914,6 +4931,18 @@ void select_nohz_load_balancer(int stop_tick)
 	}
 	return;
 }
+
+static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DYING:
+		clear_nohz_tick_stopped(smp_processor_id());
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -5070,11 +5099,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	 * busy tick after returning from idle, we will update the busy stats.
 	 */
 	set_cpu_sd_state_busy();
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-		atomic_dec(&nohz.nr_cpus);
-	}
+	clear_nohz_tick_stopped(cpu);
 
 	/*
 	 * None are in tickless mode and hence no need for NOHZ idle load
@@ -5590,6 +5615,7 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+	cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 3640ebbb466..f42ae7fb5ec 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1587,6 +1587,11 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	if (unlikely(task_running(rq, next_task)))
+		return 0;
+#endif
+
 retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1d7bca7f4f5..d117262deba 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -296,7 +296,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	if (__this_cpu_read(soft_watchdog_warn) == true)
 		return HRTIMER_RESTART;
 
-	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
+	printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 		smp_processor_id(), duration,
 		current->comm, task_pid_nr(current));
 	print_modules();