| author | Tomi Valkeinen <tomi.valkeinen@ti.com> | 2012-02-21 02:32:19 -0500 |
|---|---|---|
| committer | Tomi Valkeinen <tomi.valkeinen@ti.com> | 2012-02-21 02:32:19 -0500 |
| commit | 3f60db4bde17088feed5f143582d7661cdbb9a01 (patch) | |
| tree | 21a7866ae6d199cfa8f619ced9500687bdf84f18 /kernel | |
| parent | 5e36097889725dbe4f098c3f1e93cb2f21cae6ee (diff) | |
| parent | b01543dfe67bb1d191998e90d20534dc354de059 (diff) | |
Merge commit 'v3.3-rc4'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c           |   5
-rw-r--r--  kernel/events/callchain.c  |   2
-rw-r--r--  kernel/events/core.c       | 125
-rw-r--r--  kernel/exit.c              |  16
-rw-r--r--  kernel/fork.c              |  22
-rw-r--r--  kernel/kprobes.c           |   8
-rw-r--r--  kernel/params.c            |   3
-rw-r--r--  kernel/power/power.h       |  24
-rw-r--r--  kernel/power/process.c     |  26
-rw-r--r--  kernel/power/snapshot.c    |   3
-rw-r--r--  kernel/power/user.c        |  15
-rw-r--r--  kernel/rcutorture.c        |   8
-rw-r--r--  kernel/relay.c             |  10
-rw-r--r--  kernel/res_counter.c       |  25
-rw-r--r--  kernel/sched/core.c        |  19
-rw-r--r--  kernel/sched/cpupri.c      |   3
-rw-r--r--  kernel/sched/fair.c        |  34
-rw-r--r--  kernel/sched/rt.c          |   5
-rw-r--r--  kernel/watchdog.c          |   2

19 files changed, 270 insertions(+), 85 deletions(-)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index caaea6e944f8..af1de0f34eae 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1863,11 +1863,12 @@ void __audit_syscall_entry(int arch, int major,
 
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @pt_regs: syscall registers
+ * @success: success value of the syscall
+ * @return_code: return value of the syscall
  *
  * Tear down after system call. If the audit context has been marked as
  * auditable (either because of the AUDIT_RECORD_CONTEXT state from
- * filtering, or because some other part of the kernel write an audit
+ * filtering, or because some other part of the kernel wrote an audit
  * message), then write out the syscall information. In call cases,
  * free the names stored from getname().
  */
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 057e24b665cf..6581a040f399 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -115,8 +115,6 @@ int get_callchain_buffers(void)
 	}
 
 	err = alloc_callchain_buffers();
-	if (err)
-		release_callchain_buffers();
 exit:
 	mutex_unlock(&callchain_mutex);
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a8f4ac001a00..1b5c081d8b9f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -815,7 +815,7 @@ static void update_event_times(struct perf_event *event)
 	 * here.
 	 */
 	if (is_cgroup_event(event))
-		run_end = perf_event_time(event);
+		run_end = perf_cgroup_event_time(event);
 	else if (ctx->is_active)
 		run_end = ctx->time;
 	else
@@ -2300,7 +2300,10 @@ do { \
 	return div64_u64(dividend, divisor);
 }
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static DEFINE_PER_CPU(int, perf_throttled_count);
+static DEFINE_PER_CPU(u64, perf_throttled_seq);
+
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2319,22 +2322,40 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
+/*
+ * combine freq adjustment with unthrottling to avoid two passes over the
+ * events. At the same time, make sure, having freq events does not change
+ * the rate of unthrottling as that would introduce bias.
+ */
+static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
+					   int needs_unthr)
 {
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
-	u64 interrupts, now;
+	u64 now, period = TICK_NSEC;
 	s64 delta;
 
-	if (!ctx->nr_freq)
+	/*
+	 * only need to iterate over all events iff:
+	 * - context have events in frequency mode (needs freq adjust)
+	 * - there are events to unthrottle on this cpu
+	 */
+	if (!(ctx->nr_freq || needs_unthr))
 		return;
 
+	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
+
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -2344,13 +2365,8 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 
 		hwc = &event->hw;
 
-		interrupts = hwc->interrupts;
-		hwc->interrupts = 0;
-
-		/*
-		 * unthrottle events on the tick
-		 */
-		if (interrupts == MAX_INTERRUPTS) {
+		if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
+			hwc->interrupts = 0;
 			perf_log_throttle(event, 1);
 			event->pmu->start(event, 0);
 		}
@@ -2358,14 +2374,30 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
-		event->pmu->read(event);
+		/*
+		 * stop the event and update event->count
+		 */
+		event->pmu->stop(event, PERF_EF_UPDATE);
+
 		now = local64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
 		hwc->freq_count_stamp = now;
 
+		/*
+		 * restart the event
+		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
+		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
+
+		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
+
+	perf_pmu_enable(ctx->pmu);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -2388,16 +2420,13 @@ static void rotate_ctx(struct perf_event_context *ctx)
  */
 static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
-	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
 	struct perf_event_context *ctx = NULL;
-	int rotate = 0, remove = 1, freq = 0;
+	int rotate = 0, remove = 1;
 
 	if (cpuctx->ctx.nr_events) {
 		remove = 0;
 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
 			rotate = 1;
-		if (cpuctx->ctx.nr_freq)
-			freq = 1;
 	}
 
 	ctx = cpuctx->task_ctx;
@@ -2405,37 +2434,26 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 		remove = 0;
 		if (ctx->nr_events != ctx->nr_active)
 			rotate = 1;
-		if (ctx->nr_freq)
-			freq = 1;
 	}
 
-	if (!rotate && !freq)
+	if (!rotate)
 		goto done;
 
 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
 
-	if (freq) {
-		perf_ctx_adjust_freq(&cpuctx->ctx, interval);
-		if (ctx)
-			perf_ctx_adjust_freq(ctx, interval);
-	}
-
-	if (rotate) {
-		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-		if (ctx)
-			ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+	if (ctx)
+		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
-		rotate_ctx(&cpuctx->ctx);
-		if (ctx)
-			rotate_ctx(ctx);
+	rotate_ctx(&cpuctx->ctx);
+	if (ctx)
+		rotate_ctx(ctx);
 
-		perf_event_sched_in(cpuctx, ctx, current);
-	}
+	perf_event_sched_in(cpuctx, ctx, current);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
-
 done:
 	if (remove)
 		list_del_init(&cpuctx->rotation_list);
@@ -2445,10 +2463,22 @@ void perf_event_task_tick(void)
 {
 	struct list_head *head = &__get_cpu_var(rotation_list);
 	struct perf_cpu_context *cpuctx, *tmp;
+	struct perf_event_context *ctx;
+	int throttled;
 
 	WARN_ON(!irqs_disabled());
 
+	__this_cpu_inc(perf_throttled_seq);
+	throttled = __this_cpu_xchg(perf_throttled_count, 0);
+
 	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+		ctx = &cpuctx->ctx;
+		perf_adjust_freq_unthr_context(ctx, throttled);
+
+		ctx = cpuctx->task_ctx;
+		if (ctx)
+			perf_adjust_freq_unthr_context(ctx, throttled);
+
 		if (cpuctx->jiffies_interval == 1 ||
 				!(jiffies % cpuctx->jiffies_interval))
 			perf_rotate_context(cpuctx);
@@ -4509,6 +4539,7 @@ static int __perf_event_overflow(struct perf_event *event,
 {
 	int events = atomic_read(&event->event_limit);
 	struct hw_perf_event *hwc = &event->hw;
+	u64 seq;
 	int ret = 0;
 
 	/*
@@ -4518,14 +4549,20 @@ static int __perf_event_overflow(struct perf_event *event,
 	if (unlikely(!is_sampling_event(event)))
 		return 0;
 
-	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
-		if (throttle) {
+	seq = __this_cpu_read(perf_throttled_seq);
+	if (seq != hwc->interrupts_seq) {
+		hwc->interrupts_seq = seq;
+		hwc->interrupts = 1;
+	} else {
+		hwc->interrupts++;
+		if (unlikely(throttle
+			     && hwc->interrupts >= max_samples_per_tick)) {
+			__this_cpu_inc(perf_throttled_count);
 			hwc->interrupts = MAX_INTERRUPTS;
 			perf_log_throttle(event, 0);
 			ret = 1;
 		}
-	} else
-		hwc->interrupts++;
+	}
 
 	if (event->attr.freq) {
 		u64 now = perf_clock();
@@ -4534,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*
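The throttling rework above replaces the per-event overflow check with a per-CPU tick sequence (`perf_throttled_seq`) and a per-CPU count of throttled events (`perf_throttled_count`): the overflow path lazily resets an event's interrupt count when its stored sequence is stale, and the tick handler only walks the event lists when the count says something was actually throttled. The following stand-alone sketch is illustrative only (not kernel code; the names and the single-threaded model are simplifications) and shows the sequence-stamp idea in plain C:

```c
#include <stdio.h>

#define MAX_SAMPLES_PER_TICK 3

static unsigned long tick_seq;   /* stands in for perf_throttled_seq */
static int throttled_count;      /* stands in for perf_throttled_count */

struct event {
	unsigned long interrupts_seq;  /* last tick this event overflowed in */
	unsigned int interrupts;       /* samples taken during that tick */
	int throttled;
};

/* "Overflow" path: returns 1 if the event must be throttled. */
static int event_overflow(struct event *e)
{
	if (e->interrupts_seq != tick_seq) {
		e->interrupts_seq = tick_seq;  /* new tick: restart counting lazily */
		e->interrupts = 1;
	} else if (++e->interrupts >= MAX_SAMPLES_PER_TICK) {
		throttled_count++;             /* leave work for the next tick */
		e->throttled = 1;
		return 1;
	}
	return 0;
}

/* Timer tick: unthrottle only if something was actually throttled. */
static void tick(struct event *e)
{
	int need_unthrottle = throttled_count;

	throttled_count = 0;
	tick_seq++;
	if (need_unthrottle && e->throttled) {
		e->throttled = 0;
		printf("unthrottled after %u samples\n", e->interrupts);
	}
}

int main(void)
{
	struct event e = { 0 };

	for (int i = 0; i < 5; i++)
		if (event_overflow(&e))
			printf("sample %d throttled\n", i);
	tick(&e);
	return 0;
}
```

The point of the sequence stamp is that idle events never need an explicit per-tick reset; their counters are reset the next time they actually overflow.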
diff --git a/kernel/exit.c b/kernel/exit.c
index 294b1709170d..4b4042f9bc6a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1038,6 +1038,22 @@ void do_exit(long code)
 	if (tsk->nr_dirtied)
 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
 	exit_rcu();
+
+	/*
+	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
+	 * when the following two conditions become true.
+	 *   - There is race condition of mmap_sem (It is acquired by
+	 *     exit_mm()), and
+	 *   - SMI occurs before setting TASK_RUNINNG.
+	 *     (or hypervisor of virtual machine switches to other guest)
+	 *  As a result, we may become TASK_RUNNING after becoming TASK_DEAD
+	 *
+	 * To avoid it, we have to wait for releasing tsk->pi_lock which
+	 * is held by try_to_wake_up()
+	 */
+	smp_mb();
+	raw_spin_unlock_wait(&tsk->pi_lock);
+
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
 	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
diff --git a/kernel/fork.c b/kernel/fork.c
index 051f090d40c1..b77fd559c78e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -647,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+	struct mm_struct *mm;
+	int err;
+
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, mode)) {
+		mmput(mm);
+		mm = ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -890,7 +910,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 			return -ENOMEM;
 
 		new_ioc->ioprio = ioc->ioprio;
-		put_io_context(new_ioc, NULL);
+		put_io_context(new_ioc);
 	}
 #endif
 	return 0;
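For orientation, an illustrative sketch (not part of this diff): the new `mm_access()` helper returns a referenced `mm_struct`, `NULL` when the task has no address space, or an `ERR_PTR()` value such as `ERR_PTR(-EACCES)` when the ptrace check fails, so a hypothetical in-kernel caller has to distinguish all three outcomes and drop the reference it received:

```c
/*
 * Hypothetical caller, for illustration only. inspect_task_mm() is a
 * made-up name; the error handling mirrors the return contract visible
 * in mm_access() above.
 */
static int inspect_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = mm_access(task, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);	/* e.g. interrupted lock or -EACCES */
	if (!mm)
		return -ENOENT;		/* kernel thread: no user mm */

	/* ... read whatever is needed from *mm here ... */

	mmput(mm);			/* drop the reference taken by get_task_mm() */
	return 0;
}
```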
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 95dd7212e610..9788c0ec6f43 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 		/* Early boot. kretprobe_table_locks not yet initialized. */
 		return;
 
+	INIT_HLIST_HEAD(&empty_rp);
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
 	kretprobe_table_lock(hash, &flags);
@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 			recycle_rp_inst(ri, &empty_rp);
 	}
 	kretprobe_table_unlock(hash, &flags);
-	INIT_HLIST_HEAD(&empty_rp);
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
@@ -1673,8 +1673,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 		ri->rp = rp;
 		ri->task = current;
 
-		if (rp->entry_handler && rp->entry_handler(ri, regs))
+		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+			raw_spin_lock_irqsave(&rp->lock, flags);
+			hlist_add_head(&ri->hlist, &rp->free_instances);
+			raw_spin_unlock_irqrestore(&rp->lock, flags);
 			return 0;
+		}
 
 		arch_prepare_kretprobe(ri, regs);
 
diff --git a/kernel/params.c b/kernel/params.c
index 32ee04308285..4bc965d8a1fe 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -97,7 +97,8 @@ static int parse_one(char *param,
 	for (i = 0; i < num_params; i++) {
 		if (parameq(param, params[i].name)) {
 			/* No one handled NULL, so do it here. */
-			if (!val && params[i].ops->set != param_set_bool)
+			if (!val && params[i].ops->set != param_set_bool
+			    && params[i].ops->set != param_set_bint)
 				return -EINVAL;
 			pr_debug("They are equal! Calling %p\n",
 				 params[i].ops->set);
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 0c4defe6d3b8..21724eee5206 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -231,8 +231,28 @@ extern int pm_test_level;
 #ifdef CONFIG_SUSPEND_FREEZER
 static inline int suspend_freeze_processes(void)
 {
-	int error = freeze_processes();
-	return error ? : freeze_kernel_threads();
+	int error;
+
+	error = freeze_processes();
+
+	/*
+	 * freeze_processes() automatically thaws every task if freezing
+	 * fails. So we need not do anything extra upon error.
+	 */
+	if (error)
+		goto Finish;
+
+	error = freeze_kernel_threads();
+
+	/*
+	 * freeze_kernel_threads() thaws only kernel threads upon freezing
+	 * failure. So we have to thaw the userspace tasks ourselves.
+	 */
+	if (error)
+		thaw_processes();
+
+ Finish:
+	return error;
 }
 
 static inline void suspend_thaw_processes(void)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 77274c9ba2f1..7e426459e60a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -143,7 +143,10 @@ int freeze_processes(void)
 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
  *
- * On success, returns 0. On failure, -errno and system is fully thawed.
+ * On success, returns 0. On failure, -errno and only the kernel threads are
+ * thawed, so as to give a chance to the caller to do additional cleanups
+ * (if any) before thawing the userspace tasks. So, it is the responsibility
+ * of the caller to thaw the userspace tasks, when the time is right.
  */
 int freeze_kernel_threads(void)
 {
@@ -159,7 +162,7 @@ int freeze_kernel_threads(void)
 	BUG_ON(in_atomic());
 
 	if (error)
-		thaw_processes();
+		thaw_kernel_threads();
 	return error;
 }
 
@@ -188,3 +191,22 @@
 	printk("done.\n");
 }
 
+void thaw_kernel_threads(void)
+{
+	struct task_struct *g, *p;
+
+	pm_nosig_freezing = false;
+	printk("Restarting kernel threads ... ");
+
+	thaw_workqueues();
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
+			__thaw_task(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+
+	schedule();
+	printk("done.\n");
+}
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1cf88900ec4f..6a768e537001 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -812,7 +812,8 @@ unsigned int snapshot_additional_pages(struct zone *zone)
 	unsigned int res;
 
 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
+	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
+			    LINKED_PAGE_DATA_SIZE);
 	return 2 * res;
 }
 
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6b1ab7a88522..3e100075b13c 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -249,13 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		}
 		pm_restore_gfp_mask();
 		error = hibernation_snapshot(data->platform_support);
-		if (!error) {
+		if (error) {
+			thaw_kernel_threads();
+		} else {
 			error = put_user(in_suspend, (int __user *)arg);
 			if (!error && !freezer_test_done)
 				data->ready = 1;
 			if (freezer_test_done) {
 				freezer_test_done = false;
-				thaw_processes();
+				thaw_kernel_threads();
 			}
 		}
 		break;
@@ -274,6 +276,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		swsusp_free();
 		memset(&data->handle, 0, sizeof(struct snapshot_handle));
 		data->ready = 0;
+		/*
+		 * It is necessary to thaw kernel threads here, because
+		 * SNAPSHOT_CREATE_IMAGE may be invoked directly after
+		 * SNAPSHOT_FREE. In that case, if kernel threads were not
+		 * thawed, the preallocation of memory carried out by
+		 * hibernation_snapshot() might run into problems (i.e. it
+		 * might fail or even deadlock).
+		 */
+		thaw_kernel_threads();
 		break;
 
 	case SNAPSHOT_PREF_IMAGE_SIZE:
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 88f17b8a3b1d..a58ac285fc69 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -56,8 +56,8 @@ static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
 static int nfakewriters = 4;	/* # fake writer threads */
 static int stat_interval;	/* Interval between stats, in seconds. */
 				/* Defaults to "only at end of test". */
-static int verbose;		/* Print more debug info. */
-static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
+static bool verbose;		/* Print more debug info. */
+static bool test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
 static int stutter = 5;		/* Start/stop testing interval (in sec) */
 static int irqreader = 1;	/* RCU readers from irq (timers). */
@@ -1399,7 +1399,7 @@ rcu_torture_shutdown(void *arg)
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
-static int
+static int __cpuinit
 rcu_torture_onoff(void *arg)
 {
 	int cpu;
@@ -1447,7 +1447,7 @@ rcu_torture_onoff(void *arg)
 	return 0;
 }
 
-static int
+static int __cpuinit
 rcu_torture_onoff_init(void)
 {
 	if (onoff_interval <= 0)
diff --git a/kernel/relay.c b/kernel/relay.c
index 4335e1d7ee2d..ab56a1764d4d 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -164,10 +164,14 @@ depopulate:
  */
 static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
-	struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
-	if (!buf)
+	struct rchan_buf *buf;
+
+	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
 		return NULL;
 
+	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
 	buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
 	if (!buf->padding)
 		goto free_buf;
@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
 
 	if (!(subbuf_size && n_subbufs))
 		return NULL;
+	if (subbuf_size > UINT_MAX / n_subbufs)
+		return NULL;
 
 	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
 	if (!chan)
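Both relay.c hunks above add the same kind of guard: a multiplication (`n_subbufs * sizeof(size_t *)`, `subbuf_size * n_subbufs`) is checked against `UINT_MAX` before the result reaches an allocator, so an oversized request fails cleanly instead of wrapping to a small allocation. A minimal user-space illustration of the pattern (illustrative only, not kernel code):

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate count * size bytes, refusing requests whose product would wrap. */
static void *alloc_array_checked(size_t count, size_t size)
{
	if (size != 0 && count > UINT_MAX / size)
		return NULL;	/* product would overflow, as in relay_create_buf() */
	return malloc(count * size);
}

int main(void)
{
	/* A huge count is rejected instead of silently wrapping to a tiny size. */
	if (!alloc_array_checked((size_t)UINT_MAX, sizeof(size_t *)))
		printf("overflow detected, allocation refused\n");

	void *ok = alloc_array_checked(128, sizeof(size_t *));
	printf("normal allocation %s\n", ok ? "succeeded" : "failed");
	free(ok);
	return 0;
}
```

Dividing the limit by one factor and comparing the other factor against the quotient avoids ever performing the overflowing multiplication.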
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 6d269cce7aa1..d508363858b3 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -66,6 +66,31 @@ done:
 	return ret;
 }
 
+int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
+			      struct res_counter **limit_fail_at)
+{
+	int ret, r;
+	unsigned long flags;
+	struct res_counter *c;
+
+	r = ret = 0;
+	*limit_fail_at = NULL;
+	local_irq_save(flags);
+	for (c = counter; c != NULL; c = c->parent) {
+		spin_lock(&c->lock);
+		r = res_counter_charge_locked(c, val);
+		if (r)
+			c->usage += val;
+		spin_unlock(&c->lock);
+		if (r < 0 && ret == 0) {
+			*limit_fail_at = c;
+			ret = r;
+		}
+	}
+	local_irq_restore(flags);
+
+	return ret;
+}
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 {
 	if (WARN_ON(counter->usage < val))
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df00cb09263e..5255c9d2e053 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
@@ -723,9 +724,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -734,9 +732,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -4134,7 +4129,7 @@ recheck:
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
 
@@ -4147,7 +4142,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4993,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		deactivate_task(rq_src, p, 0);
+		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		enqueue_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7032,10 +7027,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
 	on_rq = p->on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
 
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index b0d798eaf130..d72586fdf660 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -129,7 +129,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
  * cpupri_set - update the cpu priority setting
  * @cp: The cpupri context
  * @cpu: The target cpu
- * @pri: The priority (INVALID-RT99) to assign to this CPU
+ * @newpri: The priority (INVALID-RT99) to assign to this CPU
  *
  * Note: Assumes cpu_rq(cpu)->lock is locked
  *
@@ -200,7 +200,6 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 /**
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
- * @bootmem: true if allocations need to use bootmem
  *
  * Returns: -ENOMEM if memory fails.
  */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 84adb2d66cbd..7c6414fc669d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4866,6 +4866,15 @@ static void nohz_balancer_kick(int cpu)
 	return;
 }
 
+static inline void clear_nohz_tick_stopped(int cpu)
+{
+	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+		atomic_dec(&nohz.nr_cpus);
+		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+	}
+}
+
 static inline void set_cpu_sd_state_busy(void)
 {
 	struct sched_domain *sd;
@@ -4904,6 +4913,12 @@ void select_nohz_load_balancer(int stop_tick)
 {
 	int cpu = smp_processor_id();
 
+	/*
+	 * If this cpu is going down, then nothing needs to be done.
+	 */
+	if (!cpu_active(cpu))
+		return;
+
 	if (stop_tick) {
 		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 			return;
@@ -4914,6 +4929,18 @@ void select_nohz_load_balancer(int stop_tick)
 	}
 	return;
 }
+
+static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DYING:
+		clear_nohz_tick_stopped(smp_processor_id());
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -5070,11 +5097,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	 * busy tick after returning from idle, we will update the busy stats.
 	 */
 	set_cpu_sd_state_busy();
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-		atomic_dec(&nohz.nr_cpus);
-	}
+	clear_nohz_tick_stopped(cpu);
 
 	/*
 	 * None are in tickless mode and hence no need for NOHZ idle load
@@ -5590,6 +5613,7 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+	cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 3640ebbb466b..f42ae7fb5ec5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1587,6 +1587,11 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	if (unlikely(task_running(rq, next_task)))
+		return 0;
+#endif
+
 retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1d7bca7f4f52..d117262deba3 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -296,7 +296,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
-		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
+		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 			smp_processor_id(), duration,
 			current->comm, task_pid_nr(current));
 		print_modules();
