Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                       |  6
-rw-r--r--  kernel/bpf/core.c                     | 12
-rw-r--r--  kernel/compat.c                       |  6
-rw-r--r--  kernel/events/core.c                  | 44
-rw-r--r--  kernel/events/ring_buffer.c           | 14
-rw-r--r--  kernel/irq/dummychip.c                |  1
-rw-r--r--  kernel/kexec.c                        |  2
-rw-r--r--  kernel/locking/lockdep.c              |  3
-rw-r--r--  kernel/locking/lockdep_proc.c         | 22
-rw-r--r--  kernel/locking/rtmutex.c              | 12
-rw-r--r--  kernel/module.c                       |  3
-rw-r--r--  kernel/rcu/tree.c                     | 16
-rw-r--r--  kernel/sched/core.c                   | 74
-rw-r--r--  kernel/sched/fair.c                   |  2
-rw-r--r--  kernel/sched/idle.c                   | 16
-rw-r--r--  kernel/time/clockevents.c             |  6
-rw-r--r--  kernel/time/hrtimer.c                 | 14
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c  |  2
-rw-r--r--  kernel/trace/trace_output.c           |  3
-rw-r--r--  kernel/watchdog.c                     | 20
20 files changed, 159 insertions(+), 119 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 0f8f8b0bc1bf..60c302cfb4d3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -197,9 +197,9 @@ x509.genkey:
 	@echo >>x509.genkey "x509_extensions = myexts"
 	@echo >>x509.genkey
 	@echo >>x509.genkey "[ req_distinguished_name ]"
-	@echo >>x509.genkey "O = Magrathea"
-	@echo >>x509.genkey "CN = Glacier signing key"
-	@echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2"
+	@echo >>x509.genkey "#O = Unspecified company"
+	@echo >>x509.genkey "CN = Build time autogenerated kernel key"
+	@echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company"
 	@echo >>x509.genkey
 	@echo >>x509.genkey "[ myexts ]"
 	@echo >>x509.genkey "basicConstraints=critical,CA:FALSE"
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4139a0f8b558..54f0e7fcd0e2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -357,8 +357,8 @@ select_insn:
 	ALU64_MOD_X:
 		if (unlikely(SRC == 0))
 			return 0;
-		tmp = DST;
-		DST = do_div(tmp, SRC);
+		div64_u64_rem(DST, SRC, &tmp);
+		DST = tmp;
 		CONT;
 	ALU_MOD_X:
 		if (unlikely(SRC == 0))
@@ -367,8 +367,8 @@ select_insn:
 		DST = do_div(tmp, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		tmp = DST;
-		DST = do_div(tmp, IMM);
+		div64_u64_rem(DST, IMM, &tmp);
+		DST = tmp;
 		CONT;
 	ALU_MOD_K:
 		tmp = (u32) DST;
@@ -377,7 +377,7 @@ select_insn:
 	ALU64_DIV_X:
 		if (unlikely(SRC == 0))
 			return 0;
-		do_div(DST, SRC);
+		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
 		if (unlikely(SRC == 0))
@@ -387,7 +387,7 @@ select_insn:
 		DST = (u32) tmp;
 		CONT;
 	ALU64_DIV_K:
-		do_div(DST, IMM);
+		DST = div64_u64(DST, IMM);
 		CONT;
 	ALU_DIV_K:
 		tmp = (u32) DST;
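A note on the helpers used in the kernel/bpf/core.c hunks above: do_div(n, base) divides a 64-bit n in place by a 32-bit base and returns the remainder, so passing the 64-bit SRC or IMM as the divisor truncates it on 32-bit architectures, while div64_u64() and div64_u64_rem() accept a full 64-bit divisor. The snippet below is only a hedged userspace sketch of those semantics; the uint64_t stand-ins are mine, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins mirroring the kernel helpers' calling conventions. */
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;		/* full 64-bit quotient */
}

static uint64_t div64_u64_rem(uint64_t dividend, uint64_t divisor,
			      uint64_t *remainder)
{
	*remainder = dividend % divisor;	/* full 64-bit remainder */
	return dividend / divisor;
}

int main(void)
{
	uint64_t rem;

	/* A divisor wider than 32 bits would be truncated by do_div(). */
	div64_u64_rem(10ULL << 32, (3ULL << 32) + 1, &rem);
	printf("remainder = %llu\n", (unsigned long long)rem);
	return 0;
}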
diff --git a/kernel/compat.c b/kernel/compat.c
index 24f00610c575..333d364be29d 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -912,7 +912,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 			 * bitmap. We must however ensure the end of the
 			 * kernel bitmap is zeroed.
 			 */
-			if (nr_compat_longs-- > 0) {
+			if (nr_compat_longs) {
+				nr_compat_longs--;
 				if (__get_user(um, umask))
 					return -EFAULT;
 			} else {
@@ -954,7 +955,8 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
 			 * We dont want to write past the end of the userspace
 			 * bitmap.
 			 */
-			if (nr_compat_longs-- > 0) {
+			if (nr_compat_longs) {
+				nr_compat_longs--;
 				if (__put_user(um, umask))
 					return -EFAULT;
 			}
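For context on the rewritten guard in kernel/compat.c: `nr_compat_longs-- > 0` post-decrements even when the test fails, so once the counter hits zero an unsigned counter wraps around and later iterations pass the check again, touching user memory past the bitmap. Checking first and decrementing only in the taken branch avoids that. Below is a hedged, stand-alone illustration of the wrap-around; the unsigned type is an assumption made for the demo.

#include <stdio.h>

int main(void)
{
	unsigned long nr = 1;	/* pretend one long remains in the user bitmap */
	int reads = 0;

	/* Old pattern: the decrement happens even when the test fails. */
	for (int i = 0; i < 3; i++) {
		if (nr-- > 0)
			reads++;	/* would call __get_user() here */
	}

	/* After nr reached 0 it wrapped to ULONG_MAX, so the guard passed
	 * again on the third pass: reads is 2 instead of the expected 1. */
	printf("reads=%d nr=%lu\n", reads, nr);
	return 0;
}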
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 81aa3a4ece9f..eddf1ed4155e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -913,10 +913,30 @@ static void put_ctx(struct perf_event_context *ctx)
  * Those places that change perf_event::ctx will hold both
  * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
  *
- * Lock ordering is by mutex address. There is one other site where
- * perf_event_context::mutex nests and that is put_event(). But remember that
- * that is a parent<->child context relation, and migration does not affect
- * children, therefore these two orderings should not interact.
+ * Lock ordering is by mutex address. There are two other sites where
+ * perf_event_context::mutex nests and those are:
+ *
+ *  - perf_event_exit_task_context()	[ child , 0 ]
+ *      __perf_event_exit_task()
+ *        sync_child_event()
+ *          put_event()			[ parent, 1 ]
+ *
+ *  - perf_event_init_context()		[ parent, 0 ]
+ *      inherit_task_group()
+ *        inherit_group()
+ *          inherit_event()
+ *            perf_event_alloc()
+ *              perf_init_event()
+ *                perf_try_init_event()	[ child , 1 ]
+ *
+ * While it appears there is an obvious deadlock here -- the parent and child
+ * nesting levels are inverted between the two. This is in fact safe because
+ * life-time rules separate them. That is an exiting task cannot fork, and a
+ * spawning task cannot (yet) exit.
+ *
+ * But remember that that these are parent<->child context relations, and
+ * migration does not affect children, therefore these two orderings should not
+ * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
@@ -3422,7 +3442,6 @@ static void free_event_rcu(struct rcu_head *head)
 	if (event->ns)
 		put_pid_ns(event->ns);
 	perf_event_free_filter(event);
-	perf_event_free_bpf_prog(event);
 	kfree(event);
 }
 
@@ -3553,6 +3572,8 @@ static void __free_event(struct perf_event *event)
 		put_callchain_buffers();
 	}
 
+	perf_event_free_bpf_prog(event);
+
 	if (event->destroy)
 		event->destroy(event);
 
@@ -3657,9 +3678,6 @@ static void perf_remove_from_owner(struct perf_event *event)
 	}
 }
 
-/*
- * Called when the last reference to the file is gone.
- */
 static void put_event(struct perf_event *event)
 {
 	struct perf_event_context *ctx;
@@ -3697,6 +3715,9 @@ int perf_event_release_kernel(struct perf_event *event)
 }
 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 
+/*
+ * Called when the last reference to the file is gone.
+ */
 static int perf_release(struct inode *inode, struct file *file)
 {
 	put_event(file->private_data);
@@ -7364,7 +7385,12 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 		return -ENODEV;
 
 	if (event->group_leader != event) {
-		ctx = perf_event_ctx_lock(event->group_leader);
+		/*
+		 * This ctx->mutex can nest when we're called through
+		 * inheritance. See the perf_event_ctx_lock_nested() comment.
+		 */
+		ctx = perf_event_ctx_lock_nested(event->group_leader,
+						 SINGLE_DEPTH_NESTING);
 		BUG_ON(!ctx);
 	}
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 232f00f273cb..725c416085e3 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -493,6 +493,20 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 		rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
 	}
 
+	/*
+	 * In overwrite mode, PMUs that don't support SG may not handle more
+	 * than one contiguous allocation, since they rely on PMI to do double
+	 * buffering. In this case, the entire buffer has to be one contiguous
+	 * chunk.
+	 */
+	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
+	    overwrite) {
+		struct page *page = virt_to_page(rb->aux_pages[0]);
+
+		if (page_private(page) != max_order)
+			goto out;
+	}
+
 	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
 					     overwrite);
 	if (!rb->aux_priv)
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
index 988dc58e8847..2feb6feca0cc 100644
--- a/kernel/irq/dummychip.c
+++ b/kernel/irq/dummychip.c
@@ -57,5 +57,6 @@ struct irq_chip dummy_irq_chip = {
 	.irq_ack = noop,
 	.irq_mask = noop,
 	.irq_unmask = noop,
+	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 EXPORT_SYMBOL_GPL(dummy_irq_chip);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 38c25b1f2fd5..7a36fdcca5bf 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 	do {
 		unsigned long pfn, epfn, addr, eaddr;
 
-		pages = kimage_alloc_pages(GFP_KERNEL, order);
+		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
 		if (!pages)
 			break;
 		pfn   = page_to_pfn(pages);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a0831e1b99f4..aaeae885d9af 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3900,7 +3900,8 @@ static void zap_class(struct lock_class *class)
 	list_del_rcu(&class->hash_entry);
 	list_del_rcu(&class->lock_entry);
 
-	class->key = NULL;
+	RCU_INIT_POINTER(class->key, NULL);
+	RCU_INIT_POINTER(class->name, NULL);
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index ef43ac4bafb5..d83d798bef95 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -426,10 +426,12 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
 
 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 {
-	char name[39];
-	struct lock_class *class;
+	struct lockdep_subclass_key *ckey;
 	struct lock_class_stats *stats;
+	struct lock_class *class;
+	const char *cname;
 	int i, namelen;
+	char name[39];
 
 	class = data->class;
 	stats = &data->stats;
@@ -440,15 +442,25 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 	if (class->subclass)
 		namelen -= 2;
 
-	if (!class->name) {
+	rcu_read_lock_sched();
+	cname = rcu_dereference_sched(class->name);
+	ckey = rcu_dereference_sched(class->key);
+
+	if (!cname && !ckey) {
+		rcu_read_unlock_sched();
+		return;
+
+	} else if (!cname) {
 		char str[KSYM_NAME_LEN];
 		const char *key_name;
 
-		key_name = __get_key_name(class->key, str);
+		key_name = __get_key_name(ckey, str);
 		snprintf(name, namelen, "%s", key_name);
 	} else {
-		snprintf(name, namelen, "%s", class->name);
+		snprintf(name, namelen, "%s", cname);
 	}
+	rcu_read_unlock_sched();
+
 	namelen = strlen(name);
 	if (class->name_version > 1) {
 		snprintf(name+namelen, 3, "#%d", class->name_version);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b73279367087..b025295f4966 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
 }
 
 /*
- * Called by sched_setscheduler() to check whether the priority change
- * is overruled by a possible priority boosting.
+ * Called by sched_setscheduler() to get the priority which will be
+ * effective after the change.
 */
-int rt_mutex_check_prio(struct task_struct *task, int newprio)
+int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
 {
 	if (!task_has_pi_waiters(task))
-		return 0;
+		return newprio;
 
-	return task_top_pi_waiter(task)->task->prio <= newprio;
+	if (task_top_pi_waiter(task)->task->prio <= newprio)
+		return task_top_pi_waiter(task)->task->prio;
+	return newprio;
 }
 
 /*
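As a reading aid for the kernel/locking/rtmutex.c change: instead of answering yes or no to "does a boost overrule this priority change?", the helper now reports the priority that will actually be in effect, which is the requested newprio unless the top pi waiter outranks it (kernel prio values are numerically lower for higher priority). The function below is only an illustrative stand-in for that decision, not the kernel implementation.

#include <stdio.h>

/* Illustrative stand-in: kernel prio values are "lower is higher priority". */
static int effective_prio(int has_pi_waiters, int top_waiter_prio, int newprio)
{
	if (!has_pi_waiters)
		return newprio;
	/* Keep the boost when a waiter's prio is higher (numerically lower). */
	return top_waiter_prio <= newprio ? top_waiter_prio : newprio;
}

int main(void)
{
	/* Boosted to 10 by a waiter; user asks for 50: the boost is kept. */
	printf("%d\n", effective_prio(1, 10, 50));	/* prints 10 */
	/* User asks for 5, which outranks the waiter: the new prio wins. */
	printf("%d\n", effective_prio(1, 10, 5));	/* prints 5 */
	return 0;
}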
diff --git a/kernel/module.c b/kernel/module.c
index 42a1d2afb217..cfc9e843a924 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	module_bug_cleanup(mod);
 	mutex_unlock(&module_mutex);
 
+	blocking_notifier_call_chain(&module_notify_list,
+				     MODULE_STATE_GOING, mod);
+
 	/* we can't deallocate the module until we clear memory protection */
 	unset_module_init_ro_nx(mod);
 	unset_module_core_ro_nx(mod);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 233165da782f..8cf7304b2867 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -162,11 +162,14 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
 module_param(kthread_prio, int, 0644);
 
-/* Delay in jiffies for grace-period initialization delays. */
-static int gp_init_delay = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT)
-				? CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY
-				: 0;
+/* Delay in jiffies for grace-period initialization delays, debug only. */
+#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
+static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
 module_param(gp_init_delay, int, 0644);
+#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
+static const int gp_init_delay;
+#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
+#define PER_RCU_NODE_PERIOD 10	/* Number of grace periods between delays. */
 
 /*
 * Track the rcutorture test sequence number and the update version
@@ -1843,9 +1846,8 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
 		ACCESS_ONCE(rsp->gp_activity) = jiffies;
-		if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) &&
-		    gp_init_delay > 0 &&
-		    !(rsp->gpnum % (rcu_num_nodes * 10)))
+		if (gp_init_delay > 0 &&
+		    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
 			schedule_timeout_uninterruptible(gp_init_delay);
 	}
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9123a82cbb6..123673291ffb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq_clock_skip_update(rq, true);
 }
 
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
-		struct task_migration_notifier tmn;
-
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-
-		tmn.task = p;
-		tmn.from_cpu = task_cpu(p);
-		tmn.to_cpu = new_cpu;
-
-		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
@@ -3315,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p,
 
 /* Actually do priority change: must hold pi & rq lock. */
 static void __setscheduler(struct rq *rq, struct task_struct *p,
-			   const struct sched_attr *attr)
+			   const struct sched_attr *attr, bool keep_boost)
 {
 	__setscheduler_params(p, attr);
 
 	/*
-	 * If we get here, there was no pi waiters boosting the
-	 * task. It is safe to use the normal prio.
+	 * Keep a potential priority boosting if called from
+	 * sched_setscheduler().
 	 */
-	p->prio = normal_prio(p);
+	if (keep_boost)
+		p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+	else
+		p->prio = normal_prio(p);
 
 	if (dl_prio(p->prio))
 		p->sched_class = &dl_sched_class;
@@ -3423,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
 	int retval, oldprio, oldpolicy = -1, queued, running;
-	int policy = attr->sched_policy;
+	int new_effective_prio, policy = attr->sched_policy;
 	unsigned long flags;
 	const struct sched_class *prev_class;
 	struct rq *rq;
@@ -3605,15 +3593,14 @@ change:
 	oldprio = p->prio;
 
 	/*
-	 * Special case for priority boosted tasks.
-	 *
-	 * If the new priority is lower or equal (user space view)
-	 * than the current (boosted) priority, we just store the new
+	 * Take priority boosted tasks into account. If the new
+	 * effective priority is unchanged, we just store the new
 	 * normal parameters and do not touch the scheduler class and
 	 * the runqueue. This will be done when the task deboost
 	 * itself.
 	 */
-	if (rt_mutex_check_prio(p, newprio)) {
+	new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+	if (new_effective_prio == oldprio) {
 		__setscheduler_params(p, attr);
 		task_rq_unlock(rq, p, &flags);
 		return 0;
@@ -3627,7 +3614,7 @@ change:
 		put_prev_task(rq, p);
 
 	prev_class = p->sched_class;
-	__setscheduler(rq, p, attr);
+	__setscheduler(rq, p, attr, true);
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
@@ -4402,10 +4389,7 @@ long __sched io_schedule_timeout(long timeout)
 	long ret;
 
 	current->in_iowait = 1;
-	if (old_iowait)
-		blk_schedule_flush_plug(current);
-	else
-		blk_flush_plug(current);
+	blk_schedule_flush_plug(current);
 
 	delayacct_blkio_start();
 	rq = raw_rq();
@@ -7012,27 +6996,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 	unsigned long flags;
 	long cpu = (long)hcpu;
 	struct dl_bw *dl_b;
+	bool overflow;
+	int cpus;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
+	switch (action) {
 	case CPU_DOWN_PREPARE:
-		/* explicitly allow suspend */
-		if (!(action & CPU_TASKS_FROZEN)) {
-			bool overflow;
-			int cpus;
-
-			rcu_read_lock_sched();
-			dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
 		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		cpus = dl_bw_cpus(cpu);
 		overflow = __dl_overflow(dl_b, cpus, 0, 0);
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
 		rcu_read_unlock_sched();
 
 		if (overflow)
 			return notifier_from_errno(-EBUSY);
-		}
 		cpuset_update_active_cpus(false);
 		break;
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -7361,7 +7341,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	queued = task_on_rq_queued(p);
 	if (queued)
 		dequeue_task(rq, p, 0);
-	__setscheduler(rq, p, &attr);
+	__setscheduler(rq, p, &attr, false);
 	if (queued) {
 		enqueue_task(rq, p, 0);
 		resched_curr(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ffeaa4105e48..c2980e8733bc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work)
 	}
 	for (; vma; vma = vma->vm_next) {
 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
-			is_vm_hugetlb_page(vma)) {
+			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
 			continue;
 		}
 
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index deef1caa94c6..fefcb1fa5160 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -81,7 +81,6 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
-	unsigned int broadcast;
 	bool reflect;
 
 	/*
@@ -150,17 +149,6 @@ static void cpuidle_idle_call(void)
 		goto exit_idle;
 	}
 
-	broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;
-
-	/*
-	 * Tell the time framework to switch to a broadcast timer
-	 * because our local timer will be shutdown. If a local timer
-	 * is used from another cpu as a broadcast timer, this call may
-	 * fail if it is not available
-	 */
-	if (broadcast && tick_broadcast_enter())
-		goto use_default;
-
 	/* Take note of the planned idle state. */
 	idle_set_state(this_rq(), &drv->states[next_state]);
 
@@ -174,8 +162,8 @@ static void cpuidle_idle_call(void)
 	/* The cpu is no longer idle or about to enter idle. */
 	idle_set_state(this_rq(), NULL);
 
-	if (broadcast)
-		tick_broadcast_exit();
+	if (entered_state == -EBUSY)
+		goto use_default;
 
 	/*
 	 * Give the governor an opportunity to reflect on the outcome
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 11dc22a6983b..637a09461c1d 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -117,11 +117,7 @@ static int __clockevents_set_state(struct clock_event_device *dev,
 	/* Transition with new state-specific callbacks */
 	switch (state) {
 	case CLOCK_EVT_STATE_DETACHED:
-		/*
-		 * This is an internal state, which is guaranteed to go from
-		 * SHUTDOWN to DETACHED. No driver interaction required.
-		 */
-		return 0;
+		/* The clockevent device is getting replaced. Shut it down. */
 
 	case CLOCK_EVT_STATE_SHUTDOWN:
 		return dev->set_state_shutdown(dev);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 76d4bd962b19..93ef7190bdea 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 /*
  * Divide a ktime value by a nanosecond value
  */
-u64 __ktime_divns(const ktime_t kt, s64 div)
+s64 __ktime_divns(const ktime_t kt, s64 div)
 {
-	u64 dclc;
 	int sft = 0;
+	s64 dclc;
+	u64 tmp;
 
 	dclc = ktime_to_ns(kt);
+	tmp = dclc < 0 ? -dclc : dclc;
+
 	/* Make sure the divisor is less than 2^32: */
 	while (div >> 32) {
 		sft++;
 		div >>= 1;
 	}
-	dclc >>= sft;
-	do_div(dclc, (unsigned long) div);
-
-	return dclc;
+	tmp >>= sft;
+	do_div(tmp, (unsigned long) div);
+	return dclc < 0 ? -tmp : tmp;
 }
 EXPORT_SYMBOL_GPL(__ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
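Why the kernel/time/hrtimer.c hunk splits out an unsigned tmp: do_div() performs unsigned division, so a negative ktime value would be reinterpreted as a huge unsigned number and the result would be garbage. The new code divides the absolute value and restores the sign afterwards, and the return type becomes s64 so the sign survives. Below is a hedged userspace sketch of that flow; a plain / stands in for do_div().

#include <stdint.h>
#include <stdio.h>

/* Sketch of the fixed flow: divide |kt| and put the sign back afterwards. */
static int64_t ktime_divns_sketch(int64_t kt_ns, int64_t div)
{
	int sft = 0;
	uint64_t tmp = kt_ns < 0 ? -(uint64_t)kt_ns : (uint64_t)kt_ns;

	/* Make sure the divisor fits in 32 bits, as the kernel code does. */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;
	tmp /= (uint64_t)div;		/* stands in for do_div() */

	return kt_ns < 0 ? -(int64_t)tmp : (int64_t)tmp;
}

int main(void)
{
	/* -3 ms divided by 1 ms per slot yields -3, not a huge positive value. */
	printf("%lld\n", (long long)ktime_divns_sketch(-3000000, 1000000));
	return 0;
}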
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 13d945c0d03f..1b28df2d9104 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void)
 
 	if (producer_fifo >= 0) {
 		struct sched_param param = {
-			.sched_priority = consumer_fifo
+			.sched_priority = producer_fifo
 		};
 		sched_setscheduler(producer, SCHED_FIFO, &param);
 	} else
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 692bf7184c8c..25a086bcb700 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -178,12 +178,13 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 EXPORT_SYMBOL(ftrace_print_hex_seq);
 
 const char *
-ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
+ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
 		       size_t el_size)
 {
 	const char *ret = trace_seq_buffer_ptr(p);
 	const char *prefix = "";
 	void *ptr = (void *)buf;
+	size_t buf_len = count * el_size;
 
 	trace_seq_putc(p, '{');
 
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 2316f50b07a4..581a68a04c64 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -41,6 +41,8 @@
 #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
 #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
 
+static DEFINE_MUTEX(watchdog_proc_mutex);
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
 #else
@@ -608,26 +610,36 @@ void watchdog_nmi_enable_all(void)
 {
 	int cpu;
 
-	if (!watchdog_user_enabled)
-		return;
+	mutex_lock(&watchdog_proc_mutex);
+
+	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+		goto unlock;
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
 		watchdog_nmi_enable(cpu);
 	put_online_cpus();
+
+unlock:
+	mutex_unlock(&watchdog_proc_mutex);
 }
 
 void watchdog_nmi_disable_all(void)
 {
 	int cpu;
 
+	mutex_lock(&watchdog_proc_mutex);
+
 	if (!watchdog_running)
-		return;
+		goto unlock;
 
 	get_online_cpus();
 	for_each_online_cpu(cpu)
 		watchdog_nmi_disable(cpu);
 	put_online_cpus();
+
+unlock:
+	mutex_unlock(&watchdog_proc_mutex);
 }
 #else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
@@ -744,8 +756,6 @@ static int proc_watchdog_update(void)
 
 }
 
-static DEFINE_MUTEX(watchdog_proc_mutex);
-
 /*
  * common function for watchdog, nmi_watchdog and soft_watchdog parameter
  *
