Diffstat (limited to 'kernel')
 kernel/hrtimer.c                    | 22
 kernel/irq/irqdesc.c                |  7
 kernel/module.c                     |  6
 kernel/softirq.c                    |  5
 kernel/timer.c                      |  2
 kernel/trace/ftrace.c               | 27
 kernel/trace/trace_events_trigger.c |  2
7 files changed, 43 insertions, 28 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d55092ceee29..6b715c0af1b1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -234,6 +234,11 @@ again:
 			goto again;
 		}
 		timer->base = new_base;
+	} else {
+		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+			cpu = this_cpu;
+			goto again;
+		}
 	}
 	return new_base;
 }
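The new else branch handles the case where the timer stays on a base owned by a remote CPU: if that CPU would have to reprogram its clock event device for this expiry (which cannot be done from another CPU), the enqueue is retried on the local CPU instead. A rough sketch of the kind of check hrtimer_check_target() performs (illustrative helper, not a verbatim copy of the kernel function):

    /* Illustrative only: nonzero means the timer would expire before the
     * target CPU's currently programmed event, i.e. enqueueing it there
     * would require a remote reprogram. */
    static int would_need_remote_reprogram(struct hrtimer *timer,
                                           struct hrtimer_clock_base *new_base)
    {
            ktime_t expires;

            if (!new_base->cpu_base->hres_active)
                    return 0;

            expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
            return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
    }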
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
 	cpu_base->expires_next.tv64 = expires_next.tv64;
 
+	/*
+	 * If a hang was detected in the last timer interrupt then we
+	 * leave the hang delay active in the hardware. We want the
+	 * system to make progress. That also prevents the following
+	 * scenario:
+	 * T1 expires 50ms from now
+	 * T2 expires 5s from now
+	 *
+	 * T1 is removed, so this code is called and would reprogram
+	 * the hardware to 5s from now. Any hrtimer_start after that
+	 * will not reprogram the hardware due to hang_detected being
+	 * set. So we'd effectively block all timers until the T2 event
+	 * fires.
+	 */
+	if (cpu_base->hang_detected)
+		return;
+
 	if (cpu_base->expires_next.tv64 != KTIME_MAX)
 		tick_program_event(cpu_base->expires_next, 1);
 }
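The comment's claim that a later hrtimer_start() will not reprogram the hardware rests on a matching guard in the per-timer reprogramming path, roughly of this shape (a sketch, not a verbatim quote of kernel/hrtimer.c):

    /* In the path that arms the clock event device for a newly started
     * timer, reprogramming is likewise skipped while a hang is being
     * recovered from, so the enforced hang delay stays in effect: */
    if (cpu_base->hang_detected)
            return 0;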
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a7174617616b..bb07f2928f4b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 		if (from > irq)
 			return -EINVAL;
 		from = irq;
+	} else {
+		/*
+		 * For interrupts which are freely allocated the
+		 * architecture can force a lower bound to the @from
+		 * argument. x86 uses this to exclude the GSI space.
+		 */
+		from = arch_dynirq_lower_bound(from);
 	}
 
 	mutex_lock(&sparse_irq_lock);
diff --git a/kernel/module.c b/kernel/module.c
index 11869408f79b..079c4615607d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
-	if (!(flags & O_NONBLOCK))
-		pr_warn("waiting module removal not supported: please upgrade\n");
-
 	if (mutex_lock_interruptible(&module_mutex) != 0)
 		return -EINTR;
 
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
 	dynamic_debug_setup(info->debug, info->num_debug);
 
+	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+	ftrace_module_init(mod);
+
 	/* Finally it's fully formed, ready to start executing. */
 	err = complete_formation(mod, info);
 	if (err)
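Calling ftrace_module_init() directly from load_module(), while mod->state is still MODULE_STATE_UNFORMED, ensures the module's mcount call sites are prepared before complete_formation() makes the module visible to the rest of the kernel. For configurations where ftrace does not record call sites, the call presumably pairs with a declaration and a no-op stub in the ftrace header, along these lines (assumed shape, not part of this diff):

    /* Assumed counterpart in include/linux/ftrace.h (sketch): */
    #ifdef CONFIG_FTRACE_MCOUNT_RECORD
    void ftrace_module_init(struct module *mod);
    #else
    static inline void ftrace_module_init(struct module *mod) { }
    #endif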
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b50990a5bea0..33e4648ae0e7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
 {
 	return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+	return from;
+}
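The weak default leaves the caller's lower bound unchanged; an architecture overrides it to keep dynamically allocated interrupts out of a statically mapped range (per the irqdesc.c comment above, x86 uses this to exclude the GSI space). A hypothetical override might look like this (my_arch_gsi_top is a placeholder, not a real symbol):

    /* Hypothetical architecture override, illustrative only: never hand
     * out dynamically allocated IRQ numbers below the end of the GSI range. */
    unsigned int arch_dynirq_lower_bound(unsigned int from)
    {
            return from < my_arch_gsi_top ? my_arch_gsi_top : from;
    }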
diff --git a/kernel/timer.c b/kernel/timer.c
index 87bd529879c2..3bb01a323b2a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 
 	bit = find_last_bit(&mask, BITS_PER_LONG);
 
-	mask = (1 << bit) - 1;
+	mask = (1UL << bit) - 1;
 
 	expires_limit = expires_limit & ~(mask);
 
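With BITS_PER_LONG == 64, find_last_bit() can return a bit index of 32 or more, and shifting the int constant 1 by that much is undefined behaviour; in practice it yields a truncated mask, so apply_slack() could round the expiry far more coarsely than intended. A small standalone demonstration of the difference (userspace, 64-bit build assumed):

    #include <stdio.h>

    int main(void)
    {
            volatile unsigned int bit = 40;         /* plausible on a 64-bit kernel */
            unsigned long bad  = (1 << bit) - 1;    /* int shift: undefined for bit >= 32 */
            unsigned long good = (1UL << bit) - 1;  /* full-width mask, as in the fix */

            printf("bad mask  = %#lx\n", bad);      /* typically a tiny, wrong mask */
            printf("good mask = %#lx\n", good);     /* 0xffffffffff */
            return 0;
    }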
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1fd4b9479210..4a54a25afa2f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-	struct module *mod = data;
-
-	if (val == MODULE_STATE_COMING)
-		ftrace_init_module(mod, mod->ftrace_callsites,
-				   mod->ftrace_callsites +
-				   mod->num_ftrace_callsites);
-	return 0;
+	ftrace_init_module(mod, mod->ftrace_callsites,
+			   mod->ftrace_callsites +
+			   mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 	return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
-{
-	return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-	.notifier_call = ftrace_module_notify_enter,
-	.priority = INT_MAX,	/* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
 	.notifier_call = ftrace_module_notify_exit,
 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_enter_nb);
-	if (ret)
-		pr_warning("Failed to register trace ftrace module enter notifier\n");
-
 	ret = register_module_notifier(&ftrace_module_exit_nb);
 	if (ret)
 		pr_warning("Failed to register trace ftrace module exit notifier\n");
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 925f537f07d1..4747b476a030 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
 			data->ops->func(data);
 			continue;
 		}
-		filter = rcu_dereference(data->filter);
+		filter = rcu_dereference_sched(data->filter);
 		if (filter && !filter_match_preds(filter, rec))
 			continue;
 		if (data->cmd_ops->post_trigger) {
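event_triggers_call() runs from trace event context with preemption disabled rather than under rcu_read_lock(), so data->filter is protected by sched-RCU; rcu_dereference_sched() states that explicitly and keeps RCU lockdep from flagging the access. The pairing it relies on looks roughly like this (sketch of the update side with assumed helper names, not a verbatim quote):

    /* Reader (preemption already disabled by the tracing core): */
    filter = rcu_dereference_sched(data->filter);

    /* Writer, when replacing or clearing the filter: */
    old = data->filter;
    rcu_assign_pointer(data->filter, new_filter);
    if (old) {
            synchronize_sched();    /* wait out preempt-disabled readers */
            free_event_filter(old); /* assumed helper name */
    }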
