Diffstat (limited to 'kernel')
 kernel/hrtimer.c                    | 22
 kernel/irq/irqdesc.c                |  7
 kernel/irq/manage.c                 | 17
 kernel/module.c                     |  6
 kernel/power/suspend.c              |  3
 kernel/softirq.c                    |  5
 kernel/timer.c                      |  2
 kernel/trace/ftrace.c               | 27
 kernel/trace/trace_events_trigger.c |  2
 9 files changed, 52 insertions(+), 39 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d55092ceee29..6b715c0af1b1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -234,6 +234,11 @@ again:
 			goto again;
 		}
 		timer->base = new_base;
+	} else {
+		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+			cpu = this_cpu;
+			goto again;
+		}
 	}
 	return new_base;
 }
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
 	cpu_base->expires_next.tv64 = expires_next.tv64;
 
+	/*
+	 * If a hang was detected in the last timer interrupt then we
+	 * leave the hang delay active in the hardware. We want the
+	 * system to make progress. That also prevents the following
+	 * scenario:
+	 * T1 expires 50ms from now
+	 * T2 expires 5s from now
+	 *
+	 * T1 is removed, so this code is called and would reprogram
+	 * the hardware to 5s from now. Any hrtimer_start after that
+	 * will not reprogram the hardware due to hang_detected being
+	 * set. So we'd effectively block all timers until the T2 event
+	 * fires.
+	 */
+	if (cpu_base->hang_detected)
+		return;
+
 	if (cpu_base->expires_next.tv64 != KTIME_MAX)
 		tick_program_event(cpu_base->expires_next, 1);
 }
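The first hunk above closes a case where switch_hrtimer_base() could hand a timer to a remote CPU on which it would become the first-expiring timer, with no way to reprogram that CPU's clock event device from here. A self-contained sketch of the retry shape; target_base_for() and would_miss_deadline() are invented stand-ins for per_cpu(hrtimer_bases, cpu) and hrtimer_check_target(), not kernel symbols:

#include <stdbool.h>

struct base { int cpu; long first_expiry; };

static struct base bases[2] = { { 0, 100 }, { 1, 50 } };

/* Stand-in for the per-CPU base lookup. */
static struct base *target_base_for(int cpu)
{
	return &bases[cpu];
}

/* Stand-in for hrtimer_check_target(): true if our timer would have to
 * fire before anything already programmed on that base. */
static bool would_miss_deadline(const struct base *b, long expiry)
{
	return expiry <= b->first_expiry;
}

static struct base *pick_base(long expiry, int preferred_cpu, int this_cpu)
{
	int cpu = preferred_cpu;
	struct base *new_base;
again:
	new_base = target_base_for(cpu);
	/* A remote CPU's event device cannot be reprogrammed from this
	 * CPU, so a timer that would become its leftmost entry must stay
	 * local: fall back to this_cpu and retry. */
	if (cpu != this_cpu && would_miss_deadline(new_base, expiry)) {
		cpu = this_cpu;
		goto again;
	}
	return new_base;
}

int main(void)
{
	/* expiry 40 beats CPU 1's queued expiry 50, so it stays on CPU 0. */
	return pick_base(40, 1, 0)->cpu;	/* returns 0 */
}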
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a7174617616b..bb07f2928f4b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 		if (from > irq)
 			return -EINVAL;
 		from = irq;
+	} else {
+		/*
+		 * For interrupts which are freely allocated the
+		 * architecture can force a lower bound to the @from
+		 * argument. x86 uses this to exclude the GSI space.
+		 */
+		from = arch_dynirq_lower_bound(from);
 	}
 
 	mutex_lock(&sparse_irq_lock);
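The hook has a __weak identity default (added to kernel/softirq.c below), so only architectures that care need to override it. A hedged sketch of what such an override could look like; FIRST_DYNAMIC_IRQ is an invented constant for illustration, and the real x86 version that excludes the GSI range is outside this 'kernel'-only diffstat:

/* FIRST_DYNAMIC_IRQ is made up for this sketch, not a kernel symbol. */
#define FIRST_DYNAMIC_IRQ 64

/* Overrides the __weak default in kernel/softirq.c. */
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	/* Never place dynamically allocated descriptors inside the
	 * architecturally reserved low IRQ range. */
	return from < FIRST_DYNAMIC_IRQ ? FIRST_DYNAMIC_IRQ : from;
}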
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2486a4c1a710..d34131ca372b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -180,7 +180,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	int ret;
 
-	ret = chip->irq_set_affinity(data, mask, false);
+	ret = chip->irq_set_affinity(data, mask, force);
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 		cpumask_copy(data->affinity, mask);
@@ -192,7 +192,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+			    bool force)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	struct irq_desc *desc = irq_data_to_desc(data);
@@ -202,7 +203,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, false);
+		ret = irq_do_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -217,13 +218,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 	return ret;
 }
 
-/**
- *	irq_set_affinity - Set the irq affinity of a given irq
- *	@irq:		Interrupt to set affinity
- *	@mask:		cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -233,7 +228,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 		return -EINVAL;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
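Net effect of the hunks above: the whole affinity path now threads a caller-supplied force flag down to the irq chip instead of hard-coding false. The old irq_set_affinity() entry point presumably survives as a thin wrapper in include/linux/interrupt.h, which this 'kernel'-only diffstat cannot show; a sketch of that likely shape:

/* Sketch of the header-side wrappers; the exact form may differ. */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);	/* old behaviour */
}

static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	/* force == true lets a chip accept a target it would normally
	 * reject, e.g. a CPU that is not (yet) marked online. */
	return __irq_set_affinity(irq, cpumask, true);
}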
diff --git a/kernel/module.c b/kernel/module.c
index 11869408f79b..079c4615607d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
-	if (!(flags & O_NONBLOCK))
-		pr_warn("waiting module removal not supported: please upgrade\n");
-
 	if (mutex_lock_interruptible(&module_mutex) != 0)
 		return -EINTR;
 
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
 	dynamic_debug_setup(info->debug, info->num_debug);
 
+	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+	ftrace_module_init(mod);
+
 	/* Finally it's fully formed, ready to start executing. */
 	err = complete_formation(mod, info);
 	if (err)
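Since load_module() now calls ftrace_module_init() unconditionally, the header has to supply a no-op stub when the ftrace call-site machinery is compiled out. A plausible sketch of that declaration pair; the real one lives in include/linux/ftrace.h (outside this diffstat) and its exact config guard may differ:

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
void ftrace_module_init(struct module *mod);
#else
static inline void ftrace_module_init(struct module *mod) { }
#endif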
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c3ad9cafe930..8233cd4047d7 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/cpuidle.h>
 #include <linux/syscalls.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -53,7 +54,9 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
+	cpuidle_resume();
 	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+	cpuidle_pause();
 }
 
 void freeze_wake(void)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b50990a5bea0..33e4648ae0e7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
 {
 	return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+	return from;
+}
diff --git a/kernel/timer.c b/kernel/timer.c
index 87bd529879c2..3bb01a323b2a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 
 	bit = find_last_bit(&mask, BITS_PER_LONG);
 
-	mask = (1 << bit) - 1;
+	mask = (1UL << bit) - 1;
 
 	expires_limit = expires_limit & ~(mask);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1fd4b9479210..4a54a25afa2f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-	struct module *mod = data;
-
-	if (val == MODULE_STATE_COMING)
-		ftrace_init_module(mod, mod->ftrace_callsites,
-				   mod->ftrace_callsites +
-				   mod->num_ftrace_callsites);
-	return 0;
+	ftrace_init_module(mod, mod->ftrace_callsites,
+			   mod->ftrace_callsites +
+			   mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 	return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
-{
-	return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-	.notifier_call = ftrace_module_notify_enter,
-	.priority = INT_MAX,	/* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
 	.notifier_call = ftrace_module_notify_exit,
 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
 					 __start_mcount_loc,
 					 __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_enter_nb);
-	if (ret)
-		pr_warning("Failed to register trace ftrace module enter notifier\n");
-
 	ret = register_module_notifier(&ftrace_module_exit_nb);
 	if (ret)
 		pr_warning("Failed to register trace ftrace module exit notifier\n");
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 925f537f07d1..4747b476a030 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
 			data->ops->func(data);
 			continue;
 		}
-		filter = rcu_dereference(data->filter);
+		filter = rcu_dereference_sched(data->filter);
 		if (filter && !filter_match_preds(filter, rec))
 			continue;
 		if (data->cmd_ops->post_trigger) {
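The swap to rcu_dereference_sched() reflects that event triggers run with preemption disabled (a sched-RCU read side) rather than under rcu_read_lock(), so with CONFIG_PROVE_RCU the plain flavor would trigger a false-positive lockdep splat. A minimal sketch of the rule, with gp as an invented example pointer:

#include <linux/preempt.h>
#include <linux/rcupdate.h>

struct foo { int val; };
static struct foo __rcu *gp;	/* invented example pointer */

static int read_under_preempt_disable(void)
{
	struct foo *p;
	int v = 0;

	preempt_disable();		/* sched-RCU read-side section */
	p = rcu_dereference_sched(gp);	/* matches the protection we hold */
	if (p)
		v = p->val;
	preempt_enable();
	return v;
}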
