Diffstat (limited to 'kernel')
 kernel/events/core.c      | 22
 kernel/irq/handle.c       |  6
 kernel/irq/irqdesc.c      | 14
 kernel/irq/manage.c       | 24
 kernel/irq/spurious.c     | 31
 kernel/lockdep.c          |  2
 kernel/sched.c            | 33
 kernel/sysctl.c           |  6
 kernel/time/clockevents.c |  5
 kernel/timer.c            | 15
 10 files changed, 96 insertions, 62 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d863b3c057bb..9efe7108ccaf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7402,26 +7402,12 @@ static int __perf_cgroup_move(void *info)
         return 0;
 }
 
-static void perf_cgroup_move(struct task_struct *task)
+static void
+perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
 {
         task_function_call(task, __perf_cgroup_move, task);
 }
 
-static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                struct cgroup *old_cgrp, struct task_struct *task,
-                bool threadgroup)
-{
-        perf_cgroup_move(task);
-        if (threadgroup) {
-                struct task_struct *c;
-                rcu_read_lock();
-                list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
-                        perf_cgroup_move(c);
-                }
-                rcu_read_unlock();
-        }
-}
-
 static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
                 struct cgroup *old_cgrp, struct task_struct *task)
 {
@@ -7433,7 +7419,7 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
         if (!(task->flags & PF_EXITING))
                 return;
 
-        perf_cgroup_move(task);
+        perf_cgroup_attach_task(cgrp, task);
 }
 
 struct cgroup_subsys perf_subsys = {
@@ -7442,6 +7428,6 @@ struct cgroup_subsys perf_subsys = {
         .create = perf_cgroup_create,
         .destroy = perf_cgroup_destroy,
         .exit = perf_cgroup_exit,
-        .attach = perf_cgroup_attach,
+        .attach_task = perf_cgroup_attach_task,
 };
 #endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 90cb55f6d7eb..470d08c82bbe 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,12 +133,6 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
                 switch (res) {
                 case IRQ_WAKE_THREAD:
                         /*
-                         * Set result to handled so the spurious check
-                         * does not trigger.
-                         */
-                        res = IRQ_HANDLED;
-
-                        /*
                          * Catch drivers which return WAKE_THREAD but
                          * did not set up a thread function
                          */
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 886e80347b32..4c60a50e66b2 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -257,13 +257,11 @@ int __init early_irq_init(void)
         count = ARRAY_SIZE(irq_desc);
 
         for (i = 0; i < count; i++) {
-                desc[i].irq_data.irq = i;
-                desc[i].irq_data.chip = &no_irq_chip;
                 desc[i].kstat_irqs = alloc_percpu(unsigned int);
-                irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
-                alloc_masks(desc + i, GFP_KERNEL, node);
-                desc_smp_init(desc + i, node);
+                alloc_masks(&desc[i], GFP_KERNEL, node);
+                raw_spin_lock_init(&desc[i].lock);
                 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+                desc_set_defaults(i, &desc[i], node);
         }
         return arch_early_irq_init();
 }
@@ -346,6 +344,12 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
         if (!cnt)
                 return -EINVAL;
 
+        if (irq >= 0) {
+                if (from > irq)
+                        return -EINVAL;
+                from = irq;
+        }
+
         mutex_lock(&sparse_irq_lock);
 
         start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
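Note on the irq_alloc_descs() hunk above: it adds an argument check so that, when a specific irq number is requested, the search start "from" may not lie beyond it and is pulled down to it. The stand-alone C sketch below only illustrates that check; the function name check_alloc_start() and the demo values are invented for the example and are not part of the patch.

#include <stdio.h>

/* illustrative only: mirrors the new "if (irq >= 0)" clamp in irq_alloc_descs() */
static int check_alloc_start(int irq, unsigned int *from, unsigned int cnt)
{
        if (!cnt)
                return -1;                      /* stands in for -EINVAL */
        if (irq >= 0) {
                if (*from > (unsigned int)irq)
                        return -1;              /* window cannot start past the requested irq */
                *from = irq;                    /* begin the search at the requested irq */
        }
        return 0;
}

int main(void)
{
        unsigned int from = 16;
        int ret = check_alloc_start(20, &from, 4);

        printf("irq=20: ret=%d, from=%u\n", ret, from);         /* ret=0, from=20 */
        printf("irq=8:  ret=%d\n", check_alloc_start(8, &from, 4));     /* rejected */
        return 0;
}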
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f7ce0021e1c4..d64bafb1afd0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -723,13 +723,16 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
  */
-static void
+static irqreturn_t
 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
+        irqreturn_t ret;
+
         local_bh_disable();
-        action->thread_fn(action->irq, action->dev_id);
+        ret = action->thread_fn(action->irq, action->dev_id);
         irq_finalize_oneshot(desc, action, false);
         local_bh_enable();
+        return ret;
 }
 
 /*
@@ -737,10 +740,14 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  * preemtible - many of them need to sleep and wait for slow busses to
  * complete.
  */
-static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+                struct irqaction *action)
 {
-        action->thread_fn(action->irq, action->dev_id);
+        irqreturn_t ret;
+
+        ret = action->thread_fn(action->irq, action->dev_id);
         irq_finalize_oneshot(desc, action, false);
+        return ret;
 }
 
 /*
@@ -753,7 +760,8 @@ static int irq_thread(void *data)
         };
         struct irqaction *action = data;
         struct irq_desc *desc = irq_to_desc(action->irq);
-        void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+        irqreturn_t (*handler_fn)(struct irq_desc *desc,
+                        struct irqaction *action);
         int wake;
 
         if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
@@ -783,8 +791,12 @@ static int irq_thread(void *data)
                         desc->istate |= IRQS_PENDING;
                         raw_spin_unlock_irq(&desc->lock);
                 } else {
+                        irqreturn_t action_ret;
+
                         raw_spin_unlock_irq(&desc->lock);
-                        handler_fn(desc, action);
+                        action_ret = handler_fn(desc, action);
+                        if (!noirqdebug)
+                                note_interrupt(action->irq, desc, action_ret);
                 }
 
                 wake = atomic_dec_and_test(&desc->threads_active);
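Taken together, the manage.c hunks make the threaded handlers report an irqreturn_t so the irq thread can feed the result into note_interrupt(). Below is a user-space sketch of that shape; all names here (fake_irqreturn_t, run_thread_handler, note_result, demo_thread_fn) are invented for illustration and are not kernel APIs.

#include <stdio.h>

typedef enum { FAKE_IRQ_NONE = 0, FAKE_IRQ_HANDLED = 1 } fake_irqreturn_t;

/* matches the new handler_fn shape: the handler reports whether it handled the IRQ */
typedef fake_irqreturn_t (*handler_fn_t)(int irq, void *dev_id);

static fake_irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
        (void)dev_id;
        return irq == 42 ? FAKE_IRQ_HANDLED : FAKE_IRQ_NONE;
}

/* stands in for note_interrupt(): consumes the handler's verdict */
static void note_result(int irq, fake_irqreturn_t ret)
{
        printf("irq %d: %s\n", irq, ret == FAKE_IRQ_NONE ? "unhandled" : "handled");
}

static void run_thread_handler(int irq, handler_fn_t fn)
{
        fake_irqreturn_t ret = fn(irq, NULL);   /* before the patch this was a void call */

        note_result(irq, ret);                  /* new: result reaches the spurious accounting */
}

int main(void)
{
        run_thread_handler(42, demo_thread_fn);
        run_thread_handler(43, demo_thread_fn);
        return 0;
}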
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dfbd550401b2..aa57d5da18c1 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -167,6 +167,13 @@ out:
                   jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+        if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+                return 0;
+        return 1;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -182,7 +189,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
         struct irqaction *action;
         unsigned long flags;
 
-        if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+        if (bad_action_ret(action_ret)) {
                 printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                 irq, action_ret);
         } else {
@@ -201,10 +208,11 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
         raw_spin_lock_irqsave(&desc->lock, flags);
         action = desc->action;
         while (action) {
-                printk(KERN_ERR "[<%p>]", action->handler);
-                print_symbol(" (%s)",
-                        (unsigned long)action->handler);
-                printk("\n");
+                printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
+                if (action->thread_fn)
+                        printk(KERN_CONT " threaded [<%p>] %pf",
+                                        action->thread_fn, action->thread_fn);
+                printk(KERN_CONT "\n");
                 action = action->next;
         }
         raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -262,7 +270,16 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
         if (desc->istate & IRQS_POLL_INPROGRESS)
                 return;
 
-        if (unlikely(action_ret != IRQ_HANDLED)) {
+        /* we get here again via the threaded handler */
+        if (action_ret == IRQ_WAKE_THREAD)
+                return;
+
+        if (bad_action_ret(action_ret)) {
+                report_bad_irq(irq, desc, action_ret);
+                return;
+        }
+
+        if (unlikely(action_ret == IRQ_NONE)) {
                 /*
                  * If we are seeing only the odd spurious IRQ caused by
                  * bus asynchronicity then don't eventually trigger an error,
@@ -274,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
                 else
                         desc->irqs_unhandled++;
                 desc->last_unhandled = jiffies;
-                if (unlikely(action_ret != IRQ_NONE))
-                        report_bad_irq(irq, desc, action_ret);
         }
 
         if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
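The new bad_action_ret() helper above treats anything beyond IRQ_HANDLED | IRQ_WAKE_THREAD as a bogus driver return value. Here is a small stand-alone illustration, with the usual flag values (IRQ_NONE = 0, IRQ_HANDLED = 1 << 0, IRQ_WAKE_THREAD = 1 << 1) copied as plain DEMO_* macros purely for the demo:

#include <stdio.h>

#define DEMO_IRQ_NONE           (0 << 0)
#define DEMO_IRQ_HANDLED        (1 << 0)
#define DEMO_IRQ_WAKE_THREAD    (1 << 1)

/* mirrors bad_action_ret(): only 0..(IRQ_HANDLED | IRQ_WAKE_THREAD) are sane */
static int demo_bad_action_ret(unsigned int action_ret)
{
        return action_ret > (DEMO_IRQ_HANDLED | DEMO_IRQ_WAKE_THREAD);
}

int main(void)
{
        unsigned int samples[] = { DEMO_IRQ_NONE, DEMO_IRQ_HANDLED,
                                   DEMO_IRQ_WAKE_THREAD, 0x10 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("ret=%#x -> %s\n", samples[i],
                       demo_bad_action_ret(samples[i]) ? "bogus" : "ok");
        return 0;
}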
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 63437d065ac8..298c9276dfdb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3426,7 +3426,7 @@ int lock_is_held(struct lockdep_map *lock)
         int ret = 0;
 
         if (unlikely(current->lockdep_recursion))
-                return ret;
+                return 1; /* avoid false negative lockdep_assert_held() */
 
         raw_local_irq_save(flags);
         check_flags(flags);
diff --git a/kernel/sched.c b/kernel/sched.c
index cbb3a0eee58e..3f2e502d609b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
- * holds that lock for each task it moves into the cgroup. Therefore
- * by holding that lock, we pin the task to the current cgroup.
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
         struct cgroup_subsys_state *css;
 
         css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-                        lockdep_is_held(&p->pi_lock));
+                        lockdep_is_held(&p->pi_lock) ||
+                        lockdep_is_held(&task_rq(p)->lock));
         tg = container_of(css, struct task_group, css);
 
         return autogroup_task_group(p, tg);
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                         !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
 
 #ifdef CONFIG_LOCKDEP
+        /*
+         * The caller should hold either p->pi_lock or rq->lock, when changing
+         * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+         *
+         * sched_move_task() holds both and thus holding either pins the cgroup,
+         * see set_task_rq().
+         *
+         * Furthermore, all task_rq users should acquire both locks, see
+         * task_rq_lock().
+         */
         WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
                                       lockdep_is_held(&task_rq(p)->lock)));
 #endif
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
                 }
                 rcu_read_unlock();
         }
+
+        if (wake_flags & WF_MIGRATED)
+                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+
 #endif /* CONFIG_SMP */
 
         schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
         if (wake_flags & WF_SYNC)
                 schedstat_inc(p, se.statistics.nr_wakeups_sync);
 
-        if (cpu != task_cpu(p))
-                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-
 #endif /* CONFIG_SCHEDSTATS */
 }
 
@@ -2600,6 +2612,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 
 #if defined(CONFIG_SMP)
         if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+                sched_clock_cpu(cpu); /* sync clocks x-cpu */
                 ttwu_queue_remote(p, cpu);
                 return;
         }
@@ -2674,8 +2687,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                 p->sched_class->task_waking(p);
 
         cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-        if (task_cpu(p) != cpu)
+        if (task_cpu(p) != cpu) {
+                wake_flags |= WF_MIGRATED;
                 set_task_cpu(p, cpu);
+        }
 #endif /* CONFIG_SMP */
 
         ttwu_queue(p, cpu);
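The sched.c hunks record the migration decision in wake_flags (WF_MIGRATED) at the point where the target CPU is chosen, and the statistics path later checks that flag instead of comparing CPUs after set_task_cpu() has already run. A user-space sketch of that pattern follows; the struct, flag value and names (demo_task, DEMO_WF_MIGRATED, demo_wake_up) are invented for illustration only.

#include <stdio.h>

#define DEMO_WF_MIGRATED        0x4     /* stand-in for WF_MIGRATED */

struct demo_task {
        int cpu;
        unsigned long nr_wakeups_migrate;
};

static void demo_wake_up(struct demo_task *p, int target_cpu)
{
        unsigned int wake_flags = 0;

        if (p->cpu != target_cpu) {
                wake_flags |= DEMO_WF_MIGRATED; /* decision recorded here */
                p->cpu = target_cpu;            /* after this, re-comparing CPUs tells nothing */
        }

        /* stats path: relies on the flag, not on comparing CPUs again */
        if (wake_flags & DEMO_WF_MIGRATED)
                p->nr_wakeups_migrate++;
}

int main(void)
{
        struct demo_task t = { .cpu = 0, .nr_wakeups_migrate = 0 };

        demo_wake_up(&t, 1);    /* migrates 0 -> 1 */
        demo_wake_up(&t, 1);    /* stays put */
        printf("nr_wakeups_migrate = %lu\n", t.nr_wakeups_migrate);
        return 0;
}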
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4fc92445a29c..f175d98bd355 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -938,6 +938,12 @@ static struct ctl_table kern_table[] = {
         },
 #endif
 #ifdef CONFIG_PERF_EVENTS
+        /*
+         * User-space scripts rely on the existence of this file
+         * as a feature check for perf_events being enabled.
+         *
+         * So it's an ABI, do not remove!
+         */
         {
                 .procname = "perf_event_paranoid",
                 .data = &sysctl_perf_event_paranoid,
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c027d4f602f1..e4c699dfa4e8 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -182,7 +182,10 @@ void clockevents_register_device(struct clock_event_device *dev)
         unsigned long flags;
 
         BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
-        BUG_ON(!dev->cpumask);
+        if (!dev->cpumask) {
+                WARN_ON(num_possible_cpus() > 1);
+                dev->cpumask = cpumask_of(smp_processor_id());
+        }
 
         raw_spin_lock_irqsave(&clockevents_lock, flags);
 
diff --git a/kernel/timer.c b/kernel/timer.c
index fd6198692b57..8cff36119e4d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -749,16 +749,15 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
         unsigned long expires_limit, mask;
         int bit;
 
-        expires_limit = expires;
-
         if (timer->slack >= 0) {
                 expires_limit = expires + timer->slack;
         } else {
-                unsigned long now = jiffies;
+                long delta = expires - jiffies;
+
+                if (delta < 256)
+                        return expires;
 
-                /* No slack, if already expired else auto slack 0.4% */
-                if (time_after(expires, now))
-                        expires_limit = expires + (expires - now)/256;
+                expires_limit = expires + delta / 256;
         }
         mask = expires ^ expires_limit;
         if (mask == 0)
@@ -795,6 +794,8 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
+        expires = apply_slack(timer, expires);
+
         /*
          * This is a common optimization triggered by the
          * networking code - if the timer is re-modified
@@ -803,8 +804,6 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
         if (timer_pending(timer) && timer->expires == expires)
                 return 1;
 
-        expires = apply_slack(timer, expires);
-
         return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer);
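The timer.c change makes apply_slack() bail out when the timeout is less than 256 jiffies away and otherwise grant roughly 0.4% slack (delta/256), and it moves the apply_slack() call in mod_timer() ahead of the pending-timer short-circuit so the comparison sees the slack-adjusted value. Below is a user-space sketch of the rounding; the tail of the function (finding the highest differing bit and masking it off) is not shown in the hunk above and is reconstructed here for illustration only, so treat it as an approximation rather than the exact kernel code.

#include <stdio.h>

static unsigned long demo_apply_slack(unsigned long expires, unsigned long now)
{
        unsigned long expires_limit, mask;
        long delta = expires - now;
        int bit = 0;

        if (delta < 256)                        /* too close: keep the exact expiry */
                return expires;

        expires_limit = expires + delta / 256;  /* allow ~0.4% slack */

        mask = expires ^ expires_limit;         /* bits in which the two differ */
        if (mask == 0)
                return expires;

        while (mask >> (bit + 1))               /* highest differing bit */
                bit++;
        mask = (1UL << bit) - 1;

        /* round within [expires, expires_limit] to a coarser jiffy boundary */
        return expires_limit & ~mask;
}

int main(void)
{
        unsigned long now = 1000000;

        printf("%lu\n", demo_apply_slack(now + 100, now));      /* unchanged: 1000100 */
        printf("%lu\n", demo_apply_slack(now + 10000, now));    /* rounded to a coarser boundary */
        return 0;
}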