Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c             | 10
-rw-r--r--  kernel/irq/irqdomain.c      |  1
-rw-r--r--  kernel/irq/manage.c         |  3
-rw-r--r--  kernel/sched/cpudeadline.c  |  4
-rw-r--r--  kernel/sched/deadline.c     | 10
-rw-r--r--  kernel/sched/fair.c         |  8
-rw-r--r--  kernel/sched/rt.c           |  8
-rw-r--r--  kernel/trace/trace_events.c | 10
-rw-r--r--  kernel/tracepoint.c         |  7
9 files changed, 41 insertions, 20 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4410ac6a55f1..e6b1b66afe52 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  * Temporarilly set tasks mems_allowed to target nodes of migration,
  * so that the migration code can allocate pages on these nodes.
  *
- * Call holding cpuset_mutex, so current's cpuset won't change
- * during this call, as manage_mutex holds off any cpuset_attach()
- * calls. Therefore we don't need to take task_lock around the
- * call to guarantee_online_mems(), as we know no one is changing
- * our task's cpuset.
- *
  * While the mm_struct we are migrating is typically from some
  * other task, the task_struct mems_allowed that we are hacking
  * is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 
         do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
+        rcu_read_lock();
         mems_cs = effective_nodemask_cpuset(task_cs(tsk));
         guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+        rcu_read_unlock();
 }
 
 /*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
         task_lock(current);
         cs = nearest_hardwall_ancestor(task_cs(current));
+        allowed = node_isset(node, cs->mems_allowed);
         task_unlock(current);
 
-        allowed = node_isset(node, cs->mems_allowed);
         mutex_unlock(&callback_mutex);
         return allowed;
 }
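The last hunk matters because task_cs(current) is only stable while task_lock() (or an RCU read section) is held; once task_unlock() runs, the task can be moved to another cpuset and the one returned by nearest_hardwall_ancestor() may be freed. A minimal annotated restatement of the fixed ordering (identifiers as in the hunk, comments added here for illustration):

        task_lock(current);                               /* pins current's cpuset */
        cs = nearest_hardwall_ancestor(task_cs(current));
        allowed = node_isset(node, cs->mems_allowed);     /* safe: cs cannot go away yet */
        task_unlock(current);                             /* after this, cs may be stale */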
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cf68bb36fe58..f14033700c25 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/topology.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 481a13c43b17..d3bf660cb57f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-        if (atomic_dec_and_test(&desc->threads_active) &&
-            waitqueue_active(&desc->wait_for_threads))
+        if (atomic_dec_and_test(&desc->threads_active))
                 wake_up(&desc->wait_for_threads);
 }
 
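The waitqueue_active() test is removed because it can race with a waiter that has observed threads_active as non-zero but has not yet added itself to the waitqueue; the wakeup is then skipped and the waiter sleeps forever. A minimal sketch of the intended handshake, assuming the waiter side is the synchronize_irq()-style wait loop (wait_for_threads() below is an illustrative stand-in, not a function in the tree):

        /* waker: called when a threaded handler finishes a run */
        static void wake_threads_waitq(struct irq_desc *desc)
        {
                if (atomic_dec_and_test(&desc->threads_active))
                        wake_up(&desc->wait_for_threads);   /* unconditional: wake_up() on an
                                                             * empty queue is cheap and safe */
        }

        /* waiter (illustrative): sleep until all threaded handlers are done */
        static void wait_for_threads(struct irq_desc *desc)
        {
                wait_event(desc->wait_for_threads,
                           !atomic_read(&desc->threads_active));
        }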
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5b8838b56d1c..5b9bb42b2d47 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-        WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
+        WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
         if (dl_time_before(new_dl, cp->elements[idx].dl)) {
                 cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
         }
 
 out:
-        WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
+        WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
         return best_cpu;
 }
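Both warnings rely on C's short-circuit evaluation: the cheap validity test now comes first, so cpu_present() is never evaluated with an invalid index. Annotated restatement (conditions exactly as in the hunks, comments added here):

        WARN_ON(idx == IDX_INVALID || !cpu_present(idx));      /* cpu_present() skipped when idx is invalid */
        WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));     /* cpu_present() skipped when no CPU was found */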
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 15cbc17fbf84..6e79b3faa4cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
         struct task_struct *p = dl_task_of(dl_se);
-        dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
         if (p->nr_cpus_allowed > 1)
                 dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
         struct task_struct *p = dl_task_of(dl_se);
-        dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
         if (p->nr_cpus_allowed > 1)
                 dl_rq->dl_nr_migratory--;
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
         return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
                 struct rt_rq *rt_rq = &rq->rt;
 
                 raw_spin_lock(&rt_rq->rt_runtime_lock);
-                rt_rq->rt_time += delta_exec;
                 /*
                  * We'll let actual RT tasks worry about the overflow here, we
-                 * have our own CBS to keep us inline -- see above.
+                 * have our own CBS to keep us inline; only account when RT
+                 * bandwidth is relevant.
                  */
+                if (sched_rt_bandwidth_account(rt_rq))
+                        rt_rq->rt_time += delta_exec;
                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
         }
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 78157099b167..9b4c4f320130 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
         struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
         /*
-         * Ensure the task's vruntime is normalized, so that when its
+         * Ensure the task's vruntime is normalized, so that when it's
          * switched back to the fair class the enqueue_entity(.flags=0) will
          * do the right thing.
          *
-         * If it was on_rq, then the dequeue_entity(.flags=0) will already
-         * have normalized the vruntime, if it was !on_rq, then only when
+         * If it's on_rq, then the dequeue_entity(.flags=0) will already
+         * have normalized the vruntime, if it's !on_rq, then only when
          * the task is sleeping will it still have non-normalized vruntime.
          */
-        if (!se->on_rq && p->state != TASK_RUNNING) {
+        if (!p->on_rq && p->state != TASK_RUNNING) {
                 /*
                  * Fix up our vruntime so that the current sleep doesn't
                  * cause 'unlimited' sleep bonus.
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a2740b775b45..1999021042c7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+        return (hrtimer_active(&rt_b->rt_period_timer) ||
+                rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
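Together with the deadline.c hunk above, the intent is that -deadline runtime is charged to rt_rq->rt_time only while the RT bandwidth machinery can still pay it back; otherwise rt_time would keep growing with no period timer running to decay it. An annotated restatement of the new helper (body as in the hunk, comments added here as interpretation):

        bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
        {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Account while the period timer is active (rt_time will be
                 * decayed on the next period) or while we are still under the
                 * runtime budget; otherwise skip the accounting so rt_time
                 * cannot grow without bound.
                 */
                return (hrtimer_active(&rt_b->rt_period_timer) ||
                        rt_rq->rt_time < rt_b->rt_runtime);
        }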
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e71ffd4eccb5..f3989ceb5cd5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1777,6 +1777,16 @@ static void trace_module_add_events(struct module *mod)
 {
         struct ftrace_event_call **call, **start, **end;
 
+        if (!mod->num_trace_events)
+                return;
+
+        /* Don't add infrastructure for mods without tracepoints */
+        if (trace_module_has_bad_taint(mod)) {
+                pr_err("%s: module has bad taint, not creating trace events\n",
+                       mod->name);
+                return;
+        }
+
         start = mod->trace_events;
         end = mod->trace_events + mod->num_trace_events;
 
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 29f26540e9c9..031cc5655a51 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
+bool trace_module_has_bad_taint(struct module *mod)
+{
+        return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
+}
+
 static int tracepoint_module_coming(struct module *mod)
 {
         struct tp_module *tp_mod, *iter;
@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
          * module headers (for forced load), to make sure we don't cause a crash.
          * Staging and out-of-tree GPL modules are fine.
          */
-        if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+        if (trace_module_has_bad_taint(mod))
                 return 0;
         mutex_lock(&tracepoints_mutex);
         tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);