Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cgroup.c                  |  35
-rw-r--r--   kernel/cpuset.c                  |   8
-rw-r--r--   kernel/events/core.c             |   8
-rw-r--r--   kernel/extable.c                 |   4
-rw-r--r--   kernel/irq/pm.c                  |   2
-rw-r--r--   kernel/padata.c                  |   9
-rw-r--r--   kernel/rcu/tree_plugin.h         |   4
-rw-r--r--   kernel/sched/core.c              |   8
-rw-r--r--   kernel/sched/fair.c              |  27
-rw-r--r--   kernel/time/tick-common.c        |  15
-rw-r--r--   kernel/time/tick-sched.c         |  25
-rw-r--r--   kernel/time/timekeeping.c        |   2
-rw-r--r--   kernel/timer.c                   |   5
-rw-r--r--   kernel/trace/ftrace.c            |  64
-rw-r--r--   kernel/trace/trace_event_perf.c  |   8
-rw-r--r--   kernel/trace/trace_events.c      |   3
-rw-r--r--   kernel/trace/trace_syscalls.c    |  10
-rw-r--r--   kernel/workqueue.c               |  50
18 files changed, 186 insertions, 101 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4c62513fe19f..8b729c278b64 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -90,6 +90,14 @@ static DEFINE_MUTEX(cgroup_mutex);
 static DEFINE_MUTEX(cgroup_root_mutex);
 
 /*
+ * cgroup destruction makes heavy use of work items and there can be a lot
+ * of concurrent destructions.  Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
+ */
+static struct workqueue_struct *cgroup_destroy_wq;
+
+/*
  * Generate an array of cgroup subsystem pointers. At boot time, this is
  * populated with the built in subsystems, and modular subsystems are
  * registered after that. The mutable section of this array is protected by
@@ -191,6 +199,7 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
                               bool is_add);
+static int cgroup_file_release(struct inode *inode, struct file *file);
 
 /**
  * cgroup_css - obtain a cgroup's css for the specified subsystem
@@ -871,7 +880,7 @@ static void cgroup_free_rcu(struct rcu_head *head)
         struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
 
         INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
-        schedule_work(&cgrp->destroy_work);
+        queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
 }
 
 static void cgroup_diput(struct dentry *dentry, struct inode *inode)
@@ -2421,7 +2430,7 @@ static const struct file_operations cgroup_seqfile_operations = {
         .read = seq_read,
         .write = cgroup_file_write,
         .llseek = seq_lseek,
-        .release = single_release,
+        .release = cgroup_file_release,
 };
 
 static int cgroup_file_open(struct inode *inode, struct file *file)
@@ -2482,6 +2491,8 @@ static int cgroup_file_release(struct inode *inode, struct file *file)
                 ret = cft->release(inode, file);
         if (css->ss)
                 css_put(css);
+        if (file->f_op == &cgroup_seqfile_operations)
+                single_release(inode, file);
         return ret;
 }
 
@@ -4249,7 +4260,7 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
          * css_put(). dput() requires process context which we don't have.
          */
         INIT_WORK(&css->destroy_work, css_free_work_fn);
-        schedule_work(&css->destroy_work);
+        queue_work(cgroup_destroy_wq, &css->destroy_work);
 }
 
 static void css_release(struct percpu_ref *ref)
@@ -4539,7 +4550,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
                 container_of(ref, struct cgroup_subsys_state, refcnt);
 
         INIT_WORK(&css->destroy_work, css_killed_work_fn);
-        schedule_work(&css->destroy_work);
+        queue_work(cgroup_destroy_wq, &css->destroy_work);
 }
 
 /**
@@ -5063,6 +5074,22 @@ out:
         return err;
 }
 
+static int __init cgroup_wq_init(void)
+{
+        /*
+         * There isn't much point in executing destruction path in
+         * parallel.  Good chunk is serialized with cgroup_mutex anyway.
+         * Use 1 for @max_active.
+         *
+         * We would prefer to do this in cgroup_init() above, but that
+         * is called before init_workqueues(): so leave this until after.
+         */
+        cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+        BUG_ON(!cgroup_destroy_wq);
+        return 0;
+}
+core_initcall(cgroup_wq_init);
+
 /*
  * proc_cgroup_show()
  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
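
Example (illustrative only, not part of the patch): the cgroup hunks above move every destruction work item off system_wq and onto a dedicated workqueue with max_active = 1. A minimal sketch of the same pattern in a hypothetical module; all example_* names are made up:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    static struct workqueue_struct *example_destroy_wq;

    struct example_obj {
            struct work_struct destroy_work;
    };

    static void example_destroy_fn(struct work_struct *work)
    {
            struct example_obj *obj =
                    container_of(work, struct example_obj, destroy_work);

            kfree(obj);     /* heavyweight teardown runs in process context */
    }

    static void example_release(struct example_obj *obj)
    {
            INIT_WORK(&obj->destroy_work, example_destroy_fn);
            /* a private queue cannot exhaust system_wq's max_active */
            queue_work(example_destroy_wq, &obj->destroy_work);
    }

    static int __init example_init(void)
    {
            /* max_active = 1: destructions are largely serialized anyway */
            example_destroy_wq = alloc_workqueue("example_destroy", 0, 1);
            return example_destroy_wq ? 0 : -ENOMEM;
    }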
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6bf981e13c43..4772034b4b17 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1033,8 +1033,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
         need_loop = task_has_mempolicy(tsk) ||
                         !nodes_intersects(*newmems, tsk->mems_allowed);
 
-        if (need_loop)
+        if (need_loop) {
+                local_irq_disable();
                 write_seqcount_begin(&tsk->mems_allowed_seq);
+        }
 
         nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
         mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -1042,8 +1044,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
         mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
         tsk->mems_allowed = *newmems;
 
-        if (need_loop)
+        if (need_loop) {
                 write_seqcount_end(&tsk->mems_allowed_seq);
+                local_irq_enable();
+        }
 
         task_unlock(tsk);
 }
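
Example (illustrative, not from the patch): the cpuset hunks wrap the seqcount write side in local_irq_disable()/local_irq_enable(), so a reader running in interrupt context on the same CPU cannot spin forever against an interrupted writer. The generic shape of that pattern, with made-up names:

    #include <linux/seqlock.h>
    #include <linux/irqflags.h>

    /* seqcount_init(&example_seq) must run before first use */
    static seqcount_t example_seq;
    static unsigned long example_val;

    /* writer: irqs off so the write section cannot be interrupted and then
     * observed mid-update by a reader on this CPU */
    static void example_update(unsigned long v)
    {
            local_irq_disable();
            write_seqcount_begin(&example_seq);
            example_val = v;
            write_seqcount_end(&example_seq);
            local_irq_enable();
    }

    /* reader: classic retry loop, safe from any context */
    static unsigned long example_read(void)
    {
            unsigned long v;
            unsigned int seq;

            do {
                    seq = read_seqcount_begin(&example_seq);
                    v = example_val;
            } while (read_seqcount_retry(&example_seq, seq));

            return v;
    }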
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d724e7757cd1..72348dc192c1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5680,11 +5680,6 @@ static void swevent_hlist_put(struct perf_event *event)
 {
         int cpu;
 
-        if (event->cpu != -1) {
-                swevent_hlist_put_cpu(event, event->cpu);
-                return;
-        }
-
         for_each_possible_cpu(cpu)
                 swevent_hlist_put_cpu(event, cpu);
 }
@@ -5718,9 +5713,6 @@ static int swevent_hlist_get(struct perf_event *event)
         int err;
         int cpu, failed_cpu;
 
-        if (event->cpu != -1)
-                return swevent_hlist_get_cpu(event, event->cpu);
-
         get_online_cpus();
         for_each_possible_cpu(cpu) {
                 err = swevent_hlist_get_cpu(event, cpu);
diff --git a/kernel/extable.c b/kernel/extable.c
index 832cb28105bb..763faf037ec1 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -61,7 +61,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
 static inline int init_kernel_text(unsigned long addr)
 {
         if (addr >= (unsigned long)_sinittext &&
-            addr <= (unsigned long)_einittext)
+            addr < (unsigned long)_einittext)
                 return 1;
         return 0;
 }
@@ -69,7 +69,7 @@ static inline int init_kernel_text(unsigned long addr)
 int core_kernel_text(unsigned long addr)
 {
         if (addr >= (unsigned long)_stext &&
-            addr <= (unsigned long)_etext)
+            addr < (unsigned long)_etext)
                 return 1;
 
         if (system_state == SYSTEM_BOOTING &&
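
The extable fix above turns an inclusive, off-by-one range check into a half-open one: linker symbols such as _etext and _einittext label the first byte after their section, so a valid text address must satisfy start <= addr < end. A generic sketch of the corrected check (illustrative names only):

    /* half-open interval [start, end): 'end' points one past the last byte */
    static inline int addr_in_section(unsigned long addr,
                                      unsigned long start, unsigned long end)
    {
            return addr >= start && addr < end;
    }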
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index cb228bf21760..abcd6ca86cb7 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)
                 bool is_early = desc->action &&
                         desc->action->flags & IRQF_EARLY_RESUME;
 
-                if (is_early != want_early)
+                if (!is_early && want_early)
                         continue;
 
                 raw_spin_lock_irqsave(&desc->lock, flags);
diff --git a/kernel/padata.c b/kernel/padata.c
index 07af2c95dcfe..2abd25d79cc8 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -46,6 +46,7 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 
 static int padata_cpu_hash(struct parallel_data *pd)
 {
+        unsigned int seq_nr;
         int cpu_index;
 
         /*
@@ -53,10 +54,8 @@ static int padata_cpu_hash(struct parallel_data *pd)
          * seq_nr mod. number of cpus in use.
          */
 
-        spin_lock(&pd->seq_lock);
-        cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
-        pd->seq_nr++;
-        spin_unlock(&pd->seq_lock);
+        seq_nr = atomic_inc_return(&pd->seq_nr);
+        cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
 
         return padata_index_to_cpu(pd, cpu_index);
 }
@@ -429,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
         padata_init_pqueues(pd);
         padata_init_squeues(pd);
         setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
-        pd->seq_nr = 0;
+        atomic_set(&pd->seq_nr, -1);
         atomic_set(&pd->reorder_objects, 0);
         atomic_set(&pd->refcnt, 0);
         pd->pinst = pinst;
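
Example (illustrative, not from the patch): padata_cpu_hash() above drops pd->seq_lock in favour of an atomic counter. atomic_inc_return() hands each caller a unique sequence number, the modulo spreads callers round-robin over the CPUs in the mask, and seeding the counter with -1 makes the first returned value 0. A self-contained sketch with made-up names:

    #include <linux/atomic.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static atomic_t example_seq_nr = ATOMIC_INIT(-1);

    /* pick the next CPU from @mask without taking a spinlock */
    static int example_next_cpu(const struct cpumask *mask)
    {
            unsigned int weight = cpumask_weight(mask);
            unsigned int seq_nr, index;
            int cpu;

            if (!weight)
                    return -EINVAL;

            seq_nr = atomic_inc_return(&example_seq_nr);
            index = seq_nr % weight;

            cpu = cpumask_first(mask);
            while (index-- > 0)
                    cpu = cpumask_next(cpu, mask);

            return cpu;
    }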
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c0..08a765232432 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1632,7 +1632,7 @@ module_param(rcu_idle_gp_delay, int, 0644);
 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
 module_param(rcu_idle_lazy_gp_delay, int, 0644);
 
-extern int tick_nohz_enabled;
+extern int tick_nohz_active;
 
 /*
  * Try to advance callbacks for all flavors of RCU on the current CPU, but
@@ -1729,7 +1729,7 @@ static void rcu_prepare_for_idle(int cpu)
         int tne;
 
         /* Handle nohz enablement switches conservatively. */
-        tne = ACCESS_ONCE(tick_nohz_enabled);
+        tne = ACCESS_ONCE(tick_nohz_active);
         if (tne != rdtp->tick_nohz_enabled_snap) {
                 if (rcu_cpu_has_callbacks(cpu, NULL))
                         invoke_rcu_core(); /* force nohz to see update. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1808606ee5f..e85cda20ab2b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
         } while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
+#endif /* CONFIG_PREEMPT */
 
 /*
  * this is the entry point to schedule() from kernel preemption
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void)
         exception_exit(prev_state);
 }
 
-#endif /* CONFIG_PREEMPT */
-
 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
                           void *key)
 {
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
                 cpumask_clear_cpu(rq->cpu, old_rd->span);
 
                 /*
-                 * If we dont want to free the old_rt yet then
+                 * If we dont want to free the old_rd yet then
                  * set old_rd to NULL to skip the freeing later
                  * in this function:
                  */
@@ -4910,8 +4909,9 @@ static void update_top_cache_domain(int cpu)
         if (sd) {
                 id = cpumask_first(sched_domain_span(sd));
                 size = cpumask_weight(sched_domain_span(sd));
-                rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent);
+                sd = sd->parent; /* sd_busy */
         }
+        rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
 
         rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
         per_cpu(sd_llc_size, cpu) = size;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8b652ebe027..fd773ade1a31 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5379,10 +5379,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
                  */
 
                 for_each_cpu(cpu, sched_group_cpus(sdg)) {
-                        struct sched_group *sg = cpu_rq(cpu)->sd->groups;
-
-                        power_orig += sg->sgp->power_orig;
-                        power += sg->sgp->power;
+                        struct sched_group_power *sgp;
+                        struct rq *rq = cpu_rq(cpu);
+
+                        /*
+                         * build_sched_domains() -> init_sched_groups_power()
+                         * gets here before we've attached the domains to the
+                         * runqueues.
+                         *
+                         * Use power_of(), which is set irrespective of domains
+                         * in update_cpu_power().
+                         *
+                         * This avoids power/power_orig from being 0 and
+                         * causing divide-by-zero issues on boot.
+                         *
+                         * Runtime updates will correct power_orig.
+                         */
+                        if (unlikely(!rq->sd)) {
+                                power_orig += power_of(cpu);
+                                power += power_of(cpu);
+                                continue;
+                        }
+
+                        sgp = rq->sd->groups->sgp;
+                        power_orig += sgp->power_orig;
+                        power += sgp->power;
                 }
         } else {
                 /*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 64522ecdfe0e..162b03ab0ad2 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,6 +33,21 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
+
+/*
+ * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
+ * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
+ * variable has two functions:
+ *
+ * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
+ *    timekeeping lock all at once. Only the CPU which is assigned to do the
+ *    update is handling it.
+ *
+ * 2) Hand off the duty in the NOHZ idle case by setting the value to
+ *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks
+ *    at it will take over and keep the time keeping alive.  The handover
+ *    procedure also covers cpu hotplug.
+ */
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 
 /*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..ea20f7d1ac2c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
 /*
  * NO HZ enabled ?
  */
-int tick_nohz_enabled __read_mostly = 1;
-
+static int tick_nohz_enabled __read_mostly = 1;
+int tick_nohz_active __read_mostly;
 /*
  * Enable / Disable tickless mode
  */
@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         ktime_t now, idle;
 
-        if (!tick_nohz_enabled)
+        if (!tick_nohz_active)
                 return -1;
 
         now = ktime_get();
@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         ktime_t now, iowait;
 
-        if (!tick_nohz_enabled)
+        if (!tick_nohz_active)
                 return -1;
 
         now = ktime_get();
@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
                 return false;
         }
 
-        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
+                ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
                 return false;
+        }
 
         if (need_resched())
                 return false;
@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
         local_irq_disable();
 
         ts = &__get_cpu_var(tick_cpu_sched);
-        /*
-         * set ts->inidle unconditionally. even if the system did not
-         * switch to nohz mode the cpu frequency governers rely on the
-         * update of the idle time accounting in tick_nohz_start_idle().
-         */
         ts->inidle = 1;
         __tick_nohz_idle_enter(ts);
 
@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
         struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t next;
 
-        if (!tick_nohz_enabled)
+        if (!tick_nohz_active)
                 return;
 
         local_irq_disable();
@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
                 local_irq_enable();
                 return;
         }
-
+        tick_nohz_active = 1;
         ts->nohz_mode = NOHZ_MODE_LOWRES;
 
         /*
@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
         }
 
 #ifdef CONFIG_NO_HZ_COMMON
-        if (tick_nohz_enabled)
+        if (tick_nohz_enabled) {
                 ts->nohz_mode = NOHZ_MODE_HIGHRES;
+                tick_nohz_active = 1;
+        }
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3abf53418b67..87b4f00284c9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
         tk->xtime_nsec -= remainder;
         tk->xtime_nsec += 1ULL << tk->shift;
         tk->ntp_error += remainder << tk->ntp_error_shift;
-
+        tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
diff --git a/kernel/timer.c b/kernel/timer.c
index 6582b82fa966..accfd241b9e5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1518,9 +1518,8 @@ static int init_timers_cpu(int cpu)
                 /*
                  * The APs use this path later in boot
                  */
-                base = kmalloc_node(sizeof(*base),
-                                    GFP_KERNEL | __GFP_ZERO,
-                                    cpu_to_node(cpu));
+                base = kzalloc_node(sizeof(*base), GFP_KERNEL,
+                                    cpu_to_node(cpu));
                 if (!base)
                         return -ENOMEM;
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 22fa55696760..0e9f9eaade2f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -367,9 +367,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-        if (unlikely(ftrace_disabled))
-                return -ENODEV;
-
         if (FTRACE_WARN_ON(ops == &global_ops))
                 return -EINVAL;
 
@@ -428,9 +425,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
         int ret;
 
-        if (ftrace_disabled)
-                return -ENODEV;
-
         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                 return -EBUSY;
 
@@ -2088,10 +2082,15 @@ static void ftrace_startup_enable(int command)
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
         bool hash_enable = true;
+        int ret;
 
         if (unlikely(ftrace_disabled))
                 return -ENODEV;
 
+        ret = __register_ftrace_function(ops);
+        if (ret)
+                return ret;
+
         ftrace_start_up++;
         command |= FTRACE_UPDATE_CALLS;
 
@@ -2113,12 +2112,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
         return 0;
 }
 
-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
         bool hash_disable = true;
+        int ret;
 
         if (unlikely(ftrace_disabled))
-                return;
+                return -ENODEV;
+
+        ret = __unregister_ftrace_function(ops);
+        if (ret)
+                return ret;
 
         ftrace_start_up--;
         /*
@@ -2153,9 +2157,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
         }
 
         if (!command || !ftrace_enabled)
-                return;
+                return 0;
 
         ftrace_run_update_code(command);
+        return 0;
 }
 
 static void ftrace_startup_sysctl(void)
@@ -3060,16 +3065,13 @@ static void __enable_ftrace_function_probe(void)
         if (i == FTRACE_FUNC_HASHSIZE)
                 return;
 
-        ret = __register_ftrace_function(&trace_probe_ops);
-        if (!ret)
-                ret = ftrace_startup(&trace_probe_ops, 0);
+        ret = ftrace_startup(&trace_probe_ops, 0);
 
         ftrace_probe_registered = 1;
 }
 
 static void __disable_ftrace_function_probe(void)
 {
-        int ret;
         int i;
 
         if (!ftrace_probe_registered)
@@ -3082,9 +3084,7 @@ static void __disable_ftrace_function_probe(void)
         }
 
         /* no more funcs left */
-        ret = __unregister_ftrace_function(&trace_probe_ops);
-        if (!ret)
-                ftrace_shutdown(&trace_probe_ops, 0);
+        ftrace_shutdown(&trace_probe_ops, 0);
 
         ftrace_probe_registered = 0;
 }
@@ -4366,12 +4366,15 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
 # define ftrace_startup(ops, command)                                  \
         ({                                                             \
-                (ops)->flags |= FTRACE_OPS_FL_ENABLED;                 \
-                0;                                                     \
+                int ___ret = __register_ftrace_function(ops);          \
+                if (!___ret)                                           \
+                        (ops)->flags |= FTRACE_OPS_FL_ENABLED;         \
+                ___ret;                                                \
         })
-# define ftrace_shutdown(ops, command)  do { } while (0)
+# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
+
 # define ftrace_startup_sysctl()        do { } while (0)
 # define ftrace_shutdown_sysctl()       do { } while (0)
 
@@ -4780,9 +4783,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
         mutex_lock(&ftrace_lock);
 
-        ret = __register_ftrace_function(ops);
-        if (!ret)
-                ret = ftrace_startup(ops, 0);
+        ret = ftrace_startup(ops, 0);
 
         mutex_unlock(&ftrace_lock);
 
@@ -4801,9 +4802,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
         int ret;
 
         mutex_lock(&ftrace_lock);
-        ret = __unregister_ftrace_function(ops);
-        if (!ret)
-                ftrace_shutdown(ops, 0);
+        ret = ftrace_shutdown(ops, 0);
         mutex_unlock(&ftrace_lock);
 
         return ret;
@@ -4997,6 +4996,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
         return NOTIFY_DONE;
 }
 
+/* Just a place holder for function graph */
+static struct ftrace_ops fgraph_ops __read_mostly = {
+        .func           = ftrace_stub,
+        .flags          = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
+                                FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                         trace_func_graph_ent_t entryfunc)
 {
@@ -5023,7 +5029,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
         ftrace_graph_return = retfunc;
         ftrace_graph_entry = entryfunc;
 
-        ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+        ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
 out:
         mutex_unlock(&ftrace_lock);
@@ -5040,7 +5046,7 @@ void unregister_ftrace_graph(void)
         ftrace_graph_active--;
         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
         ftrace_graph_entry = ftrace_graph_entry_stub;
-        ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+        ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
         unregister_pm_notifier(&ftrace_suspend_notifier);
         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
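
Example (illustrative, not from the patch): the ftrace hunks fold __register_ftrace_function()/__unregister_ftrace_function() into ftrace_startup()/ftrace_shutdown(), so any error now propagates out of register_ftrace_function() and unregister_ftrace_function() while the public API stays the same. A caller of that era would look roughly like this; example_* names are made up, and the callback signature is the four-argument form used by kernels of this vintage:

    #include <linux/ftrace.h>

    static void notrace example_trace_func(unsigned long ip,
                                           unsigned long parent_ip,
                                           struct ftrace_ops *op,
                                           struct pt_regs *regs)
    {
            /* called on entry of every traced kernel function */
    }

    static struct ftrace_ops example_ops = {
            .func   = example_trace_func,
            .flags  = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static int __init example_init(void)
    {
            /* a failure inside ftrace_startup() is now reported here */
            return register_ftrace_function(&example_ops);
    }

    static void __exit example_exit(void)
    {
            unregister_ftrace_function(&example_ops);
    }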
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 78e27e3b52ac..e854f420e033 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -24,6 +24,12 @@ static int total_ref_count;
 static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
                                  struct perf_event *p_event)
 {
+        if (tp_event->perf_perm) {
+                int ret = tp_event->perf_perm(tp_event, p_event);
+                if (ret)
+                        return ret;
+        }
+
         /* The ftrace function trace is allowed only for root. */
         if (ftrace_event_is_function(tp_event) &&
             perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
@@ -173,7 +179,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 int perf_trace_init(struct perf_event *p_event)
 {
         struct ftrace_event_call *tp_event;
-        int event_id = p_event->attr.config;
+        u64 event_id = p_event->attr.config;
         int ret = -EINVAL;
 
         mutex_lock(&event_mutex);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f919a2e21bf3..a11800ae96de 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2314,6 +2314,9 @@ int event_trace_del_tracer(struct trace_array *tr)
         /* Disable any running events */
         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
 
+        /* Access to events are within rcu_read_lock_sched() */
+        synchronize_sched();
+
         down_write(&trace_event_sem);
         __trace_remove_event_dirs(tr);
         debugfs_remove_recursive(tr->event_dir);
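
Example (illustrative, not from the patch): the synchronize_sched() added above, and removed from the per-syscall paths below, follows the usual sched-RCU teardown rule: readers hold rcu_read_lock_sched(), so the writer unpublishes the object and waits for every such section to finish before freeing. The generic shape, with made-up names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_event {
            int enabled;
    };

    static struct example_event __rcu *example_ev;

    /* reader side: non-sleeping, protected by sched-RCU */
    static int example_event_enabled(void)
    {
            struct example_event *ev;
            int ret = 0;

            rcu_read_lock_sched();
            ev = rcu_dereference_sched(example_ev);
            if (ev)
                    ret = ev->enabled;
            rcu_read_unlock_sched();

            return ret;
    }

    /* teardown: unpublish, wait for readers, then free */
    static void example_event_destroy(void)
    {
            struct example_event *ev = rcu_dereference_protected(example_ev, 1);

            RCU_INIT_POINTER(example_ev, NULL);
            synchronize_sched();    /* all rcu_read_lock_sched() sections done */
            kfree(ev);
    }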
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11bdf78..ea90eb5f6f17 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
         if (!tr->sys_refcount_enter)
                 unregister_trace_sys_enter(ftrace_syscall_enter, tr);
         mutex_unlock(&syscall_trace_lock);
-        /*
-         * Callers expect the event to be completely disabled on
-         * return, so wait for current handlers to finish.
-         */
-        synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
         if (!tr->sys_refcount_exit)
                 unregister_trace_sys_exit(ftrace_syscall_exit, tr);
         mutex_unlock(&syscall_trace_lock);
-        /*
-         * Callers expect the event to be completely disabled on
-         * return, so wait for current handlers to finish.
-         */
-        synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..c66912be990f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 /* I: attributes used when instantiating standard unbound pools on demand */
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
+/* I: attributes used when instantiating ordered pools on demand */
+static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
+
 struct workqueue_struct *system_wq __read_mostly;
 EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -518,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
-/* allocate ID and assign it to @pool */
+/**
+ * worker_pool_assign_id - allocate ID and assing it to @pool
+ * @pool: the pool pointer of interest
+ *
+ * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
+ * successfully, -errno on failure.
+ */
 static int worker_pool_assign_id(struct worker_pool *pool)
 {
         int ret;
 
         lockdep_assert_held(&wq_pool_mutex);
 
-        ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+        ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
+                        GFP_KERNEL);
         if (ret >= 0) {
                 pool->id = ret;
                 return 0;
@@ -1320,7 +1330,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 
         debug_work_activate(work);
 
-        /* if dying, only works from the same workqueue are allowed */
+        /* if draining, only works from the same workqueue are allowed */
         if (unlikely(wq->flags & __WQ_DRAINING) &&
             WARN_ON_ONCE(!is_chained_work(wq)))
                 return;
@@ -1736,16 +1746,17 @@ static struct worker *create_worker(struct worker_pool *pool)
         if (IS_ERR(worker->task))
                 goto fail;
 
+        set_user_nice(worker->task, pool->attrs->nice);
+
+        /* prevent userland from meddling with cpumask of workqueue workers */
+        worker->task->flags |= PF_NO_SETAFFINITY;
+
         /*
          * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
          * online CPUs. It'll be re-applied when any of the CPUs come up.
          */
-        set_user_nice(worker->task, pool->attrs->nice);
         set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
-        /* prevent userland from meddling with cpumask of workqueue workers */
-        worker->task->flags |= PF_NO_SETAFFINITY;
-
         /*
          * The caller is responsible for ensuring %POOL_DISASSOCIATED
          * remains stable across this function. See the comments above the
@@ -4106,7 +4117,7 @@ out_unlock:
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
         bool highpri = wq->flags & WQ_HIGHPRI;
-        int cpu;
+        int cpu, ret;
 
         if (!(wq->flags & WQ_UNBOUND)) {
                 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
@@ -4126,6 +4137,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
                         mutex_unlock(&wq->mutex);
                 }
                 return 0;
+        } else if (wq->flags & __WQ_ORDERED) {
+                ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+                /* there should only be single pwq for ordering guarantee */
+                WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+                              wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+                     "ordering guarantee broken for workqueue %s\n", wq->name);
+                return ret;
         } else {
                 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
         }
@@ -5009,10 +5027,6 @@ static int __init init_workqueues(void)
         int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
         int i, cpu;
 
-        /* make sure we have enough bits for OFFQ pool ID */
-        BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
-                     WORK_CPU_END * NR_STD_WORKER_POOLS);
-
         WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
@@ -5051,13 +5065,23 @@ static int __init init_workqueues(void)
                 }
         }
 
-        /* create default unbound wq attrs */
+        /* create default unbound and ordered wq attrs */
         for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
                 struct workqueue_attrs *attrs;
 
                 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
                 attrs->nice = std_nice[i];
                 unbound_std_wq_attrs[i] = attrs;
+
+                /*
+                 * An ordered wq should have only one pwq as ordering is
+                 * guaranteed by max_active which is enforced by pwqs.
+                 * Turn off NUMA so that dfl_pwq is used for all nodes.
+                 */
+                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+                attrs->nice = std_nice[i];
+                attrs->no_numa = true;
+                ordered_wq_attrs[i] = attrs;
         }
 
         system_wq = alloc_workqueue("events", 0, 0);
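
Example (illustrative, not from the patch): the workqueue hunks above give __WQ_ORDERED workqueues their own NUMA-disabled attrs so they end up with a single pool_workqueue and therefore strict one-at-a-time, FIFO execution. From a user's point of view that is the guarantee alloc_ordered_workqueue() promises; a minimal sketch, with made-up example_* names:

    #include <linux/workqueue.h>

    static void example_fn(struct work_struct *work)
    {
            /* runs strictly after all previously queued items have finished */
    }

    static DECLARE_WORK(example_work_a, example_fn);
    static DECLARE_WORK(example_work_b, example_fn);

    static struct workqueue_struct *example_ordered_wq;

    static int __init example_init(void)
    {
            /* expands to alloc_workqueue() with __WQ_ORDERED and max_active 1 */
            example_ordered_wq = alloc_ordered_workqueue("example_ordered", 0);
            if (!example_ordered_wq)
                    return -ENOMEM;

            /* a runs to completion before b starts, in queueing order */
            queue_work(example_ordered_wq, &example_work_a);
            queue_work(example_ordered_wq, &example_work_b);
            return 0;
    }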