diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-03 21:00:13 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-03 21:00:13 -0400 |
commit | d92cd810e64aa7cf22b05f0ea1c7d3e8dbae75fe (patch) | |
tree | 592e040010a30d1dbce4e54eb597011af0df290e | |
parent | a23867f1d2de572f84b459651dfe99fa9e79fadf (diff) | |
parent | f75da8a8a918d7c343a2ec95d1ed99e5689e0f23 (diff) |
Merge branch 'for-4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo:
"rcu_work addition and a couple trivial changes"
* 'for-4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: remove the comment about the old manager_arb mutex
workqueue: fix the comments of nr_idle
fs/aio: Use rcu_work instead of explicit rcu and work item
cgroup: Use rcu_work instead of explicit rcu and work item
RCU, workqueue: Implement rcu_work
-rw-r--r-- | fs/aio.c | 21 | ||||
-rw-r--r-- | include/linux/cgroup-defs.h | 2 | ||||
-rw-r--r-- | include/linux/workqueue.h | 23 | ||||
-rw-r--r-- | kernel/cgroup/cgroup.c | 21 | ||||
-rw-r--r-- | kernel/workqueue.c | 60 |
5 files changed, 93 insertions, 34 deletions
@@ -115,8 +115,7 @@ struct kioctx { | |||
115 | struct page **ring_pages; | 115 | struct page **ring_pages; |
116 | long nr_pages; | 116 | long nr_pages; |
117 | 117 | ||
118 | struct rcu_head free_rcu; | 118 | struct rcu_work free_rwork; /* see free_ioctx() */ |
119 | struct work_struct free_work; /* see free_ioctx() */ | ||
120 | 119 | ||
121 | /* | 120 | /* |
122 | * signals when all in-flight requests are done | 121 | * signals when all in-flight requests are done |
@@ -592,13 +591,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) | |||
592 | /* | 591 | /* |
593 | * free_ioctx() should be RCU delayed to synchronize against the RCU | 592 | * free_ioctx() should be RCU delayed to synchronize against the RCU |
594 | * protected lookup_ioctx() and also needs process context to call | 593 | * protected lookup_ioctx() and also needs process context to call |
595 | * aio_free_ring(), so the double bouncing through kioctx->free_rcu and | 594 | * aio_free_ring(). Use rcu_work. |
596 | * ->free_work. | ||
597 | */ | 595 | */ |
598 | static void free_ioctx(struct work_struct *work) | 596 | static void free_ioctx(struct work_struct *work) |
599 | { | 597 | { |
600 | struct kioctx *ctx = container_of(work, struct kioctx, free_work); | 598 | struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx, |
601 | 599 | free_rwork); | |
602 | pr_debug("freeing %p\n", ctx); | 600 | pr_debug("freeing %p\n", ctx); |
603 | 601 | ||
604 | aio_free_ring(ctx); | 602 | aio_free_ring(ctx); |
@@ -608,14 +606,6 @@ static void free_ioctx(struct work_struct *work) | |||
608 | kmem_cache_free(kioctx_cachep, ctx); | 606 | kmem_cache_free(kioctx_cachep, ctx); |
609 | } | 607 | } |
610 | 608 | ||
611 | static void free_ioctx_rcufn(struct rcu_head *head) | ||
612 | { | ||
613 | struct kioctx *ctx = container_of(head, struct kioctx, free_rcu); | ||
614 | |||
615 | INIT_WORK(&ctx->free_work, free_ioctx); | ||
616 | schedule_work(&ctx->free_work); | ||
617 | } | ||
618 | |||
619 | static void free_ioctx_reqs(struct percpu_ref *ref) | 609 | static void free_ioctx_reqs(struct percpu_ref *ref) |
620 | { | 610 | { |
621 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); | 611 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); |
@@ -625,7 +615,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref) | |||
625 | complete(&ctx->rq_wait->comp); | 615 | complete(&ctx->rq_wait->comp); |
626 | 616 | ||
627 | /* Synchronize against RCU protected table->table[] dereferences */ | 617 | /* Synchronize against RCU protected table->table[] dereferences */ |
628 | call_rcu(&ctx->free_rcu, free_ioctx_rcufn); | 618 | INIT_RCU_WORK(&ctx->free_rwork, free_ioctx); |
619 | queue_rcu_work(system_wq, &ctx->free_rwork); | ||
629 | } | 620 | } |
630 | 621 | ||
631 | /* | 622 | /* |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index f8e76d01a5ad..dc5b70449dc6 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
@@ -151,8 +151,8 @@ struct cgroup_subsys_state { | |||
151 | atomic_t online_cnt; | 151 | atomic_t online_cnt; |
152 | 152 | ||
153 | /* percpu_ref killing and RCU release */ | 153 | /* percpu_ref killing and RCU release */ |
154 | struct rcu_head rcu_head; | ||
155 | struct work_struct destroy_work; | 154 | struct work_struct destroy_work; |
155 | struct rcu_work destroy_rwork; | ||
156 | 156 | ||
157 | /* | 157 | /* |
158 | * PI: the parent css. Placed here for cache proximity to following | 158 | * PI: the parent css. Placed here for cache proximity to following |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0c3301421c57..39a0e215022a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
14 | #include <linux/atomic.h> | 14 | #include <linux/atomic.h> |
15 | #include <linux/cpumask.h> | 15 | #include <linux/cpumask.h> |
16 | #include <linux/rcupdate.h> | ||
16 | 17 | ||
17 | struct workqueue_struct; | 18 | struct workqueue_struct; |
18 | 19 | ||
@@ -120,6 +121,14 @@ struct delayed_work { | |||
120 | int cpu; | 121 | int cpu; |
121 | }; | 122 | }; |
122 | 123 | ||
124 | struct rcu_work { | ||
125 | struct work_struct work; | ||
126 | struct rcu_head rcu; | ||
127 | |||
128 | /* target workqueue ->rcu uses to queue ->work */ | ||
129 | struct workqueue_struct *wq; | ||
130 | }; | ||
131 | |||
123 | /** | 132 | /** |
124 | * struct workqueue_attrs - A struct for workqueue attributes. | 133 | * struct workqueue_attrs - A struct for workqueue attributes. |
125 | * | 134 | * |
@@ -151,6 +160,11 @@ static inline struct delayed_work *to_delayed_work(struct work_struct *work) | |||
151 | return container_of(work, struct delayed_work, work); | 160 | return container_of(work, struct delayed_work, work); |
152 | } | 161 | } |
153 | 162 | ||
163 | static inline struct rcu_work *to_rcu_work(struct work_struct *work) | ||
164 | { | ||
165 | return container_of(work, struct rcu_work, work); | ||
166 | } | ||
167 | |||
154 | struct execute_work { | 168 | struct execute_work { |
155 | struct work_struct work; | 169 | struct work_struct work; |
156 | }; | 170 | }; |
@@ -266,6 +280,12 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
266 | #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ | 280 | #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ |
267 | __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) | 281 | __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) |
268 | 282 | ||
283 | #define INIT_RCU_WORK(_work, _func) \ | ||
284 | INIT_WORK(&(_work)->work, (_func)) | ||
285 | |||
286 | #define INIT_RCU_WORK_ONSTACK(_work, _func) \ | ||
287 | INIT_WORK_ONSTACK(&(_work)->work, (_func)) | ||
288 | |||
269 | /** | 289 | /** |
270 | * work_pending - Find out whether a work item is currently pending | 290 | * work_pending - Find out whether a work item is currently pending |
271 | * @work: The work item in question | 291 | * @work: The work item in question |
@@ -447,6 +467,7 @@ extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
447 | struct delayed_work *work, unsigned long delay); | 467 | struct delayed_work *work, unsigned long delay); |
448 | extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, | 468 | extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, |
449 | struct delayed_work *dwork, unsigned long delay); | 469 | struct delayed_work *dwork, unsigned long delay); |
470 | extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); | ||
450 | 471 | ||
451 | extern void flush_workqueue(struct workqueue_struct *wq); | 472 | extern void flush_workqueue(struct workqueue_struct *wq); |
452 | extern void drain_workqueue(struct workqueue_struct *wq); | 473 | extern void drain_workqueue(struct workqueue_struct *wq); |
@@ -462,6 +483,8 @@ extern bool flush_delayed_work(struct delayed_work *dwork); | |||
462 | extern bool cancel_delayed_work(struct delayed_work *dwork); | 483 | extern bool cancel_delayed_work(struct delayed_work *dwork); |
463 | extern bool cancel_delayed_work_sync(struct delayed_work *dwork); | 484 | extern bool cancel_delayed_work_sync(struct delayed_work *dwork); |
464 | 485 | ||
486 | extern bool flush_rcu_work(struct rcu_work *rwork); | ||
487 | |||
465 | extern void workqueue_set_max_active(struct workqueue_struct *wq, | 488 | extern void workqueue_set_max_active(struct workqueue_struct *wq, |
466 | int max_active); | 489 | int max_active); |
467 | extern struct work_struct *current_work(void); | 490 | extern struct work_struct *current_work(void); |
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 4bfb2908ec15..a662bfcbea0e 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c | |||
@@ -4524,10 +4524,10 @@ static struct cftype cgroup_base_files[] = { | |||
4524 | * and thus involve punting to css->destroy_work adding two additional | 4524 | * and thus involve punting to css->destroy_work adding two additional |
4525 | * steps to the already complex sequence. | 4525 | * steps to the already complex sequence. |
4526 | */ | 4526 | */ |
4527 | static void css_free_work_fn(struct work_struct *work) | 4527 | static void css_free_rwork_fn(struct work_struct *work) |
4528 | { | 4528 | { |
4529 | struct cgroup_subsys_state *css = | 4529 | struct cgroup_subsys_state *css = container_of(to_rcu_work(work), |
4530 | container_of(work, struct cgroup_subsys_state, destroy_work); | 4530 | struct cgroup_subsys_state, destroy_rwork); |
4531 | struct cgroup_subsys *ss = css->ss; | 4531 | struct cgroup_subsys *ss = css->ss; |
4532 | struct cgroup *cgrp = css->cgroup; | 4532 | struct cgroup *cgrp = css->cgroup; |
4533 | 4533 | ||
@@ -4573,15 +4573,6 @@ static void css_free_work_fn(struct work_struct *work) | |||
4573 | } | 4573 | } |
4574 | } | 4574 | } |
4575 | 4575 | ||
4576 | static void css_free_rcu_fn(struct rcu_head *rcu_head) | ||
4577 | { | ||
4578 | struct cgroup_subsys_state *css = | ||
4579 | container_of(rcu_head, struct cgroup_subsys_state, rcu_head); | ||
4580 | |||
4581 | INIT_WORK(&css->destroy_work, css_free_work_fn); | ||
4582 | queue_work(cgroup_destroy_wq, &css->destroy_work); | ||
4583 | } | ||
4584 | |||
4585 | static void css_release_work_fn(struct work_struct *work) | 4576 | static void css_release_work_fn(struct work_struct *work) |
4586 | { | 4577 | { |
4587 | struct cgroup_subsys_state *css = | 4578 | struct cgroup_subsys_state *css = |
@@ -4631,7 +4622,8 @@ static void css_release_work_fn(struct work_struct *work) | |||
4631 | 4622 | ||
4632 | mutex_unlock(&cgroup_mutex); | 4623 | mutex_unlock(&cgroup_mutex); |
4633 | 4624 | ||
4634 | call_rcu(&css->rcu_head, css_free_rcu_fn); | 4625 | INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); |
4626 | queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); | ||
4635 | } | 4627 | } |
4636 | 4628 | ||
4637 | static void css_release(struct percpu_ref *ref) | 4629 | static void css_release(struct percpu_ref *ref) |
@@ -4765,7 +4757,8 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp, | |||
4765 | err_list_del: | 4757 | err_list_del: |
4766 | list_del_rcu(&css->sibling); | 4758 | list_del_rcu(&css->sibling); |
4767 | err_free_css: | 4759 | err_free_css: |
4768 | call_rcu(&css->rcu_head, css_free_rcu_fn); | 4760 | INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); |
4761 | queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); | ||
4769 | return ERR_PTR(err); | 4762 | return ERR_PTR(err); |
4770 | } | 4763 | } |
4771 | 4764 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 254e636a3d6b..ca7959be8aaa 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -153,10 +153,9 @@ struct worker_pool { | |||
153 | unsigned long watchdog_ts; /* L: watchdog timestamp */ | 153 | unsigned long watchdog_ts; /* L: watchdog timestamp */ |
154 | 154 | ||
155 | struct list_head worklist; /* L: list of pending works */ | 155 | struct list_head worklist; /* L: list of pending works */ |
156 | int nr_workers; /* L: total number of workers */ | ||
157 | 156 | ||
158 | /* nr_idle includes the ones off idle_list for rebinding */ | 157 | int nr_workers; /* L: total number of workers */ |
159 | int nr_idle; /* L: currently idle ones */ | 158 | int nr_idle; /* L: currently idle workers */ |
160 | 159 | ||
161 | struct list_head idle_list; /* X: list of idle workers */ | 160 | struct list_head idle_list; /* X: list of idle workers */ |
162 | struct timer_list idle_timer; /* L: worker idle timeout */ | 161 | struct timer_list idle_timer; /* L: worker idle timeout */ |
@@ -166,7 +165,6 @@ struct worker_pool { | |||
166 | DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); | 165 | DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); |
167 | /* L: hash of busy workers */ | 166 | /* L: hash of busy workers */ |
168 | 167 | ||
169 | /* see manage_workers() for details on the two manager mutexes */ | ||
170 | struct worker *manager; /* L: purely informational */ | 168 | struct worker *manager; /* L: purely informational */ |
171 | struct mutex attach_mutex; /* attach/detach exclusion */ | 169 | struct mutex attach_mutex; /* attach/detach exclusion */ |
172 | struct list_head workers; /* A: attached workers */ | 170 | struct list_head workers; /* A: attached workers */ |
@@ -1604,6 +1602,40 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
1604 | } | 1602 | } |
1605 | EXPORT_SYMBOL_GPL(mod_delayed_work_on); | 1603 | EXPORT_SYMBOL_GPL(mod_delayed_work_on); |
1606 | 1604 | ||
1605 | static void rcu_work_rcufn(struct rcu_head *rcu) | ||
1606 | { | ||
1607 | struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); | ||
1608 | |||
1609 | /* read the comment in __queue_work() */ | ||
1610 | local_irq_disable(); | ||
1611 | __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); | ||
1612 | local_irq_enable(); | ||
1613 | } | ||
1614 | |||
1615 | /** | ||
1616 | * queue_rcu_work - queue work after an RCU grace period | ||
1617 | * @wq: workqueue to use | ||
1618 | * @rwork: work to queue | ||
1619 | * | ||
1620 | * Return: %false if @rwork was already pending, %true otherwise. Note | ||
1621 | * that a full RCU grace period is guaranteed only after a %true return. | ||
1622 | * While @rwork is guaranteed to be executed after a %false return, the | ||
1623 | * execution may happen before a full RCU grace period has passed. | ||
1624 | */ | ||
1625 | bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) | ||
1626 | { | ||
1627 | struct work_struct *work = &rwork->work; | ||
1628 | |||
1629 | if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { | ||
1630 | rwork->wq = wq; | ||
1631 | call_rcu(&rwork->rcu, rcu_work_rcufn); | ||
1632 | return true; | ||
1633 | } | ||
1634 | |||
1635 | return false; | ||
1636 | } | ||
1637 | EXPORT_SYMBOL(queue_rcu_work); | ||
1638 | |||
1607 | /** | 1639 | /** |
1608 | * worker_enter_idle - enter idle state | 1640 | * worker_enter_idle - enter idle state |
1609 | * @worker: worker which is entering idle state | 1641 | * @worker: worker which is entering idle state |
@@ -3001,6 +3033,26 @@ bool flush_delayed_work(struct delayed_work *dwork) | |||
3001 | } | 3033 | } |
3002 | EXPORT_SYMBOL(flush_delayed_work); | 3034 | EXPORT_SYMBOL(flush_delayed_work); |
3003 | 3035 | ||
3036 | /** | ||
3037 | * flush_rcu_work - wait for a rwork to finish executing the last queueing | ||
3038 | * @rwork: the rcu work to flush | ||
3039 | * | ||
3040 | * Return: | ||
3041 | * %true if flush_rcu_work() waited for the work to finish execution, | ||
3042 | * %false if it was already idle. | ||
3043 | */ | ||
3044 | bool flush_rcu_work(struct rcu_work *rwork) | ||
3045 | { | ||
3046 | if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { | ||
3047 | rcu_barrier(); | ||
3048 | flush_work(&rwork->work); | ||
3049 | return true; | ||
3050 | } else { | ||
3051 | return flush_work(&rwork->work); | ||
3052 | } | ||
3053 | } | ||
3054 | EXPORT_SYMBOL(flush_rcu_work); | ||
3055 | |||
3004 | static bool __cancel_work(struct work_struct *work, bool is_dwork) | 3056 | static bool __cancel_work(struct work_struct *work, bool is_dwork) |
3005 | { | 3057 | { |
3006 | unsigned long flags; | 3058 | unsigned long flags; |