author     Tejun Heo <tj@kernel.org>    2010-06-29 04:07:12 -0400
committer  Tejun Heo <tj@kernel.org>    2010-06-29 04:07:12 -0400
commit     a0a1a5fd4fb15ec61117c759fe9f5c16c53d9e9c (patch)
tree       6df8eddcaeff6cde892c8cae6bdfa7653805e144 /kernel
parent     1e19ffc63dbbaea7a7d1c63d99c38d3e5a4c7edf (diff)
workqueue: reimplement workqueue freeze using max_active
Currently, workqueue freezing is implemented by marking the worker
freezeable and calling try_to_freeze() from the dispatch loop.
Reimplement it using cwq->max_active so that the workqueue is frozen
instead of the worker.

* workqueue_struct->saved_max_active is added, which stores the
  max_active specified on initialization.

* On freeze, all cwq->max_active's are quenched to zero.  Freezing is
  complete when nr_active on all cwqs reaches zero.

* On thaw, all cwq->max_active's are restored to wq->saved_max_active
  and the worklist is repopulated.

This new implementation allows having a single shared pool of workers
per cpu.
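In outline, the new mechanism behaves like the stand-alone model below.
This is a simplified user-space sketch, not the kernel implementation:
the names (max_active, nr_active, saved_max_active) mirror the patch,
plain counters stand in for the real worklist and delayed_works lists,
and work_done() plays the combined role of cwq_dec_nr_in_flight() and
cwq_activate_first_delayed().

/* Simplified user-space model of the frozen/thawed cwq states. */
#include <stdbool.h>
#include <stdio.h>

struct cwq {
        int max_active;         /* 0 while frozen */
        int nr_active;          /* works currently on the worklist */
        int nr_delayed;         /* works parked on delayed_works */
        int saved_max_active;   /* max_active given at creation time */
};

/* queue_work(): activate only while under the max_active limit. */
static void queue_work(struct cwq *cwq)
{
        if (cwq->nr_active < cwq->max_active)
                cwq->nr_active++;       /* goes straight to the worklist */
        else
                cwq->nr_delayed++;      /* parked; a frozen wq always takes this branch */
}

/* work_done(): retire one work, pulling in a delayed one if permitted. */
static void work_done(struct cwq *cwq)
{
        cwq->nr_active--;
        if (cwq->nr_delayed > 0 && cwq->nr_active < cwq->max_active) {
                cwq->nr_delayed--;      /* cwq_activate_first_delayed() */
                cwq->nr_active++;
        }
}

static void freeze_begin(struct cwq *cwq)
{
        cwq->max_active = 0;            /* nr_active can now only decrease */
}

static bool freeze_busy(const struct cwq *cwq)
{
        return cwq->nr_active > 0;
}

static void thaw(struct cwq *cwq)
{
        cwq->max_active = cwq->saved_max_active;
        while (cwq->nr_delayed > 0 && cwq->nr_active < cwq->max_active) {
                cwq->nr_delayed--;      /* repopulate the worklist */
                cwq->nr_active++;
        }
}

int main(void)
{
        struct cwq cwq = { .max_active = 2, .saved_max_active = 2 };

        queue_work(&cwq);
        queue_work(&cwq);               /* both active: nr_active == 2 */
        freeze_begin(&cwq);
        queue_work(&cwq);               /* frozen: parked on the delayed list */
        work_done(&cwq);
        work_done(&cwq);                /* drains without activating the parked work */
        printf("busy after drain: %d\n", freeze_busy(&cwq));   /* 0 */
        thaw(&cwq);
        printf("active after thaw: %d\n", cwq.nr_active);      /* 1 */
        return 0;
}

The property freeze_workqueues_busy() relies on is visible in the model:
once max_active is zero, queueing can only grow the delayed list, so
nr_active decreases monotonically until it reaches zero.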
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
 kernel/power/process.c |  21
 kernel/workqueue.c     | 163
 2 files changed, 172 insertions(+), 12 deletions(-)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 71ae29052ab6..028a99598f49 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -15,6 +15,7 @@
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
 #include <linux/delay.h>
+#include <linux/workqueue.h>
 
 /*
  * Timeout for stopping processes
@@ -35,6 +36,7 @@ static int try_to_freeze_tasks(bool sig_only)
 	struct task_struct *g, *p;
 	unsigned long end_time;
 	unsigned int todo;
+	bool wq_busy = false;
 	struct timeval start, end;
 	u64 elapsed_csecs64;
 	unsigned int elapsed_csecs;
@@ -42,6 +44,10 @@ static int try_to_freeze_tasks(bool sig_only)
 	do_gettimeofday(&start);
 
 	end_time = jiffies + TIMEOUT;
+
+	if (!sig_only)
+		freeze_workqueues_begin();
+
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
@@ -63,6 +69,12 @@ static int try_to_freeze_tasks(bool sig_only)
 			todo++;
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
+
+		if (!sig_only) {
+			wq_busy = freeze_workqueues_busy();
+			todo += wq_busy;
+		}
+
 		if (!todo || time_after(jiffies, end_time))
 			break;
 
@@ -86,8 +98,12 @@ static int try_to_freeze_tasks(bool sig_only)
 		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
-		       "(%d tasks refusing to freeze):\n",
-		       elapsed_csecs / 100, elapsed_csecs % 100, todo);
+		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
+		       elapsed_csecs / 100, elapsed_csecs % 100,
+		       todo - wq_busy, wq_busy);
+
+		thaw_workqueues();
+
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			task_lock(p);
@@ -157,6 +173,7 @@ void thaw_processes(void)
 	oom_killer_enable();
 
 	printk("Restarting tasks ... ");
+	thaw_workqueues();
 	thaw_tasks(true);
 	thaw_tasks(false);
 	schedule();
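On the driver side nothing changes except how the workqueue is created;
work functions need no freezer calls of their own anymore.  A
hypothetical example against the workqueue API of this period
(create_freezeable_workqueue() was the WQ_FREEZEABLE convenience
wrapper at the time; my_wq, my_work and my_work_fn are made up for
illustration):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/*
	 * No try_to_freeze() here: while the system is frozen the
	 * cwq's max_active is 0, so this callback simply is not
	 * dispatched until thaw_workqueues() runs.
	 */
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	my_wq = create_freezeable_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");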
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e541b5db67dd..4d059c532792 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -78,7 +78,7 @@ struct cpu_workqueue_struct {
 	int			nr_in_flight[WORK_NR_COLORS];
 						/* L: nr of in_flight works */
 	int			nr_active;	/* L: nr of active works */
-	int			max_active;	/* I: max active works */
+	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
 };
 
@@ -108,6 +108,7 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */
 
+	int			saved_max_active; /* I: saved cwq max_active */
 	const char		*name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
@@ -228,6 +229,7 @@ static inline void debug_work_deactivate(struct work_struct *work) { }
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 static DEFINE_PER_CPU(struct ida, worker_ida);
+static bool workqueue_freezing;		/* W: have wqs started freezing? */
 
 static int worker_thread(void *__worker);
 
@@ -745,19 +747,13 @@ static int worker_thread(void *__worker)
 	struct cpu_workqueue_struct *cwq = worker->cwq;
 	DEFINE_WAIT(wait);
 
-	if (cwq->wq->flags & WQ_FREEZEABLE)
-		set_freezable();
-
 	for (;;) {
 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-		if (!freezing(current) &&
-		    !kthread_should_stop() &&
+		if (!kthread_should_stop() &&
 		    list_empty(&cwq->worklist))
 			schedule();
 		finish_wait(&cwq->more_work, &wait);
 
-		try_to_freeze();
-
 		if (kthread_should_stop())
 			break;
 
@@ -1553,6 +1549,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		goto err;
 
 	wq->flags = flags;
+	wq->saved_max_active = max_active;
 	mutex_init(&wq->flush_mutex);
 	atomic_set(&wq->nr_cwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->flusher_queue);
@@ -1591,8 +1588,19 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		failed = true;
 	}
 
+	/*
+	 * workqueue_lock protects global freeze state and workqueues
+	 * list.  Grab it, set max_active accordingly and add the new
+	 * workqueue to workqueues list.
+	 */
 	spin_lock(&workqueue_lock);
+
+	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+		for_each_possible_cpu(cpu)
+			get_cwq(cpu, wq)->max_active = 0;
+
 	list_add(&wq->list, &workqueues);
+
 	spin_unlock(&workqueue_lock);
 
 	cpu_maps_update_done();
@@ -1621,14 +1629,18 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	int cpu;
 
+	flush_workqueue(wq);
+
+	/*
+	 * wq list is used to freeze wq, remove from list after
+	 * flushing is complete in case freeze races us.
+	 */
 	cpu_maps_update_begin();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 	cpu_maps_update_done();
 
-	flush_workqueue(wq);
-
 	for_each_possible_cpu(cpu) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 		int i;
@@ -1722,6 +1734,137 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 EXPORT_SYMBOL_GPL(work_on_cpu);
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_FREEZER
+
+/**
+ * freeze_workqueues_begin - begin freezing workqueues
+ *
+ * Start freezing workqueues.  After this function returns, all
+ * freezeable workqueues will queue new works to their delayed_works
+ * list instead of the cwq worklist.
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock and cwq->lock's.
+ */
+void freeze_workqueues_begin(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+
+	spin_lock(&workqueue_lock);
+
+	BUG_ON(workqueue_freezing);
+	workqueue_freezing = true;
+
+	for_each_possible_cpu(cpu) {
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			spin_lock_irq(&cwq->lock);
+
+			if (wq->flags & WQ_FREEZEABLE)
+				cwq->max_active = 0;
+
+			spin_unlock_irq(&cwq->lock);
+		}
+	}
+
+	spin_unlock(&workqueue_lock);
+}
+
+/**
+ * freeze_workqueues_busy - are freezeable workqueues still busy?
+ *
+ * Check whether freezing is complete.  This function must be called
+ * between freeze_workqueues_begin() and thaw_workqueues().
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock.
+ *
+ * RETURNS:
+ * %true if some freezeable workqueues are still busy.  %false if
+ * freezing is complete.
+ */
+bool freeze_workqueues_busy(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+	bool busy = false;
+
+	spin_lock(&workqueue_lock);
+
+	BUG_ON(!workqueue_freezing);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * nr_active is monotonically decreasing.  It's safe
+		 * to peek without lock.
+		 */
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			if (!(wq->flags & WQ_FREEZEABLE))
+				continue;
+
+			BUG_ON(cwq->nr_active < 0);
+			if (cwq->nr_active) {
+				busy = true;
+				goto out_unlock;
+			}
+		}
+	}
+out_unlock:
+	spin_unlock(&workqueue_lock);
+	return busy;
+}
+
+/**
+ * thaw_workqueues - thaw workqueues
+ *
+ * Thaw workqueues.  Normal queueing is restored and all collected
+ * frozen works are transferred to their respective cwq worklists.
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock and cwq->lock's.
+ */
+void thaw_workqueues(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+
+	spin_lock(&workqueue_lock);
+
+	if (!workqueue_freezing)
+		goto out_unlock;
+
+	for_each_possible_cpu(cpu) {
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			if (!(wq->flags & WQ_FREEZEABLE))
+				continue;
+
+			spin_lock_irq(&cwq->lock);
+
+			/* restore max_active and repopulate worklist */
+			cwq->max_active = wq->saved_max_active;
+
+			while (!list_empty(&cwq->delayed_works) &&
+			       cwq->nr_active < cwq->max_active)
+				cwq_activate_first_delayed(cwq);
+
+			wake_up(&cwq->more_work);
+
+			spin_unlock_irq(&cwq->lock);
+		}
+	}
+
+	workqueue_freezing = false;
+out_unlock:
+	spin_unlock(&workqueue_lock);
+}
+#endif /* CONFIG_FREEZER */
+
 void __init init_workqueues(void)
 {
 	unsigned int cpu;
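Putting both halves together, the suspend path now quiesces freezeable
workqueues with the same begin/poll/thaw pattern it uses for tasks.  A
condensed model of the flow added to try_to_freeze_tasks() above, not
the actual kernel code: count_unfrozen_tasks() is a placeholder for the
tasklist scan, and the pacing and return values are simplified.

/* Condensed model of the freezer-side control flow. */
static int freeze_with_workqueues(bool sig_only)
{
	unsigned long end_time = jiffies + TIMEOUT;
	bool wq_busy = false;
	unsigned int todo;

	if (!sig_only)
		freeze_workqueues_begin();	/* all freezeable max_active -> 0 */

	while (true) {
		todo = count_unfrozen_tasks();	/* placeholder for tasklist scan */
		if (!sig_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;	/* a busy wq counts as one holdout */
		}
		if (!todo)
			return 0;		/* freezing complete */
		if (time_after(jiffies, end_time)) {
			thaw_workqueues();	/* roll workqueues back on failure */
			return -EBUSY;
		}
		msleep(10);			/* brief pause between scans (simplified) */
	}
}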