Diffstat (limited to 'kernel')
-rw-r--r--  kernel/debug/debug_core.c     2
-rw-r--r--  kernel/debug/kdb/kdb_main.c   2
-rw-r--r--  kernel/pm_qos_params.c       12
-rw-r--r--  kernel/power/poweroff.c       2
-rw-r--r--  kernel/sched_fair.c           2
-rw-r--r--  kernel/workqueue.c           53
6 files changed, 50 insertions, 23 deletions
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 3c2d4972d235..de407c78178d 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -741,7 +741,7 @@ static struct console kgdbcons = {
 };
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+static void sysrq_handle_dbg(int key)
 {
         if (!dbg_io_ops) {
                 printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 28b844118bbd..caf057a3de0e 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
         if (argc != 1)
                 return KDB_ARGCOUNT;
         kdb_trap_printk++;
-        __handle_sysrq(*argv[1], NULL, 0);
+        __handle_sysrq(*argv[1], false);
         kdb_trap_printk--;
 
         return 0;
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 996a4dec5f96..b7e4c362361b 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
 /**
  * pm_qos_add_request - inserts new qos request into the list
- * @pm_qos_class: identifies which list of qos request to us
+ * @dep: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
  * @value: defines the qos request
  *
  * This function inserts a new entry in the pm_qos_class list of requested qos
  * performance characteristics.  It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters, and returns the pm_qos_request list
- * element as a handle for use in updating and removal.  Call needs to save
- * this handle for later use.
+ * for the pm_qos_class of parameters and initializes the pm_qos_request_list
+ * handle.  Caller needs to save this handle for later use in updates and
+ * removal.
  */
+
 void pm_qos_add_request(struct pm_qos_request_list *dep,
                         int pm_qos_class, s32 value)
 {
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 
         pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
         if (pm_qos_class >= 0) {
-                struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
+                struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
                 if (!req)
                         return -ENOMEM;
 
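The pm_qos hunks above switch pm_qos_add_request() to filling in a caller-allocated handle (and fix the swapped kzalloc() arguments: the size comes first, then the gfp flags). A hedged usage sketch against the updated API; the handle name and latency values are made up for illustration:

#include <linux/pm_qos_params.h>

static struct pm_qos_request_list example_qos_req;      /* hypothetical */

/* driver init: bound CPU DMA latency (the 50 usec value is arbitrary) */
pm_qos_add_request(&example_qos_req, PM_QOS_CPU_DMA_LATENCY, 50);

/* later: tighten the constraint while latency-sensitive I/O runs */
pm_qos_update_request(&example_qos_req, 20);

/* teardown: drop the constraint and recompute the aggregate */
pm_qos_remove_request(&example_qos_req);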
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index e8b337006276..d52359374e85 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)
 
 static DECLARE_WORK(poweroff_work, do_poweroff);
 
-static void handle_poweroff(int key, struct tty_struct *tty)
+static void handle_poweroff(int key)
 {
         /* run sysrq poweroff on boot cpu */
         schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 806d1b227a21..ab661ebc4895 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3752,6 +3752,8 @@ static void task_fork_fair(struct task_struct *p)
 
         raw_spin_lock_irqsave(&rq->lock, flags);
 
+        update_rq_clock(rq);
+
         if (unlikely(task_cpu(p) != this_cpu))
                 __set_task_cpu(p, this_cpu);
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8bd600c020e5..727f24e563ae 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -90,7 +90,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
         cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)     cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask)  for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)   alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)   zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)          free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
         struct global_cwq *gcwq;
         struct cpu_workqueue_struct *cwq;
         struct list_head *worklist;
+        unsigned int work_flags;
         unsigned long flags;
 
         debug_work_activate(work);
 
+        if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+                return;
+
         /* determine gcwq to use */
         if (!(wq->flags & WQ_UNBOUND)) {
                 struct global_cwq *last_gcwq;
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
         BUG_ON(!list_empty(&work->entry));
 
         cwq->nr_in_flight[cwq->work_color]++;
+        work_flags = work_color_to_flags(cwq->work_color);
 
         if (likely(cwq->nr_active < cwq->max_active)) {
                 cwq->nr_active++;
                 worklist = gcwq_determine_ins_pos(gcwq, cwq);
-        } else
+        } else {
+                work_flags |= WORK_STRUCT_DELAYED;
                 worklist = &cwq->delayed_works;
+        }
 
-        insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+        insert_work(cwq, work, worklist, work_flags);
 
         spin_unlock_irqrestore(&gcwq->lock, flags);
 }
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
         struct global_cwq *gcwq = worker->gcwq;
         struct task_struct *task = worker->task;
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         if (!need_to_create_worker(gcwq))
                 return false;
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
         struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
         move_linked_works(work, pos, NULL);
+        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
         cwq->nr_active++;
 }
 
@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                 bool delayed)
 {
         /* ignore uncolored works */
         if (color == WORK_NO_COLOR)
                 return;
 
         cwq->nr_in_flight[color]--;
-        cwq->nr_active--;
 
-        if (!list_empty(&cwq->delayed_works)) {
-                /* one down, submit a delayed one */
-                if (cwq->nr_active < cwq->max_active)
-                        cwq_activate_first_delayed(cwq);
+        if (!delayed) {
+                cwq->nr_active--;
+                if (!list_empty(&cwq->delayed_works)) {
+                        /* one down, submit a delayed one */
+                        if (cwq->nr_active < cwq->max_active)
+                                cwq_activate_first_delayed(cwq);
+                }
         }
 
         /* is flush in progress and are we at the flushing tip? */
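The cwq_dec_nr_in_flight() change above is the heart of the nr_active fix: a work parked on delayed_works never incremented nr_active, so cancelling it must not decrement the counter. A small userspace model of the accounting, hypothetical and not kernel code, showing the underflow the new @delayed argument prevents:

#include <assert.h>
#include <stdbool.h>

struct cwq_model {
        int nr_active;
        int max_active;
        int nr_delayed;         /* stands in for the delayed_works list */
};

/* queue a work; returns true if it was parked as delayed */
static bool queue(struct cwq_model *cwq)
{
        if (cwq->nr_active < cwq->max_active) {
                cwq->nr_active++;
                return false;
        }
        cwq->nr_delayed++;      /* would carry WORK_STRUCT_DELAYED */
        return true;
}

/* a work completed or was cancelled; @delayed mirrors the new argument */
static void dec_nr_in_flight(struct cwq_model *cwq, bool delayed)
{
        if (!delayed) {
                cwq->nr_active--;
                if (cwq->nr_delayed > 0 && cwq->nr_active < cwq->max_active) {
                        cwq->nr_delayed--;      /* activate first delayed work */
                        cwq->nr_active++;
                }
        }
}

int main(void)
{
        struct cwq_model cwq = { .max_active = 1 };
        bool d1 = queue(&cwq);          /* goes active, nr_active == 1 */
        bool d2 = queue(&cwq);          /* parked as delayed */

        /* cancel the parked work: unlink it, then account with @delayed */
        cwq.nr_delayed--;
        dec_nr_in_flight(&cwq, d2);     /* no nr_active change: never counted */

        dec_nr_in_flight(&cwq, d1);     /* active work completes */
        assert(cwq.nr_active == 0);     /* old code would have reached -1 here */
        return 0;
}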
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         struct cpu_workqueue_struct *cwq = get_work_cwq(work);
         struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
         hlist_del_init(&worker->hentry);
         worker->current_work = NULL;
         worker->current_cwq = NULL;
-        cwq_dec_nr_in_flight(cwq, work_color);
+        cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
                         debug_work_deactivate(work);
                         list_del_init(&work->entry);
                         cwq_dec_nr_in_flight(get_work_cwq(work),
-                                get_work_color(work));
+                                get_work_color(work),
+                                *work_data_bits(work) & WORK_STRUCT_DELAYED);
                         ret = 1;
                 }
         }
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                 if (IS_ERR(rescuer->task))
                         goto err;
 
-                wq->rescuer = rescuer;
                 rescuer->task->flags |= PF_THREAD_BOUND;
                 wake_up_process(rescuer->task);
         }
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
         unsigned int cpu;
 
+        wq->flags |= WQ_DYING;
         flush_workqueue(wq);
 
         /*
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
         if (wq->flags & WQ_RESCUER) {
                 kthread_stop(wq->rescuer->task);
                 free_mayday_mask(wq->mayday_mask);
+                kfree(wq->rescuer);
         }
 
         free_cwqs(wq);
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times.  To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         if (!(gcwq->trustee_state == state ||
               gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void)
                 spin_lock_init(&gcwq->lock);
                 INIT_LIST_HEAD(&gcwq->worklist);
                 gcwq->cpu = cpu;
-                if (cpu == WORK_CPU_UNBOUND)
-                        gcwq->flags |= GCWQ_DISASSOCIATED;
+                gcwq->flags |= GCWQ_DISASSOCIATED;
 
                 INIT_LIST_HEAD(&gcwq->idle_list);
                 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void)
                 struct global_cwq *gcwq = get_gcwq(cpu);
                 struct worker *worker;
 
+                if (cpu != WORK_CPU_UNBOUND)
+                        gcwq->flags &= ~GCWQ_DISASSOCIATED;
                 worker = create_worker(gcwq, true);
                 BUG_ON(!worker);
                 spin_lock_irq(&gcwq->lock);
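A final note on the destroy_workqueue() hunks: WQ_DYING is set before the final flush, so any attempt to queue work on a dying workqueue, including a work item that re-queues itself from its own callback, hits the WARN_ON_ONCE() added in __queue_work() and is dropped instead of leaving a pending work behind after the workqueue is freed. A hedged sketch of the pattern this guards against (the workqueue and work names are hypothetical, not from this diff):

static struct workqueue_struct *example_wq;

/* hypothetical self-requeueing work item */
static void example_fn(struct work_struct *work)
{
        /* ... process one unit of work ... */
        queue_work(example_wq, work);   /* warns and is dropped once WQ_DYING is set */
}
static DECLARE_WORK(example_work, example_fn);

/* destroy_workqueue(example_wq) can now flush to completion: a re-queue
 * attempted during the final flush is rejected with a warning instead of
 * silently corrupting state. */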