author    David S. Miller <davem@davemloft.net>    2009-12-03 15:51:21 -0500
committer David S. Miller <davem@davemloft.net>    2009-12-03 15:51:21 -0500
commit    a6c872afb2536f47285e6643f4629dec7520041d
tree      4b54e69fc6594f9afc1277520a350db04e578e77
parent    9fe02668fe48a1d546196bc1392330ff28d9bd57
parent    22763c5cf3690a681551162c15d34d935308c8d7

Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile             |   1
-rw-r--r--  kernel/cgroup.c             |   8
-rw-r--r--  kernel/exit.c               |   4
-rw-r--r--  kernel/fork.c               |   2
-rw-r--r--  kernel/futex.c              |  20
-rw-r--r--  kernel/irq/spurious.c       |   2
-rw-r--r--  kernel/kthread.c            |  23
-rw-r--r--  kernel/module.c             |   5
-rw-r--r--  kernel/params.c             |  17
-rw-r--r--  kernel/perf_event.c         |  68
-rw-r--r--  kernel/power/hibernate.c    |  11
-rw-r--r--  kernel/power/swap.c         |  43
-rw-r--r--  kernel/rcutree.c            |  60
-rw-r--r--  kernel/rcutree.h            |  17
-rw-r--r--  kernel/rcutree_plugin.h     |  46
-rw-r--r--  kernel/sched.c              |  65
-rw-r--r--  kernel/sched_fair.c         |  74
-rw-r--r--  kernel/slow-work-debugfs.c  | 227
-rw-r--r--  kernel/slow-work.c          | 512
-rw-r--r--  kernel/slow-work.h          |  72
-rw-r--r--  kernel/sys.c                |  25
-rw-r--r--  kernel/sysctl_check.c       |   2
-rw-r--r--  kernel/trace/ftrace.c       |   8
-rw-r--r--  kernel/trace/ring_buffer.c  |  14
-rw-r--r--  kernel/trace/trace.c        |   8
-rw-r--r--  kernel/trace/trace_output.c |   5
-rw-r--r--  kernel/user.c               |   2
-rw-r--r--  kernel/workqueue.c          |  17
28 files changed, 1128 insertions(+), 230 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index b8d4cd8ac0b9..d7c13d249b2d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ca83b73fba19..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
 		return -EFAULT;
 
 	buffer[nbytes] = 0;	/* nul-terminate */
-	strstrip(buffer);
 	if (cft->write_u64) {
-		u64 val = simple_strtoull(buffer, &end, 0);
+		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
 		if (*end)
 			return -EINVAL;
 		retval = cft->write_u64(cgrp, cft, val);
 	} else {
-		s64 val = simple_strtoll(buffer, &end, 0);
+		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
 		if (*end)
 			return -EINVAL;
 		retval = cft->write_s64(cgrp, cft, val);
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
 	}
 
 	buffer[nbytes] = 0;	/* nul-terminate */
-	strstrip(buffer);
-	retval = cft->write_string(cgrp, cft, buffer);
+	retval = cft->write_string(cgrp, cft, strstrip(buffer));
 	if (!retval)
 		retval = nbytes;
 out:
diff --git a/kernel/exit.c b/kernel/exit.c
index e61891f80123..f7864ac2ecc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -359,10 +359,8 @@ void __set_special_pids(struct pid *pid)
 {
 	struct task_struct *curr = current->group_leader;
 
-	if (task_session(curr) != pid) {
+	if (task_session(curr) != pid)
 		change_pid(curr, PIDTYPE_SID, pid);
-		proc_sid_connector(curr);
-	}
 
 	if (task_pgrp(curr) != pid)
 		change_pid(curr, PIDTYPE_PGID, pid);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4c20fff8c13a..166b8c49257c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,7 +91,7 @@ int nr_processes(void)
 	int cpu;
 	int total = 0;
 
-	for_each_online_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		total += per_cpu(process_counts, cpu);
 
 	return total;
diff --git a/kernel/futex.c b/kernel/futex.c
index 4949d336d88d..fb65e822fc41 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
  */
 static inline int match_futex(union futex_key *key1, union futex_key *key2)
 {
-	return (key1->both.word == key2->both.word
+	return (key1 && key2
+		&& key1->both.word == key2->both.word
 		&& key1->both.ptr == key2->both.ptr
 		&& key1->both.offset == key2->both.offset);
 }
@@ -1028,7 +1029,6 @@ static inline
 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 			   struct futex_hash_bucket *hb)
 {
-	drop_futex_key_refs(&q->key);
 	get_futex_key_refs(key);
 	q->key = *key;
 
@@ -1226,6 +1226,7 @@ retry_private:
 	 */
 	if (ret == 1) {
 		WARN_ON(pi_state);
+		drop_count++;
 		task_count++;
 		ret = get_futex_value_locked(&curval2, uaddr2);
 		if (!ret)
@@ -1304,6 +1305,7 @@ retry_private:
 		if (ret == 1) {
 			/* We got the lock. */
 			requeue_pi_wake_futex(this, &key2, hb2);
+			drop_count++;
 			continue;
 		} else if (ret) {
 			/* -EDEADLK */
@@ -1791,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 					     current->timer_slack_ns);
 	}
 
+retry:
 	/* Prepare to wait on uaddr. */
 	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
 	if (ret)
@@ -1808,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		goto out_put_key;
 
 	/*
-	 * We expect signal_pending(current), but another thread may
-	 * have handled it for us already.
+	 * We expect signal_pending(current), but we might be the
+	 * victim of a spurious wakeup as well.
 	 */
+	if (!signal_pending(current)) {
+		put_futex_key(fshared, &q.key);
+		goto retry;
+	}
+
 	ret = -ERESTARTSYS;
 	if (!abs_time)
 		goto out_put_key;
@@ -2118,9 +2126,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 		 */
 		plist_del(&q->list, &q->list.plist);
 
+		/* Handle spurious wakeups gracefully */
+		ret = -EWOULDBLOCK;
 		if (timeout && !timeout->task)
 			ret = -ETIMEDOUT;
-		else
+		else if (signal_pending(current))
 			ret = -ERESTARTNOINTR;
 	}
 	return ret;
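
The futex hunks above make futex_wait() and the requeue-PI wakeup path tolerate spurious wakeups: a waiter that wakes without a pending signal or timeout simply re-queues itself. The same discipline applies on the user side of the interface; a minimal userspace sketch (illustrative only, not part of this patch) of the wait-in-a-loop pattern:

/*
 * Illustrative userspace sketch: FUTEX_WAIT can return early -- on a
 * signal (EINTR), on a value mismatch (EAGAIN), or spuriously -- so a
 * waiter must re-check its condition in a loop, just as the kernel-side
 * code above now retries on spurious wakeups.
 */
#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex_wait_until_changed(int *uaddr, int expected)
{
	for (;;) {
		/* re-check the condition before every sleep attempt */
		if (__atomic_load_n(uaddr, __ATOMIC_ACQUIRE) != expected)
			return 0;
		if (syscall(SYS_futex, uaddr, FUTEX_WAIT, expected,
			    NULL, NULL, 0) == 0)
			continue;	/* woken, possibly spuriously: recheck */
		if (errno == EAGAIN)
			return 0;	/* value already changed */
		if (errno != EINTR)
			return -1;	/* real error */
		/* EINTR: fall through and retry */
	}
}
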
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 114e704760fe..bd7273e6282e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -121,7 +121,9 @@ static void poll_all_shared_irqs(void)
 		if (!(status & IRQ_SPURIOUS_DISABLED))
 			continue;
 
+		local_irq_disable();
 		try_one_irq(i, desc);
+		local_irq_enable();
 	}
 }
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5fe709982caa..ab7ae57773e1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 EXPORT_SYMBOL(kthread_create);
 
 /**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @k: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @k to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- */
-void kthread_bind(struct task_struct *k, unsigned int cpu)
-{
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
-		WARN_ON(1);
-		return;
-	}
-	set_task_cpu(k, cpu);
-	k->cpus_allowed = cpumask_of_cpu(cpu);
-	k->rt.nr_cpus_allowed = 1;
-	k->flags |= PF_THREAD_BOUND;
-}
-EXPORT_SYMBOL(kthread_bind);
-
-/**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().
  *
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819d..5842a71cf052 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1187,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 
 	/* Count loaded sections and allocate structures */
 	for (i = 0; i < nsect; i++)
-		if (sechdrs[i].sh_flags & SHF_ALLOC)
+		if (sechdrs[i].sh_flags & SHF_ALLOC
+		    && sechdrs[i].sh_size)
 			nloaded++;
 	size[0] = ALIGN(sizeof(*sect_attrs)
 			+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1207,6 +1208,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 	for (i = 0; i < nsect; i++) {
 		if (! (sechdrs[i].sh_flags & SHF_ALLOC))
 			continue;
+		if (!sechdrs[i].sh_size)
+			continue;
 		sattr->address = sechdrs[i].sh_addr;
 		sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
 				      GFP_KERNEL);
diff --git a/kernel/params.c b/kernel/params.c
index 9da58eabdcb2..d656c276508d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -218,15 +218,11 @@ int param_set_charp(const char *val, struct kernel_param *kp)
 		return -ENOSPC;
 	}
 
-	if (kp->flags & KPARAM_KMALLOCED)
-		kfree(*(char **)kp->arg);
-
 	/* This is a hack. We can't need to strdup in early boot, and we
 	 * don't need to; this mangled commandline is preserved. */
 	if (slab_is_available()) {
-		kp->flags |= KPARAM_KMALLOCED;
 		*(char **)kp->arg = kstrdup(val, GFP_KERNEL);
-		if (!kp->arg)
+		if (!*(char **)kp->arg)
 			return -ENOMEM;
 	} else
 		*(const char **)kp->arg = val;
@@ -304,6 +300,7 @@ static int param_array(const char *name,
 		       unsigned int min, unsigned int max,
 		       void *elem, int elemsize,
 		       int (*set)(const char *, struct kernel_param *kp),
+		       u16 flags,
 		       unsigned int *num)
 {
 	int ret;
@@ -313,6 +310,7 @@ static int param_array(const char *name,
 	/* Get the name right for errors. */
 	kp.name = name;
 	kp.arg = elem;
+	kp.flags = flags;
 
 	/* No equals sign? */
 	if (!val) {
@@ -358,7 +356,8 @@ int param_array_set(const char *val, struct kernel_param *kp)
 	unsigned int temp_num;
 
 	return param_array(kp->name, val, 1, arr->max, arr->elem,
-			   arr->elemsize, arr->set, arr->num ?: &temp_num);
+			   arr->elemsize, arr->set, kp->flags,
+			   arr->num ?: &temp_num);
 }
 
 int param_array_get(char *buffer, struct kernel_param *kp)
@@ -605,11 +604,7 @@ void module_param_sysfs_remove(struct module *mod)
 
 void destroy_params(const struct kernel_param *params, unsigned num)
 {
-	unsigned int i;
-
-	for (i = 0; i < num; i++)
-		if (params[i].flags & KPARAM_KMALLOCED)
-			kfree(*(char **)params[i].arg);
+	/* FIXME: This should free kmalloced charp parameters.  It doesn't. */
 }
 
 static void __init kernel_add_sysfs_param(const char *name,
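
For context, the param_set_charp() paths above are exercised by ordinary charp module parameters; a minimal sketch of one (a hypothetical module, not from this patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* param_set_charp() either kstrdup()s the written string (once slab is
 * up) or points the variable at the preserved boot command line during
 * early boot, exactly the two branches shown in the hunk above. */
static char *greeting = "hello";
module_param(greeting, charp, 0644);
MODULE_PARM_DESC(greeting, "string handled by param_set_charp()");
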
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..7f29643c8985 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	u64 interrupts, freq;
 
 	spin_lock(&ctx->lock);
-	list_for_each_entry(event, &ctx->group_list, group_entry) {
+	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
@@ -3959,8 +3959,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	regs = task_pt_regs(current);
 
 	if (regs) {
-		if (perf_event_overflow(event, 0, &data, regs))
-			ret = HRTIMER_NORESTART;
+		if (!(event->attr.exclude_idle && current->pid == 0))
+			if (perf_event_overflow(event, 0, &data, regs))
+				ret = HRTIMER_NORESTART;
 	}
 
 	period = max_t(u64, 10000, event->hw.sample_period);
@@ -3969,6 +3970,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	return ret;
 }
 
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swevent_hrtimer;
+	if (hwc->sample_period) {
+		u64 period;
+
+		if (hwc->remaining) {
+			if (hwc->remaining < 0)
+				period = 10000;
+			else
+				period = hwc->remaining;
+			hwc->remaining = 0;
+		} else {
+			period = max_t(u64, 10000, hwc->sample_period);
+		}
+		__hrtimer_start_range_ns(&hwc->hrtimer,
+				ns_to_ktime(period), 0,
+				HRTIMER_MODE_REL, 0);
+	}
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->sample_period) {
+		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+		hwc->remaining = ktime_to_ns(remaining);
+
+		hrtimer_cancel(&hwc->hrtimer);
+	}
+}
+
 /*
  * Software event: cpu wall time clock
  */
@@ -3991,22 +4028,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
 	int cpu = raw_smp_processor_id();
 
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void cpu_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	cpu_clock_perf_event_update(event);
 }
 
@@ -4043,22 +4072,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
 	now = event->ctx->time;
 
 	atomic64_set(&hwc->prev_count, now);
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void task_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	task_clock_perf_event_update(event, event->ctx->time);
 
 }
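
The new perf_swevent_start_hrtimer()/perf_swevent_cancel_hrtimer() helpers do more than deduplicate the enable/disable paths: on cancel they record the time left until the next sample, and on restart they resume from that remainder rather than a full period. A stripped-down sketch of the idiom (hypothetical names; the patch itself uses __hrtimer_start_range_ns()):

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct sampled_event {
	struct hrtimer	timer;
	s64		remaining;	/* ns left when last stopped */
	u64		period;		/* nominal sampling period, ns */
};

static enum hrtimer_restart sample_fire(struct hrtimer *t)
{
	struct sampled_event *e = container_of(t, struct sampled_event, timer);

	/* ... take a sample here, then rearm for the next full period */
	hrtimer_forward_now(t, ns_to_ktime(e->period));
	return HRTIMER_RESTART;
}

static void sampled_event_init(struct sampled_event *e)
{
	hrtimer_init(&e->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	e->timer.function = sample_fire;
}

static void sampled_event_stop(struct sampled_event *e)
{
	/* remember how much of the period was left ... */
	e->remaining = ktime_to_ns(hrtimer_get_remaining(&e->timer));
	hrtimer_cancel(&e->timer);
}

static void sampled_event_start(struct sampled_event *e)
{
	u64 period = e->period;

	/* ... and resume from that remainder instead of a full period */
	if (e->remaining > 0) {
		period = e->remaining;
		e->remaining = 0;
	}
	hrtimer_start(&e->timer, ns_to_ktime(period), HRTIMER_MODE_REL);
}
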
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 04b3a83d686f..04a9e90d248f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -693,21 +693,22 @@ static int software_resume(void)
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
+		swsusp_close(FMODE_READ);
 		goto Unlock;
 	}
 
 	pm_prepare_console();
 	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
 	if (error)
-		goto Finish;
+		goto close_finish;
 
 	error = usermodehelper_disable();
 	if (error)
-		goto Finish;
+		goto close_finish;
 
 	error = create_basic_memory_bitmaps();
 	if (error)
-		goto Finish;
+		goto close_finish;
 
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = prepare_processes();
@@ -719,6 +720,7 @@ static int software_resume(void)
 	pr_debug("PM: Reading hibernation image.\n");
 
 	error = swsusp_read(&flags);
+	swsusp_close(FMODE_READ);
 	if (!error)
 		hibernation_restore(flags & SF_PLATFORM_MODE);
 
@@ -737,6 +739,9 @@ static int software_resume(void)
 	mutex_unlock(&pm_mutex);
 	pr_debug("PM: Resume from disk failed.\n");
 	return error;
+close_finish:
+	swsusp_close(FMODE_READ);
+	goto Finish;
 }
 
 late_initcall(software_resume);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b101cdc4df3f..890f6b11b1d3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -314,7 +314,6 @@ static int save_image(struct swap_map_handle *handle,
 {
 	unsigned int m;
 	int ret;
-	int error = 0;
 	int nr_pages;
 	int err2;
 	struct bio *bio;
@@ -329,26 +328,27 @@ static int save_image(struct swap_map_handle *handle,
 	nr_pages = 0;
 	bio = NULL;
 	do_gettimeofday(&start);
-	do {
+	while (1) {
 		ret = snapshot_read_next(snapshot, PAGE_SIZE);
-		if (ret > 0) {
-			error = swap_write_page(handle, data_of(*snapshot),
-						&bio);
-			if (error)
-				break;
-			if (!(nr_pages % m))
-				printk("\b\b\b\b%3d%%", nr_pages / m);
-			nr_pages++;
-		}
-	} while (ret > 0);
+		if (ret <= 0)
+			break;
+		ret = swap_write_page(handle, data_of(*snapshot), &bio);
+		if (ret)
+			break;
+		if (!(nr_pages % m))
+			printk("\b\b\b\b%3d%%", nr_pages / m);
+		nr_pages++;
+	}
 	err2 = wait_on_bio_chain(&bio);
 	do_gettimeofday(&stop);
-	if (!error)
-		error = err2;
-	if (!error)
+	if (!ret)
+		ret = err2;
+	if (!ret)
 		printk("\b\b\b\bdone\n");
+	else
+		printk("\n");
 	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
-	return error;
+	return ret;
 }
 
 /**
@@ -536,7 +536,8 @@ static int load_image(struct swap_map_handle *handle,
 		snapshot_write_finalize(snapshot);
 		if (!snapshot_image_loaded(snapshot))
 			error = -ENODATA;
-	}
+	} else
+		printk("\n");
 	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
 	return error;
 }
@@ -572,8 +573,6 @@ int swsusp_read(unsigned int *flags_p)
 	error = load_image(&handle, &snapshot, header->pages - 1);
 	release_swap_reader(&handle);
 
-	blkdev_put(resume_bdev, FMODE_READ);
-
 	if (!error)
 		pr_debug("PM: Image successfully loaded\n");
 	else
@@ -596,7 +595,7 @@ int swsusp_check(void)
 		error = bio_read_page(swsusp_resume_block,
 					swsusp_header, NULL);
 		if (error)
-			return error;
+			goto put;
 
 		if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) {
 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
@@ -604,8 +603,10 @@ int swsusp_check(void)
 			error = bio_write_page(swsusp_resume_block,
 						swsusp_header, NULL);
 		} else {
-			return -EINVAL;
+			error = -EINVAL;
 		}
+
+put:
 		if (error)
 			blkdev_put(resume_bdev, FMODE_READ);
 		else
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 705f02ac7433..f3077c0ab181 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -59,7 +59,7 @@
 		NUM_RCU_LVL_2, \
 		NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -657,14 +657,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * irqs disabled.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		spin_lock(&rnp->lock);	/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
-		spin_unlock(&rnp->lock); /* irqs already disabled. */
+		spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);		/* irqs already disabled. */
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -706,6 +709,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -913,7 +917,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		break;
 	}
-	rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+	/*
+	 * If there was a task blocking the current grace period,
+	 * and if all CPUs have checked in, we need to propagate
+	 * the quiescent state up the rcu_node hierarchy.  But that
+	 * is inconvenient at the moment due to deadlock issues if
+	 * this should end the current grace period.  So set the
+	 * offlined CPU's bit in ->qsmask in order to force the
+	 * next force_quiescent_state() invocation to clean up this
+	 * mess in a deadlock-free manner.
+	 */
+	if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+		rnp->qsmask |= mask;
+
 	mask = rnp->grpmask;
 	spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	rnp = rnp->parent;
@@ -958,7 +975,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Thottle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
@@ -1011,6 +1028,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;
 
+	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+		rdp->qlen_last_fqs_check = 0;
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+		rdp->qlen_last_fqs_check = rdp->qlen;
+
 	local_irq_restore(flags);
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1142,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
 
@@ -1158,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 		/* Update state, record completion counter. */
 		spin_lock(&rnp->lock);
-		if (lastcomp == rsp->completed) {
+		if (lastcomp == rsp->completed &&
+		    rsp->signaled == RCU_SAVE_DYNTICK) {
 			rsp->signaled = RCU_FORCE_QS;
 			dyntick_record_completed(rsp, lastcomp);
 		}
@@ -1224,7 +1250,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* If there are callbacks ready, invoke them. */
-	rcu_do_batch(rdp);
+	rcu_do_batch(rsp, rdp);
 }
 
 /*
@@ -1288,10 +1314,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
 	}
 
-	/* Force the grace period if too many callbacks or too long waiting. */
-	if (unlikely(++rdp->qlen > qhimark)) {
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so.  Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 		rdp->blimit = LONG_MAX;
-		force_quiescent_state(rsp, 0);
+		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+		    *rdp->nxttail[RCU_DONE_TAIL] != head)
+			force_quiescent_state(rsp, 0);
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->qlen_last_fqs_check = rdp->qlen;
 	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
@@ -1523,6 +1559,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->beenonline = 1;	 /* We have now been online. */
 	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
+	rdp->qlen_last_fqs_check = 0;
+	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
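
The __call_rcu() change above is a hysteresis: force_quiescent_state() fires only when the callback queue has grown by qhimark since the last trigger point, and only if no other CPU has forced a quiescent state since this CPU's last snapshot; rcu_do_batch() re-arms the trigger as callbacks drain. The shape of the mechanism, reduced to a self-contained sketch with hypothetical names:

struct cb_queue {
	long		qlen;			/* current callback count */
	long		qlen_last_check;	/* qlen when we last fired */
	unsigned long	n_forced_snap;		/* global force count last seen */
};

/* returns nonzero when this enqueue should take the expensive path */
static int should_force(struct cb_queue *q, long qhimark,
			unsigned long n_forced_global)
{
	int fire = 0;

	if (++q->qlen > q->qlen_last_check + qhimark) {
		/* only fire if nobody forced since our last snapshot */
		fire = (q->n_forced_snap == n_forced_global);
		q->n_forced_snap = n_forced_global;
		q->qlen_last_check = q->qlen;	/* re-arm the trigger */
	}
	return fire;
}
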
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index b40ac5706040..1899023b0962 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -167,6 +167,10 @@ struct rcu_data {
 	struct rcu_head *nxtlist;
 	struct rcu_head **nxttail[RCU_NEXT_SIZE];
 	long		qlen;		/* # of queued callbacks */
+	long		qlen_last_fqs_check;
+					/* qlen at last check for QS forcing */
+	unsigned long	n_force_qs_snap;
+					/* did other CPU force QS recently? */
 	long		blimit;		/* Upper limit on a processed batch */
 
 #ifdef CONFIG_NO_HZ
@@ -197,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT		0	/* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
-#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
+#define RCU_GP_IDLE		0	/* No grace period in progress. */
+#define RCU_GP_INIT		1	/* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
+#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */
@@ -302,9 +307,9 @@ static void rcu_print_task_stall(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp);
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp);
 static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c0cb783aa16a..ef2a58c2b9d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  * parent is to remove the need for rcu_read_unlock_special() to
  * make more than two attempts to acquire the target rcu_node's lock.
  *
+ * Returns 1 if there was previously a task blocking the current grace
+ * period on the specified rcu_node structure.
+ *
  * The caller must hold rnp->lock with irqs disabled.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp)
 {
 	int i;
 	struct list_head *lp;
 	struct list_head *lp_root;
+	int retval = rcu_preempted_readers(rnp);
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	struct task_struct *tp;
 
 	if (rnp == rnp_root) {
 		WARN_ONCE(1, "Last CPU thought to be offlined?");
-		return;  /* Shouldn't happen: at least one CPU online. */
+		return 0;  /* Shouldn't happen: at least one CPU online. */
 	}
 	WARN_ON_ONCE(rnp != rdp->mynode &&
 		     (!list_empty(&rnp->blocked_tasks[0]) ||
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 			spin_unlock(&rnp_root->lock); /* irqs remain disabled */
 		}
 	}
+
+	return retval;
 }
 
 /*
@@ -393,6 +399,17 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
+ * Wait for an rcu-preempt grace period.  We are supposed to expedite the
+ * grace period, but this is the crude slow compatability hack, so just
+ * invoke synchronize_rcu().
+ */
+void synchronize_rcu_expedited(void)
+{
+	synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
  * Check to see if there is any immediate preemptable-RCU-related work
  * to be done.
  */
@@ -521,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 
 /*
  * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp)
 {
+	return 0;
 }
 
 /*
@@ -565,6 +585,16 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
+ * Wait for an rcu-preempt grace period, but make it happen quickly.
+ * But because preemptable RCU does not exist, map to rcu-sched.
+ */
+void synchronize_rcu_expedited(void)
+{
+	synchronize_sched_expedited();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
  * Because preemptable RCU does not exist, it never has any work to do.
  */
 static int rcu_preempt_pending(int cpu)
diff --git a/kernel/sched.c b/kernel/sched.c
index e88689522e66..3c11ae0a948d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -1564,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1578,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1618,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1627,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
 		/*
 		 * If there are currently no tasks on the cpu pretend there
@@ -1652,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
 
@@ -1996,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	set_task_cpu(p, cpu);
+	p->cpus_allowed = cpumask_of_cpu(cpu);
+	p->rt.nr_cpus_allowed = 1;
+	p->flags |= PF_THREAD_BOUND;
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -2008,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) &&
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 	    (&p->se == cfs_rq_of(&p->se)->next ||
 	     &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
@@ -9407,6 +9436,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
 
@@ -9532,13 +9565,13 @@ void __init sched_init(void)
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	perf_event_init();
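
The update_shares_data conversion above trades a static DEFINE_PER_CPU array sized by the compile-time NR_CPUS for a per-CPU buffer sized at boot by nr_cpu_ids, so a kernel built with a large NR_CPUS no longer pays for unused slots on a small machine. A minimal sketch of the allocation pattern (assumed names; the calls are the same ones the patch uses):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static unsigned long *cpu_weights;	/* nr_cpu_ids entries, per CPU */

static int __init cpu_weights_init(void)
{
	cpu_weights = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
				     __alignof__(unsigned long));
	return cpu_weights ? 0 : -ENOMEM;
}

static void cpu_weights_touch(void)
{
	/* caller must have preemption disabled for smp_processor_id() */
	unsigned long *w = per_cpu_ptr(cpu_weights, smp_processor_id());

	w[0]++;
}
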
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4e777b47eeda..37087a7fac22 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	 * re-elected due to buddy favours.
 	 */
 	clear_buddies(cfs_rq, curr);
+	return;
+	}
+
+	/*
+	 * Ensure that a task that missed wakeup preemption by a
+	 * narrow margin doesn't have to wait for a full slice.
+	 * This also mitigates buddy induced latencies under load.
+	 */
+	if (!sched_feat(WAKEUP_PREEMPT))
+		return;
+
+	if (delta_exec < sysctl_sched_min_granularity)
+		return;
+
+	if (cfs_rq->nr_running > 1) {
+		struct sched_entity *se = __pick_next_entity(cfs_rq);
+		s64 delta = curr->vruntime - se->vruntime;
+
+		if (delta > ideal_runtime)
+			resched_task(rq_of(cfs_rq)->curr);
 	}
 }
 
@@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *se = __pick_next_entity(cfs_rq);
+	struct sched_entity *left = se;
 
-	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
-		return cfs_rq->next;
+	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+		se = cfs_rq->next;
 
-	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
-		return cfs_rq->last;
+	/*
+	 * Prefer last buddy, try to return the CPU to a preempted task.
+	 */
+	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
+		se = cfs_rq->last;
+
+	clear_buddies(cfs_rq, se);
 
 	return se;
 }
@@ -1568,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int sync = wake_flags & WF_SYNC;
+	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
 	update_curr(cfs_rq);
 
@@ -1582,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	if (unlikely(se == pse))
 		return;
 
-	/*
-	 * Only set the backward buddy when the current task is still on the
-	 * rq. This can happen when a wakeup gets interleaved with schedule on
-	 * the ->pre_schedule() or idle_balance() point, either of which can
-	 * drop the rq lock.
-	 *
-	 * Also, during early boot the idle thread is in the fair class, for
-	 * obvious reasons its a bad idea to schedule back to the idle thread.
-	 */
-	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
-		set_last_buddy(se);
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
+	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
 		set_next_buddy(pse);
 
 	/*
@@ -1639,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 	BUG_ON(!pse);
 
-	if (wakeup_preempt_entity(se, pse) == 1)
+	if (wakeup_preempt_entity(se, pse) == 1) {
 		resched_task(curr);
+		/*
+		 * Only set the backward buddy when the current task is still
+		 * on the rq. This can happen when a wakeup gets interleaved
+		 * with schedule on the ->pre_schedule() or idle_balance()
+		 * point, either of which can * drop the rq lock.
+		 *
+		 * Also, during early boot the idle thread is in the fair class,
+		 * for obvious reasons its a bad idea to schedule back to it.
+		 */
+		if (unlikely(!se->on_rq || curr == rq->idle))
+			return;
+		if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+			set_last_buddy(se);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1654,16 +1684,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
-		/*
-		 * If se was a buddy, clear it so that it will have to earn
-		 * the favour again.
-		 *
-		 * If se was not a buddy, clear the buddies because neither
-		 * was elegible to run, let them earn it again.
-		 *
-		 * IOW. unconditionally clear buddies.
-		 */
-		__clear_buddies(cfs_rq, NULL);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c
new file mode 100644
index 000000000000..e45c43645298
--- /dev/null
+++ b/kernel/slow-work-debugfs.c
@@ -0,0 +1,227 @@
1/* Slow work debugging
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/slow-work.h>
14#include <linux/fs.h>
15#include <linux/time.h>
16#include <linux/seq_file.h>
17#include "slow-work.h"
18
19#define ITERATOR_SHIFT (BITS_PER_LONG - 4)
20#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
21#define ITERATOR_COUNTER (~ITERATOR_SELECTOR)
22
23void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
24{
25 seq_puts(m, "Slow-work: New thread");
26}
27
28/*
29 * Render the time mark field on a work item into a 5-char time with units plus
30 * a space
31 */
32static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
33{
34 struct timespec now, diff;
35
36 now = CURRENT_TIME;
37 diff = timespec_sub(now, work->mark);
38
39 if (diff.tv_sec < 0)
40 seq_puts(m, " -ve ");
41 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
42 seq_printf(m, "%3luns ", diff.tv_nsec);
43 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
44 seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
45 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
46 seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
47 else if (diff.tv_sec <= 1)
48 seq_puts(m, " 1s ");
49 else if (diff.tv_sec < 60)
50 seq_printf(m, "%4lus ", diff.tv_sec);
51 else if (diff.tv_sec < 60 * 60)
52 seq_printf(m, "%4lum ", diff.tv_sec / 60);
53 else if (diff.tv_sec < 60 * 60 * 24)
54 seq_printf(m, "%4luh ", diff.tv_sec / 3600);
55 else
56 seq_puts(m, "exces ");
57}
58
59/*
60 * Describe a slow work item for debugfs
61 */
62static int slow_work_runqueue_show(struct seq_file *m, void *v)
63{
64 struct slow_work *work;
65 struct list_head *p = v;
66 unsigned long id;
67
68 switch ((unsigned long) v) {
69 case 1:
70 seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n");
71 return 0;
72 case 2:
73 seq_puts(m, "=== ===== ================ == ===== ==========\n");
74 return 0;
75
76 case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
77 id = (unsigned long) v - 3;
78
79 read_lock(&slow_work_execs_lock);
80 work = slow_work_execs[id];
81 if (work) {
82 smp_read_barrier_depends();
83
84 seq_printf(m, "%3lu %5d %16p %2lx ",
85 id, slow_work_pids[id], work, work->flags);
86 slow_work_print_mark(m, work);
87
88 if (work->ops->desc)
89 work->ops->desc(work, m);
90 seq_putc(m, '\n');
91 }
92 read_unlock(&slow_work_execs_lock);
93 return 0;
94
95 default:
96 work = list_entry(p, struct slow_work, link);
97 seq_printf(m, "%3s - %16p %2lx ",
98 work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
99 work, work->flags);
100 slow_work_print_mark(m, work);
101
102 if (work->ops->desc)
103 work->ops->desc(work, m);
104 seq_putc(m, '\n');
105 return 0;
106 }
107}
108
109/*
110 * map the iterator to a work item
111 */
112static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
113{
114 struct list_head *p;
115 unsigned long count, id;
116
117 switch (*_pos >> ITERATOR_SHIFT) {
118 case 0x0:
119 if (*_pos == 0)
120 *_pos = 1;
121 if (*_pos < 3)
122 return (void *)(unsigned long) *_pos;
123 if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
124 for (id = *_pos - 3;
125 id < SLOW_WORK_THREAD_LIMIT;
126 id++, (*_pos)++)
127 if (slow_work_execs[id])
128 return (void *)(unsigned long) *_pos;
129 *_pos = 0x1UL << ITERATOR_SHIFT;
130
131 case 0x1:
132 count = *_pos & ITERATOR_COUNTER;
133 list_for_each(p, &slow_work_queue) {
134 if (count == 0)
135 return p;
136 count--;
137 }
138 *_pos = 0x2UL << ITERATOR_SHIFT;
139
140 case 0x2:
141 count = *_pos & ITERATOR_COUNTER;
142 list_for_each(p, &vslow_work_queue) {
143 if (count == 0)
144 return p;
145 count--;
146 }
147 *_pos = 0x3UL << ITERATOR_SHIFT;
148
149 default:
150 return NULL;
151 }
152}
153
154/*
155 * set up the iterator to start reading from the first line
156 */
157static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
158{
159 spin_lock_irq(&slow_work_queue_lock);
160 return slow_work_runqueue_index(m, _pos);
161}
162
163/*
164 * move to the next line
165 */
166static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
167{
168 struct list_head *p = v;
169 unsigned long selector = *_pos >> ITERATOR_SHIFT;
170
171 (*_pos)++;
172 switch (selector) {
173 case 0x0:
174 return slow_work_runqueue_index(m, _pos);
175
176 case 0x1:
177 if (*_pos >> ITERATOR_SHIFT == 0x1) {
178 p = p->next;
179 if (p != &slow_work_queue)
180 return p;
181 }
182 *_pos = 0x2UL << ITERATOR_SHIFT;
183 p = &vslow_work_queue;
184
185 case 0x2:
186 if (*_pos >> ITERATOR_SHIFT == 0x2) {
187 p = p->next;
188 if (p != &vslow_work_queue)
189 return p;
190 }
191 *_pos = 0x3UL << ITERATOR_SHIFT;
192
193 default:
194 return NULL;
195 }
196}
197
198/*
199 * clean up after reading
200 */
201static void slow_work_runqueue_stop(struct seq_file *m, void *v)
202{
203 spin_unlock_irq(&slow_work_queue_lock);
204}
205
206static const struct seq_operations slow_work_runqueue_ops = {
207 .start = slow_work_runqueue_start,
208 .stop = slow_work_runqueue_stop,
209 .next = slow_work_runqueue_next,
210 .show = slow_work_runqueue_show,
211};
212
213/*
214 * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents
215 */
216static int slow_work_runqueue_open(struct inode *inode, struct file *file)
217{
218 return seq_open(file, &slow_work_runqueue_ops);
219}
220
221const struct file_operations slow_work_runqueue_fops = {
222 .owner = THIS_MODULE,
223 .open = slow_work_runqueue_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = seq_release,
227};
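For completeness, once debugfs is mounted the new file reads like any other; a trivial userspace dumper (hypothetical, needs root) might look like this:

        #include <stdio.h>

        int main(void)
        {
                char line[256];
                FILE *f = fopen("/sys/kernel/debug/slow_work/runqueue", "r");

                if (!f) {
                        perror("fopen");        /* debugfs mounted? root? */
                        return 1;
                }
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);    /* THR PID ITEM ADDR FL MARK DESC */
                fclose(f);
                return 0;
        }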
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..00889bd3c590 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,11 +16,8 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/freezer.h> 17#include <linux/freezer.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19#include <linux/debugfs.h>
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of 20#include "slow-work.h"
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24 21
25static void slow_work_cull_timeout(unsigned long); 22static void slow_work_cull_timeout(unsigned long);
26static void slow_work_oom_timeout(unsigned long); 23static void slow_work_oom_timeout(unsigned long);
@@ -46,7 +43,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
46 43
47#ifdef CONFIG_SYSCTL 44#ifdef CONFIG_SYSCTL
48static const int slow_work_min_min_threads = 2; 45static const int slow_work_min_min_threads = 2;
49static int slow_work_max_max_threads = 255; 46static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
50static const int slow_work_min_vslow = 1; 47static const int slow_work_min_vslow = 1;
51static const int slow_work_max_vslow = 99; 48static const int slow_work_max_vslow = 99;
52 49
@@ -98,6 +95,56 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
98static struct slow_work slow_work_new_thread; /* new thread starter */ 95static struct slow_work slow_work_new_thread; /* new thread starter */
99 96
100/* 97/*
98 * slow work ID allocation (use slow_work_queue_lock)
99 */
100static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
101
102/*
103 * Unregistration tracking to prevent put_ref() from disappearing during module
104 * unload
105 */
106#ifdef CONFIG_MODULES
107static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
108static struct module *slow_work_unreg_module;
109static struct slow_work *slow_work_unreg_work_item;
110static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
111static DEFINE_MUTEX(slow_work_unreg_sync_lock);
112
113static void slow_work_set_thread_processing(int id, struct slow_work *work)
114{
115 if (work)
116 slow_work_thread_processing[id] = work->owner;
117}
118static void slow_work_done_thread_processing(int id, struct slow_work *work)
119{
120 struct module *module = slow_work_thread_processing[id];
121
122 slow_work_thread_processing[id] = NULL;
123 smp_mb();
124 if (slow_work_unreg_work_item == work ||
125 slow_work_unreg_module == module)
126 wake_up_all(&slow_work_unreg_wq);
127}
128static void slow_work_clear_thread_processing(int id)
129{
130 slow_work_thread_processing[id] = NULL;
131}
132#else
133static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
134static void slow_work_done_thread_processing(int id, struct slow_work *work) {}
135static void slow_work_clear_thread_processing(int id) {}
136#endif
137
138/*
139 * Data for tracking currently executing items for indication through /proc
140 */
141#ifdef CONFIG_SLOW_WORK_DEBUG
142struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
143pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
144DEFINE_RWLOCK(slow_work_execs_lock);
145#endif
146
147/*
101 * The queues of work items and the lock governing access to them. These are 148 * The queues of work items and the lock governing access to them. These are
102 * shared between all the CPUs. It doesn't make sense to have per-CPU queues 149 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
103 * as the number of threads bears no relation to the number of CPUs. 150 * as the number of threads bears no relation to the number of CPUs.
@@ -105,9 +152,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */
105 * There are two queues of work items: one for slow work items, and one for 152 * There are two queues of work items: one for slow work items, and one for
106 * very slow work items. 153 * very slow work items.
107 */ 154 */
108static LIST_HEAD(slow_work_queue); 155LIST_HEAD(slow_work_queue);
109static LIST_HEAD(vslow_work_queue); 156LIST_HEAD(vslow_work_queue);
110static DEFINE_SPINLOCK(slow_work_queue_lock); 157DEFINE_SPINLOCK(slow_work_queue_lock);
158
159/*
160 * The following are two wait queues that get pinged when a work item is placed
161 * on an empty queue. These allow work items that are hogging a thread by
162 * sleeping in a deferrable way to yield their thread and requeue
163 * themselves.
164 */
165static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
166static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
111 167
112/* 168/*
113 * The thread controls. A variable used to signal to the threads that they 169 * The thread controls. A variable used to signal to the threads that they
@@ -126,6 +182,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
126static int slow_work_user_count; 182static int slow_work_user_count;
127static DEFINE_MUTEX(slow_work_user_lock); 183static DEFINE_MUTEX(slow_work_user_lock);
128 184
185static inline int slow_work_get_ref(struct slow_work *work)
186{
187 if (work->ops->get_ref)
188 return work->ops->get_ref(work);
189
190 return 0;
191}
192
193static inline void slow_work_put_ref(struct slow_work *work)
194{
195 if (work->ops->put_ref)
196 work->ops->put_ref(work);
197}
198
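These wrappers make ->get_ref() and ->put_ref() optional: an item whose lifetime is otherwise pinned can now supply just ->execute(). A hypothetical ops table (names invented for illustration, and reused in the sketches further down):

        static void frobnicate_execute(struct slow_work *work)
        {
                /* runs in one of the kslowd threads */
        }

        /* no .get_ref/.put_ref: the wrappers above treat them as no-ops */
        static const struct slow_work_ops frobnicate_ops = {
                .owner   = THIS_MODULE,
                .execute = frobnicate_execute,
        };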
129/* 199/*
130 * Calculate the maximum number of active threads in the pool that are 200 * Calculate the maximum number of active threads in the pool that are
131 * permitted to process very slow work items. 201 * permitted to process very slow work items.
@@ -149,7 +219,7 @@ static unsigned slow_work_calc_vsmax(void)
149 * Attempt to execute stuff queued on a slow thread. Return true if we managed 219 * Attempt to execute stuff queued on a slow thread. Return true if we managed
150 * it, false if there was nothing to do. 220 * it, false if there was nothing to do.
151 */ 221 */
152static bool slow_work_execute(void) 222static noinline bool slow_work_execute(int id)
153{ 223{
154 struct slow_work *work = NULL; 224 struct slow_work *work = NULL;
155 unsigned vsmax; 225 unsigned vsmax;
@@ -186,6 +256,13 @@ static bool slow_work_execute(void)
186 } else { 256 } else {
187 very_slow = false; /* avoid the compiler warning */ 257 very_slow = false; /* avoid the compiler warning */
188 } 258 }
259
260 slow_work_set_thread_processing(id, work);
261 if (work) {
262 slow_work_mark_time(work);
263 slow_work_begin_exec(id, work);
264 }
265
189 spin_unlock_irq(&slow_work_queue_lock); 266 spin_unlock_irq(&slow_work_queue_lock);
190 267
191 if (!work) 268 if (!work)
@@ -194,12 +271,19 @@ static bool slow_work_execute(void)
194 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) 271 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
195 BUG(); 272 BUG();
196 273
197 work->ops->execute(work); 274 /* don't execute if the work is in the process of being cancelled */
275 if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
276 work->ops->execute(work);
198 277
199 if (very_slow) 278 if (very_slow)
200 atomic_dec(&vslow_work_executing_count); 279 atomic_dec(&vslow_work_executing_count);
201 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); 280 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
202 281
282 /* wake up anyone waiting for this work to be complete */
283 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
284
285 slow_work_end_exec(id, work);
286
203 /* if someone tried to enqueue the item whilst we were executing it, 287 /* if someone tried to enqueue the item whilst we were executing it,
204 * then it'll be left unenqueued to avoid multiple threads trying to 288 * then it'll be left unenqueued to avoid multiple threads trying to
205 * execute it simultaneously 289 * execute it simultaneously
@@ -219,7 +303,10 @@ static bool slow_work_execute(void)
219 spin_unlock_irq(&slow_work_queue_lock); 303 spin_unlock_irq(&slow_work_queue_lock);
220 } 304 }
221 305
222 work->ops->put_ref(work); 306 /* sort out the race between module unloading and put_ref() */
307 slow_work_put_ref(work);
308 slow_work_done_thread_processing(id, work);
309
223 return true; 310 return true;
224 311
225auto_requeue: 312auto_requeue:
@@ -227,15 +314,61 @@ auto_requeue:
227 * - we transfer our ref on the item back to the appropriate queue 314 * - we transfer our ref on the item back to the appropriate queue
228 * - don't wake another thread up as we're awake already 315 * - don't wake another thread up as we're awake already
229 */ 316 */
317 slow_work_mark_time(work);
230 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 318 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
231 list_add_tail(&work->link, &vslow_work_queue); 319 list_add_tail(&work->link, &vslow_work_queue);
232 else 320 else
233 list_add_tail(&work->link, &slow_work_queue); 321 list_add_tail(&work->link, &slow_work_queue);
234 spin_unlock_irq(&slow_work_queue_lock); 322 spin_unlock_irq(&slow_work_queue_lock);
323 slow_work_clear_thread_processing(id);
235 return true; 324 return true;
236} 325}
237 326
238/** 327/**
328 * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
329 * @work: The work item under execution that wants to sleep
330 * @_timeout: Scheduler sleep timeout
331 *
332 * Allow a requeueable work item to sleep on a slow-work processor thread until
333 * that thread is needed to do some other work or the sleep is interrupted by
334 * some other event.
335 *
336 * The caller must set up a wake up event before calling this and must have set
337 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
338 * condition before calling this function as no test is made here.
339 *
340 * False is returned if there is nothing on the queue; true is returned if the
341 * work item should be requeued
342 */
343bool slow_work_sleep_till_thread_needed(struct slow_work *work,
344 signed long *_timeout)
345{
346 wait_queue_head_t *wfo_wq;
347 struct list_head *queue;
348
349 DEFINE_WAIT(wait);
350
351 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
352 wfo_wq = &vslow_work_queue_waits_for_occupation;
353 queue = &vslow_work_queue;
354 } else {
355 wfo_wq = &slow_work_queue_waits_for_occupation;
356 queue = &slow_work_queue;
357 }
358
359 if (!list_empty(queue))
360 return true;
361
362 add_wait_queue_exclusive(wfo_wq, &wait);
363 if (list_empty(queue))
364 *_timeout = schedule_timeout(*_timeout);
365 finish_wait(wfo_wq, &wait);
366
367 return !list_empty(queue);
368}
369EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
370
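A rough caller-side sketch of the protocol the comment above demands (my_event_wq and my_condition_met() are invented for illustration): set up the wakeup, set the sleep state, test the condition, then let this function decide whether the thread should be yielded:

        static DECLARE_WAIT_QUEUE_HEAD(my_event_wq);
        static bool my_condition_met(void);     /* assumed helper */

        static void my_execute(struct slow_work *work)
        {
                signed long timeout = 5 * HZ;
                DEFINE_WAIT(wait);

                while (!my_condition_met()) {
                        prepare_to_wait(&my_event_wq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (my_condition_met())
                                break;
                        if (slow_work_sleep_till_thread_needed(work,
                                                               &timeout)) {
                                /* someone needs this thread: requeue, bail */
                                finish_wait(&my_event_wq, &wait);
                                slow_work_enqueue(work);
                                return;
                        }
                        if (timeout <= 0)
                                break;          /* gave up waiting */
                }
                finish_wait(&my_event_wq, &wait);
                /* ... proceed with the actual work ... */
        }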
371/**
239 * slow_work_enqueue - Schedule a slow work item for processing 372 * slow_work_enqueue - Schedule a slow work item for processing
240 * @work: The work item to queue 373 * @work: The work item to queue
241 * 374 *
@@ -260,16 +393,22 @@ auto_requeue:
260 * allowed to pick items to execute. This ensures that very slow items won't 393 * allowed to pick items to execute. This ensures that very slow items won't
261 * overly block ones that are just ordinarily slow. 394 * overly block ones that are just ordinarily slow.
262 * 395 *
263 * Returns 0 if successful, -EAGAIN if not. 396 * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is
397 * attempted queued)
264 */ 398 */
265int slow_work_enqueue(struct slow_work *work) 399int slow_work_enqueue(struct slow_work *work)
266{ 400{
401 wait_queue_head_t *wfo_wq;
402 struct list_head *queue;
267 unsigned long flags; 403 unsigned long flags;
404 int ret;
405
406 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
407 return -ECANCELED;
268 408
269 BUG_ON(slow_work_user_count <= 0); 409 BUG_ON(slow_work_user_count <= 0);
270 BUG_ON(!work); 410 BUG_ON(!work);
271 BUG_ON(!work->ops); 411 BUG_ON(!work->ops);
272 BUG_ON(!work->ops->get_ref);
273 412
274 /* when honouring an enqueue request, we only promise that we will run 413 /* when honouring an enqueue request, we only promise that we will run
275 * the work function in the future; we do not promise to run it once 414 * the work function in the future; we do not promise to run it once
@@ -280,8 +419,19 @@ int slow_work_enqueue(struct slow_work *work)
280 * maintaining our promise 419 * maintaining our promise
281 */ 420 */
282 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { 421 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
422 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
423 wfo_wq = &vslow_work_queue_waits_for_occupation;
424 queue = &vslow_work_queue;
425 } else {
426 wfo_wq = &slow_work_queue_waits_for_occupation;
427 queue = &slow_work_queue;
428 }
429
283 spin_lock_irqsave(&slow_work_queue_lock, flags); 430 spin_lock_irqsave(&slow_work_queue_lock, flags);
284 431
432 if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
433 goto cancelled;
434
285 /* we promise that we will not attempt to execute the work 435 /* we promise that we will not attempt to execute the work
286 * function in more than one thread simultaneously 436 * function in more than one thread simultaneously
287 * 437 *
@@ -299,25 +449,221 @@ int slow_work_enqueue(struct slow_work *work)
299 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { 449 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
300 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); 450 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
301 } else { 451 } else {
302 if (work->ops->get_ref(work) < 0) 452 ret = slow_work_get_ref(work);
303 goto cant_get_ref; 453 if (ret < 0)
304 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 454 goto failed;
305 list_add_tail(&work->link, &vslow_work_queue); 455 slow_work_mark_time(work);
306 else 456 list_add_tail(&work->link, queue);
307 list_add_tail(&work->link, &slow_work_queue);
308 wake_up(&slow_work_thread_wq); 457 wake_up(&slow_work_thread_wq);
458
459 /* if someone who could be requeued is sleeping on a
460 * thread, then ask them to yield their thread */
461 if (work->link.prev == queue)
462 wake_up(wfo_wq);
309 } 463 }
310 464
311 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 465 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
312 } 466 }
313 return 0; 467 return 0;
314 468
315cant_get_ref: 469cancelled:
470 ret = -ECANCELED;
471failed:
316 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 472 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
317 return -EAGAIN; 473 return ret;
318} 474}
319EXPORT_SYMBOL(slow_work_enqueue); 475EXPORT_SYMBOL(slow_work_enqueue);
320 476
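Continuing the hypothetical frobnicate example sketched earlier, enqueueing now has three possible results; slow_work_init() is assumed from include/linux/slow-work.h:

        static struct slow_work frobnicate_work;

        static int frobnicate_kick(void)
        {
                slow_work_init(&frobnicate_work, &frobnicate_ops);

                /* 0 on success, -EAGAIN if no reference could be taken,
                 * -ECANCELED if the item is concurrently being cancelled */
                return slow_work_enqueue(&frobnicate_work);
        }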
477static int slow_work_wait(void *word)
478{
479 schedule();
480 return 0;
481}
482
483/**
484 * slow_work_cancel - Cancel a slow work item
485 * @work: The work item to cancel
486 *
487 * This function will cancel a previously enqueued work item. If we cannot
488 * cancel the work item, it is guarenteed to have run when this function
489 * returns.
490 */
491void slow_work_cancel(struct slow_work *work)
492{
493 bool wait = true, put = false;
494
495 set_bit(SLOW_WORK_CANCELLING, &work->flags);
496 smp_mb();
497
498 /* if the work item is a delayed work item with an active timer, we
499 * need to wait for the timer to finish _before_ getting the spinlock,
500 * lest we deadlock against the timer routine
501 *
502 * the timer routine will leave DELAYED set if it notices the
503 * CANCELLING flag in time
504 */
505 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
506 struct delayed_slow_work *dwork =
507 container_of(work, struct delayed_slow_work, work);
508 del_timer_sync(&dwork->timer);
509 }
510
511 spin_lock_irq(&slow_work_queue_lock);
512
513 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
514 /* the timer routine aborted or never happened, so we are left
515 * holding the timer's reference on the item and should just
516 * drop the pending flag and wait for any ongoing execution to
517 * finish */
518 struct delayed_slow_work *dwork =
519 container_of(work, struct delayed_slow_work, work);
520
521 BUG_ON(timer_pending(&dwork->timer));
522 BUG_ON(!list_empty(&work->link));
523
524 clear_bit(SLOW_WORK_DELAYED, &work->flags);
525 put = true;
526 clear_bit(SLOW_WORK_PENDING, &work->flags);
527
528 } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
529 !list_empty(&work->link)) {
530 /* the link in the pending queue holds a reference on the item
531 * that we will need to release */
532 list_del_init(&work->link);
533 wait = false;
534 put = true;
535 clear_bit(SLOW_WORK_PENDING, &work->flags);
536
537 } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
538 /* the executor is holding our only reference on the item, so
539 * we merely need to wait for it to finish executing */
540 clear_bit(SLOW_WORK_PENDING, &work->flags);
541 }
542
543 spin_unlock_irq(&slow_work_queue_lock);
544
545 /* the EXECUTING flag is set by the executor whilst the spinlock is set
546 * and before the item is dequeued - so assuming the above doesn't
547 * actually dequeue it, simply waiting for the EXECUTING flag to be
548 * released here should be sufficient */
549 if (wait)
550 wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
551 TASK_UNINTERRUPTIBLE);
552
553 clear_bit(SLOW_WORK_CANCELLING, &work->flags);
554 if (put)
555 slow_work_put_ref(work);
556}
557EXPORT_SYMBOL(slow_work_cancel);
558
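Cancellation composes with the sketch above: once slow_work_cancel() returns, the item is either off the queues or has finished executing, so tearing it down is safe:

        static void frobnicate_stop(void)
        {
                /* returns only when frobnicate_work can no longer be running */
                slow_work_cancel(&frobnicate_work);
        }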
559/*
560 * Handle expiry of the delay timer, indicating that a delayed slow work item
561 * should now be queued if not cancelled
562 */
563static void delayed_slow_work_timer(unsigned long data)
564{
565 wait_queue_head_t *wfo_wq;
566 struct list_head *queue;
567 struct slow_work *work = (struct slow_work *) data;
568 unsigned long flags;
569 bool queued = false, put = false, first = false;
570
571 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
572 wfo_wq = &vslow_work_queue_waits_for_occupation;
573 queue = &vslow_work_queue;
574 } else {
575 wfo_wq = &slow_work_queue_waits_for_occupation;
576 queue = &slow_work_queue;
577 }
578
579 spin_lock_irqsave(&slow_work_queue_lock, flags);
580 if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
581 clear_bit(SLOW_WORK_DELAYED, &work->flags);
582
583 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
584 /* we discard the reference the timer was holding in
585 * favour of the one the executor holds */
586 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
587 put = true;
588 } else {
589 slow_work_mark_time(work);
590 list_add_tail(&work->link, queue);
591 queued = true;
592 if (work->link.prev == queue)
593 first = true;
594 }
595 }
596
597 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
598 if (put)
599 slow_work_put_ref(work);
600 if (first)
601 wake_up(wfo_wq);
602 if (queued)
603 wake_up(&slow_work_thread_wq);
604}
605
606/**
607 * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
608 * @dwork: The delayed work item to queue
609 * @delay: When to start executing the work, in jiffies from now
610 *
611 * This is similar to slow_work_enqueue(), but it adds a delay before the work
612 * is actually queued for processing.
613 *
614 * The item can have delayed processing requested on it whilst it is being
615 * executed. The delay will begin immediately, and if it expires before the
616 * item finishes executing, the item will be placed back on the queue when it
617 * has finished executing.
618 */
619int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
620 unsigned long delay)
621{
622 struct slow_work *work = &dwork->work;
623 unsigned long flags;
624 int ret;
625
626 if (delay == 0)
627 return slow_work_enqueue(&dwork->work);
628
629 BUG_ON(slow_work_user_count <= 0);
630 BUG_ON(!work);
631 BUG_ON(!work->ops);
632
633 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
634 return -ECANCELED;
635
636 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
637 spin_lock_irqsave(&slow_work_queue_lock, flags);
638
639 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
640 goto cancelled;
641
642 /* the timer holds a reference whilst it is pending */
643 ret = work->ops->get_ref(work);
644 if (ret < 0)
645 goto cant_get_ref;
646
647 if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
648 BUG();
649 dwork->timer.expires = jiffies + delay;
650 dwork->timer.data = (unsigned long) work;
651 dwork->timer.function = delayed_slow_work_timer;
652 add_timer(&dwork->timer);
653
654 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
655 }
656
657 return 0;
658
659cancelled:
660 ret = -ECANCELED;
661cant_get_ref:
662 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
663 return ret;
664}
665EXPORT_SYMBOL(delayed_slow_work_enqueue);
666
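The delayed variant follows the same shape; delayed_slow_work_init() is assumed from include/linux/slow-work.h, wrapping slow_work_init() plus timer setup:

        static struct delayed_slow_work frobnicate_dwork;

        static int frobnicate_kick_later(void)
        {
                delayed_slow_work_init(&frobnicate_dwork, &frobnicate_ops);

                /* queue roughly two seconds from now; same return codes */
                return delayed_slow_work_enqueue(&frobnicate_dwork, 2 * HZ);
        }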
321/* 667/*
322 * Schedule a cull of the thread pool at some time in the near future 668 * Schedule a cull of the thread pool at some time in the near future
323 */ 669 */
@@ -368,13 +714,23 @@ static inline bool slow_work_available(int vsmax)
368 */ 714 */
369static int slow_work_thread(void *_data) 715static int slow_work_thread(void *_data)
370{ 716{
371 int vsmax; 717 int vsmax, id;
372 718
373 DEFINE_WAIT(wait); 719 DEFINE_WAIT(wait);
374 720
375 set_freezable(); 721 set_freezable();
376 set_user_nice(current, -5); 722 set_user_nice(current, -5);
377 723
724 /* allocate ourselves an ID */
725 spin_lock_irq(&slow_work_queue_lock);
726 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
727 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
728 __set_bit(id, slow_work_ids);
729 slow_work_set_thread_pid(id, current->pid);
730 spin_unlock_irq(&slow_work_queue_lock);
731
732 sprintf(current->comm, "kslowd%03u", id);
733
378 for (;;) { 734 for (;;) {
379 vsmax = vslow_work_proportion; 735 vsmax = vslow_work_proportion;
380 vsmax *= atomic_read(&slow_work_thread_count); 736 vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +751,7 @@ static int slow_work_thread(void *_data)
395 vsmax *= atomic_read(&slow_work_thread_count); 751 vsmax *= atomic_read(&slow_work_thread_count);
396 vsmax /= 100; 752 vsmax /= 100;
397 753
398 if (slow_work_available(vsmax) && slow_work_execute()) { 754 if (slow_work_available(vsmax) && slow_work_execute(id)) {
399 cond_resched(); 755 cond_resched();
400 if (list_empty(&slow_work_queue) && 756 if (list_empty(&slow_work_queue) &&
401 list_empty(&vslow_work_queue) && 757 list_empty(&vslow_work_queue) &&
@@ -412,6 +768,11 @@ static int slow_work_thread(void *_data)
412 break; 768 break;
413 } 769 }
414 770
771 spin_lock_irq(&slow_work_queue_lock);
772 slow_work_set_thread_pid(id, 0);
773 __clear_bit(id, slow_work_ids);
774 spin_unlock_irq(&slow_work_queue_lock);
775
415 if (atomic_dec_and_test(&slow_work_thread_count)) 776 if (atomic_dec_and_test(&slow_work_thread_count))
416 complete_and_exit(&slow_work_last_thread_exited, 0); 777 complete_and_exit(&slow_work_last_thread_exited, 0);
417 return 0; 778 return 0;
@@ -427,21 +788,6 @@ static void slow_work_cull_timeout(unsigned long data)
427} 788}
428 789
429/* 790/*
430 * Get a reference on slow work thread starter
431 */
432static int slow_work_new_thread_get_ref(struct slow_work *work)
433{
434 return 0;
435}
436
437/*
438 * Drop a reference on slow work thread starter
439 */
440static void slow_work_new_thread_put_ref(struct slow_work *work)
441{
442}
443
444/*
445 * Start a new slow work thread 791 * Start a new slow work thread
446 */ 792 */
447static void slow_work_new_thread_execute(struct slow_work *work) 793static void slow_work_new_thread_execute(struct slow_work *work)
@@ -475,9 +821,11 @@ static void slow_work_new_thread_execute(struct slow_work *work)
475} 821}
476 822
477static const struct slow_work_ops slow_work_new_thread_ops = { 823static const struct slow_work_ops slow_work_new_thread_ops = {
478 .get_ref = slow_work_new_thread_get_ref, 824 .owner = THIS_MODULE,
479 .put_ref = slow_work_new_thread_put_ref,
480 .execute = slow_work_new_thread_execute, 825 .execute = slow_work_new_thread_execute,
826#ifdef CONFIG_SLOW_WORK_DEBUG
827 .desc = slow_work_new_thread_desc,
828#endif
481}; 829};
482 830
483/* 831/*
@@ -546,12 +894,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
546 894
547/** 895/**
548 * slow_work_register_user - Register a user of the facility 896 * slow_work_register_user - Register a user of the facility
897 * @module: The module about to make use of the facility
549 * 898 *
550 * Register a user of the facility, starting up the initial threads if there 899 * Register a user of the facility, starting up the initial threads if there
551 * aren't any other users at this point. This will return 0 if successful, or 900 * aren't any other users at this point. This will return 0 if successful, or
552 * an error if not. 901 * an error if not.
553 */ 902 */
554int slow_work_register_user(void) 903int slow_work_register_user(struct module *module)
555{ 904{
556 struct task_struct *p; 905 struct task_struct *p;
557 int loop; 906 int loop;
@@ -598,14 +947,81 @@ error:
598} 947}
599EXPORT_SYMBOL(slow_work_register_user); 948EXPORT_SYMBOL(slow_work_register_user);
600 949
950/*
951 * wait for all outstanding items from the calling module to complete
952 * - note that more items may be queued whilst we're waiting
953 */
954static void slow_work_wait_for_items(struct module *module)
955{
956#ifdef CONFIG_MODULES
957 DECLARE_WAITQUEUE(myself, current);
958 struct slow_work *work;
959 int loop;
960
961 mutex_lock(&slow_work_unreg_sync_lock);
962 add_wait_queue(&slow_work_unreg_wq, &myself);
963
964 for (;;) {
965 spin_lock_irq(&slow_work_queue_lock);
966
967 /* first of all, we wait for the last queued item in each list
968 * to be processed */
969 list_for_each_entry_reverse(work, &vslow_work_queue, link) {
970 if (work->owner == module) {
971 set_current_state(TASK_UNINTERRUPTIBLE);
972 slow_work_unreg_work_item = work;
973 goto do_wait;
974 }
975 }
976 list_for_each_entry_reverse(work, &slow_work_queue, link) {
977 if (work->owner == module) {
978 set_current_state(TASK_UNINTERRUPTIBLE);
979 slow_work_unreg_work_item = work;
980 goto do_wait;
981 }
982 }
983
984 /* then we wait for the items being processed to finish */
985 slow_work_unreg_module = module;
986 smp_mb();
987 for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
988 if (slow_work_thread_processing[loop] == module)
989 goto do_wait;
990 }
991 spin_unlock_irq(&slow_work_queue_lock);
992 break; /* okay, we're done */
993
994 do_wait:
995 spin_unlock_irq(&slow_work_queue_lock);
996 schedule();
997 slow_work_unreg_work_item = NULL;
998 slow_work_unreg_module = NULL;
999 }
1000
1001 remove_wait_queue(&slow_work_unreg_wq, &myself);
1002 mutex_unlock(&slow_work_unreg_sync_lock);
1003#endif /* CONFIG_MODULES */
1004}
1005
601/** 1006/**
602 * slow_work_unregister_user - Unregister a user of the facility 1007 * slow_work_unregister_user - Unregister a user of the facility
1008 * @module: The module whose items should be cleared
603 * 1009 *
604 * Unregister a user of the facility, killing all the threads if this was the 1010 * Unregister a user of the facility, killing all the threads if this was the
605 * last one. 1011 * last one.
1012 *
1013 * This waits for all the work items belonging to the nominated module to go
1014 * away before proceeding.
606 */ 1015 */
607void slow_work_unregister_user(void) 1016void slow_work_unregister_user(struct module *module)
608{ 1017{
1018 /* first of all, wait for all outstanding items from the calling module
1019 * to complete */
1020 if (module)
1021 slow_work_wait_for_items(module);
1022
1023 /* then we can actually go about shutting down the facility if need
1024 * be */
609 mutex_lock(&slow_work_user_lock); 1025 mutex_lock(&slow_work_user_lock);
610 1026
611 BUG_ON(slow_work_user_count <= 0); 1027 BUG_ON(slow_work_user_count <= 0);
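With the module argument threaded through, a user of the facility now brackets its lifetime like this (sketch, reusing the hypothetical frobnicate module):

        static int __init frobnicate_init(void)
        {
                return slow_work_register_user(THIS_MODULE);
        }

        static void __exit frobnicate_exit(void)
        {
                /* waits for all of this module's outstanding items first */
                slow_work_unregister_user(THIS_MODULE);
        }

        module_init(frobnicate_init);
        module_exit(frobnicate_exit);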
@@ -639,6 +1055,16 @@ static int __init init_slow_work(void)
639 if (slow_work_max_max_threads < nr_cpus * 2) 1055 if (slow_work_max_max_threads < nr_cpus * 2)
640 slow_work_max_max_threads = nr_cpus * 2; 1056 slow_work_max_max_threads = nr_cpus * 2;
641#endif 1057#endif
1058#ifdef CONFIG_SLOW_WORK_DEBUG
1059 {
1060 struct dentry *dbdir;
1061
1062 dbdir = debugfs_create_dir("slow_work", NULL);
1063 if (dbdir && !IS_ERR(dbdir))
1064 debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
1065 NULL, &slow_work_runqueue_fops);
1066 }
1067#endif
642 return 0; 1068 return 0;
643} 1069}
644 1070
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
new file mode 100644
index 000000000000..321f3c59d732
--- /dev/null
+++ b/kernel/slow-work.h
@@ -0,0 +1,72 @@
1/* Slow work private definitions
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
13 * things to do */
14#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
15 * OOM */
16
17#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
18
19/*
20 * slow-work.c
21 */
22#ifdef CONFIG_SLOW_WORK_DEBUG
23extern struct slow_work *slow_work_execs[];
24extern pid_t slow_work_pids[];
25extern rwlock_t slow_work_execs_lock;
26#endif
27
28extern struct list_head slow_work_queue;
29extern struct list_head vslow_work_queue;
30extern spinlock_t slow_work_queue_lock;
31
32/*
33 * slow-work-debugfs.c
34 */
35#ifdef CONFIG_SLOW_WORK_DEBUG
36extern const struct file_operations slow_work_runqueue_fops;
37
38extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
39#endif
40
41/*
42 * Helper functions
43 */
44static inline void slow_work_set_thread_pid(int id, pid_t pid)
45{
46#ifdef CONFIG_SLOW_WORK_DEBUG
47 slow_work_pids[id] = pid;
48#endif
49}
50
51static inline void slow_work_mark_time(struct slow_work *work)
52{
53#ifdef CONFIG_SLOW_WORK_DEBUG
54 work->mark = CURRENT_TIME;
55#endif
56}
57
58static inline void slow_work_begin_exec(int id, struct slow_work *work)
59{
60#ifdef CONFIG_SLOW_WORK_DEBUG
61 slow_work_execs[id] = work;
62#endif
63}
64
65static inline void slow_work_end_exec(int id, struct slow_work *work)
66{
67#ifdef CONFIG_SLOW_WORK_DEBUG
68 write_lock(&slow_work_execs_lock);
69 slow_work_execs[id] = NULL;
70 write_unlock(&slow_work_execs_lock);
71#endif
72}
diff --git a/kernel/sys.c b/kernel/sys.c
index 255475d163e0..ce17760d9c51 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid)
1110 err = session; 1110 err = session;
1111out: 1111out:
1112 write_unlock_irq(&tasklist_lock); 1112 write_unlock_irq(&tasklist_lock);
1113 if (err > 0)
1114 proc_sid_connector(group_leader);
1113 return err; 1115 return err;
1114} 1116}
1115 1117
@@ -1546,24 +1548,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1546 if (arg4 | arg5) 1548 if (arg4 | arg5)
1547 return -EINVAL; 1549 return -EINVAL;
1548 switch (arg2) { 1550 switch (arg2) {
1549 case 0: 1551 case PR_MCE_KILL_CLEAR:
1550 if (arg3 != 0) 1552 if (arg3 != 0)
1551 return -EINVAL; 1553 return -EINVAL;
1552 current->flags &= ~PF_MCE_PROCESS; 1554 current->flags &= ~PF_MCE_PROCESS;
1553 break; 1555 break;
1554 case 1: 1556 case PR_MCE_KILL_SET:
1555 current->flags |= PF_MCE_PROCESS; 1557 current->flags |= PF_MCE_PROCESS;
1556 if (arg3 != 0) 1558 if (arg3 == PR_MCE_KILL_EARLY)
1557 current->flags |= PF_MCE_EARLY; 1559 current->flags |= PF_MCE_EARLY;
1558 else 1560 else if (arg3 == PR_MCE_KILL_LATE)
1559 current->flags &= ~PF_MCE_EARLY; 1561 current->flags &= ~PF_MCE_EARLY;
1562 else if (arg3 == PR_MCE_KILL_DEFAULT)
1563 current->flags &=
1564 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
1565 else
1566 return -EINVAL;
1560 break; 1567 break;
1561 default: 1568 default:
1562 return -EINVAL; 1569 return -EINVAL;
1563 } 1570 }
1564 error = 0; 1571 error = 0;
1565 break; 1572 break;
1566 1573 case PR_MCE_KILL_GET:
1574 if (arg2 | arg3 | arg4 | arg5)
1575 return -EINVAL;
1576 if (current->flags & PF_MCE_PROCESS)
1577 error = (current->flags & PF_MCE_EARLY) ?
1578 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
1579 else
1580 error = PR_MCE_KILL_DEFAULT;
1581 break;
1567 default: 1582 default:
1568 error = -EINVAL; 1583 error = -EINVAL;
1569 break; 1584 break;
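From userspace the new interface looks like this (the PR_MCE_KILL* constants come from linux/prctl.h; the fallback values below match that header for kernels of this vintage):

        #include <stdio.h>
        #include <sys/prctl.h>

        #ifndef PR_MCE_KILL
        #define PR_MCE_KILL             33
        #define PR_MCE_KILL_GET         34
        #define PR_MCE_KILL_CLEAR       0
        #define PR_MCE_KILL_SET         1
        #define PR_MCE_KILL_LATE        0
        #define PR_MCE_KILL_EARLY       1
        #define PR_MCE_KILL_DEFAULT     2
        #endif

        int main(void)
        {
                /* opt this process in to early memory-failure killing */
                if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY,
                          0, 0))
                        perror("PR_MCE_KILL");

                /* read the policy back; the remaining args must be zero */
                printf("policy = %d\n", prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));
                return 0;
        }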
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b38423ca711a..b6e7aaea4604 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1521 if (!table->ctl_name && table->strategy) 1521 if (!table->ctl_name && table->strategy)
1522 set_fail(&fail, table, "Strategy without ctl_name"); 1522 set_fail(&fail, table, "Strategy without ctl_name");
1523#endif 1523#endif
1524#ifdef CONFIG_PROC_FS 1524#ifdef CONFIG_PROC_SYSCTL
1525 if (table->procname && !table->proc_handler) 1525 if (table->procname && !table->proc_handler)
1526 set_fail(&fail, table, "No proc_handler"); 1526 set_fail(&fail, table, "No proc_handler");
1527#endif 1527#endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 37ba67e33265..6dc4e5ef7a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -740,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
740 out: 740 out:
741 mutex_unlock(&ftrace_profile_lock); 741 mutex_unlock(&ftrace_profile_lock);
742 742
743 filp->f_pos += cnt; 743 *ppos += cnt;
744 744
745 return cnt; 745 return cnt;
746} 746}
@@ -2222,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2222 ret = ftrace_process_regex(parser->buffer, 2222 ret = ftrace_process_regex(parser->buffer,
2223 parser->idx, enable); 2223 parser->idx, enable);
2224 if (ret) 2224 if (ret)
2225 goto out; 2225 goto out_unlock;
2226 2226
2227 trace_parser_clear(parser); 2227 trace_parser_clear(parser);
2228 } 2228 }
2229 2229
2230 ret = read; 2230 ret = read;
2231 2231out_unlock:
2232 mutex_unlock(&ftrace_regex_lock); 2232 mutex_unlock(&ftrace_regex_lock);
2233out: 2233
2234 return ret; 2234 return ret;
2235} 2235}
2236 2236
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..5dd017fea6f5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -483,7 +483,7 @@ struct ring_buffer_iter {
483/* Up this if you want to test the TIME_EXTENTS and normalization */ 483/* Up this if you want to test the TIME_EXTENTS and normalization */
484#define DEBUG_SHIFT 0 484#define DEBUG_SHIFT 0
485 485
486static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) 486static inline u64 rb_time_stamp(struct ring_buffer *buffer)
487{ 487{
488 /* shift to debug/test normalization and TIME_EXTENTS */ 488 /* shift to debug/test normalization and TIME_EXTENTS */
489 return buffer->clock() << DEBUG_SHIFT; 489 return buffer->clock() << DEBUG_SHIFT;
@@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
494 u64 time; 494 u64 time;
495 495
496 preempt_disable_notrace(); 496 preempt_disable_notrace();
497 time = rb_time_stamp(buffer, cpu); 497 time = rb_time_stamp(buffer);
498 preempt_enable_no_resched_notrace(); 498 preempt_enable_no_resched_notrace();
499 499
500 return time; 500 return time;
@@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list)
599} 599}
600 600
601/* 601/*
602 * rb_is_head_page - test if the give page is the head page 602 * rb_is_head_page - test if the given page is the head page
603 * 603 *
604 * Because the reader may move the head_page pointer, we can 604 * Because the reader may move the head_page pointer, we can
605 * not trust what the head page is (it may be pointing to 605 * not trust what the head page is (it may be pointing to
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1193 atomic_inc(&cpu_buffer->record_disabled); 1193 atomic_inc(&cpu_buffer->record_disabled);
1194 synchronize_sched(); 1194 synchronize_sched();
1195 1195
1196 spin_lock_irq(&cpu_buffer->reader_lock);
1196 rb_head_page_deactivate(cpu_buffer); 1197 rb_head_page_deactivate(cpu_buffer);
1197 1198
1198 for (i = 0; i < nr_pages; i++) { 1199 for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1207 return; 1208 return;
1208 1209
1209 rb_reset_cpu(cpu_buffer); 1210 rb_reset_cpu(cpu_buffer);
1211 spin_unlock_irq(&cpu_buffer->reader_lock);
1210 1212
1211 rb_check_pages(cpu_buffer); 1213 rb_check_pages(cpu_buffer);
1212 1214
@@ -1868,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1868 * Nested commits always have zero deltas, so 1870 * Nested commits always have zero deltas, so
1869 * just reread the time stamp 1871 * just reread the time stamp
1870 */ 1872 */
1871 *ts = rb_time_stamp(buffer, cpu_buffer->cpu); 1873 *ts = rb_time_stamp(buffer);
1872 next_page->page->time_stamp = *ts; 1874 next_page->page->time_stamp = *ts;
1873 } 1875 }
1874 1876
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2111 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 2113 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2112 goto out_fail; 2114 goto out_fail;
2113 2115
2114 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); 2116 ts = rb_time_stamp(cpu_buffer->buffer);
2115 2117
2116 /* 2118 /*
2117 * Only the first commit can update the timestamp. 2119 * Only the first commit can update the timestamp.
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2681EXPORT_SYMBOL_GPL(ring_buffer_entries); 2683EXPORT_SYMBOL_GPL(ring_buffer_entries);
2682 2684
2683/** 2685/**
2684 * ring_buffer_overrun_cpu - get the number of overruns in buffer 2686 * ring_buffer_overruns - get the number of overruns in buffer
2685 * @buffer: The ring buffer 2687 * @buffer: The ring buffer
2686 * 2688 *
2687 * Returns the total number of overruns in the ring buffer 2689 * Returns the total number of overruns in the ring buffer
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c820b0310a12..b20d3ec75de9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2440,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2440 return ret; 2440 return ret;
2441 } 2441 }
2442 2442
2443 filp->f_pos += cnt; 2443 *ppos += cnt;
2444 2444
2445 return cnt; 2445 return cnt;
2446} 2446}
@@ -2582,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2582 } 2582 }
2583 mutex_unlock(&trace_types_lock); 2583 mutex_unlock(&trace_types_lock);
2584 2584
2585 filp->f_pos += cnt; 2585 *ppos += cnt;
2586 2586
2587 return cnt; 2587 return cnt;
2588} 2588}
@@ -2764,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2764 if (err) 2764 if (err)
2765 return err; 2765 return err;
2766 2766
2767 filp->f_pos += ret; 2767 *ppos += ret;
2768 2768
2769 return ret; 2769 return ret;
2770} 2770}
@@ -3299,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3299 } 3299 }
3300 } 3300 }
3301 3301
3302 filp->f_pos += cnt; 3302 *ppos += cnt;
3303 3303
3304 /* If check pages failed, return ENOMEM */ 3304 /* If check pages failed, return ENOMEM */
3305 if (tracing_disabled) 3305 if (tracing_disabled)
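All four trace_* hunks above make the same fix: a ->write() handler must advance the loff_t pointer handed in by the VFS rather than poking filp->f_pos directly, which is wrong for pwrite() and racy against concurrent writers. The corrected shape, as a sketch:

        #include <linux/fs.h>

        static ssize_t demo_write(struct file *filp, const char __user *ubuf,
                                  size_t cnt, loff_t *ppos)
        {
                /* ... consume cnt bytes from ubuf ... */

                *ppos += cnt;           /* not: filp->f_pos += cnt */
                return cnt;
        }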
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ed17565826b0..b6c12c6a1bcd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
69 * @s: trace sequence descriptor 69 * @s: trace sequence descriptor
70 * @fmt: printf format string 70 * @fmt: printf format string
71 * 71 *
 72 * It returns 0 if the trace would exceed the buffer's free
73 * space, 1 otherwise.
74 *
72 * The tracer may use either sequence operations or its own 75 * The tracer may use either sequence operations or its own
 73 * copy to user routines. To simplify formatting of a trace 76 * copy to user routines. To simplify formatting of a trace
74 * trace_seq_printf is used to store strings into a special 77 * trace_seq_printf is used to store strings into a special
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
95 98
96 s->len += ret; 99 s->len += ret;
97 100
98 return len; 101 return 1;
99} 102}
100EXPORT_SYMBOL_GPL(trace_seq_printf); 103EXPORT_SYMBOL_GPL(trace_seq_printf);
101 104
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
330 */ 330 */
331static void free_user(struct user_struct *up, unsigned long flags) 331static void free_user(struct user_struct *up, unsigned long flags)
332{ 332{
333 spin_unlock_irqrestore(&uidhash_lock, flags);
334 INIT_DELAYED_WORK(&up->work, cleanup_user_struct); 333 INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
335 schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); 334 schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
335 spin_unlock_irqrestore(&uidhash_lock, flags);
336} 336}
337 337
338#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ 338#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 47cdd7e76f2b..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -685,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
685int schedule_on_each_cpu(work_func_t func) 685int schedule_on_each_cpu(work_func_t func)
686{ 686{
687 int cpu; 687 int cpu;
688 int orig = -1;
688 struct work_struct *works; 689 struct work_struct *works;
689 690
690 works = alloc_percpu(struct work_struct); 691 works = alloc_percpu(struct work_struct);
@@ -692,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
692 return -ENOMEM; 693 return -ENOMEM;
693 694
694 get_online_cpus(); 695 get_online_cpus();
696
697 /*
698 * When running in keventd don't schedule a work item on
699 * itself. Can just call directly because the work queue is
700 * already bound. This also is faster.
701 */
702 if (current_is_keventd())
703 orig = raw_smp_processor_id();
704
695 for_each_online_cpu(cpu) { 705 for_each_online_cpu(cpu) {
696 struct work_struct *work = per_cpu_ptr(works, cpu); 706 struct work_struct *work = per_cpu_ptr(works, cpu);
697 707
698 INIT_WORK(work, func); 708 INIT_WORK(work, func);
699 schedule_work_on(cpu, work); 709 if (cpu != orig)
710 schedule_work_on(cpu, work);
700 } 711 }
712 if (orig >= 0)
713 func(per_cpu_ptr(works, orig));
714
701 for_each_online_cpu(cpu) 715 for_each_online_cpu(cpu)
702 flush_work(per_cpu_ptr(works, cpu)); 716 flush_work(per_cpu_ptr(works, cpu));
717
703 put_online_cpus(); 718 put_online_cpus();
704 free_percpu(works); 719 free_percpu(works);
705 return 0; 720 return 0;
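A caller of schedule_on_each_cpu() is unaffected by this optimisation: it still just supplies a work function and blocks until every online CPU has run it (sketch, hypothetical names):

        #include <linux/workqueue.h>

        static void drain_local_caches(struct work_struct *unused)
        {
                /* runs once on each online CPU; with this change it may run
                 * synchronously on the current CPU when called from keventd */
        }

        static int drain_all(void)
        {
                return schedule_on_each_cpu(drain_local_caches);
        }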