Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile               |    1
-rw-r--r--   kernel/cgroup.c               |    8
-rw-r--r--   kernel/exit.c                 |    4
-rw-r--r--   kernel/fork.c                 |    2
-rw-r--r--   kernel/futex.c                |   20
-rw-r--r--   kernel/irq/spurious.c         |    2
-rw-r--r--   kernel/kthread.c              |   23
-rw-r--r--   kernel/params.c               |   17
-rw-r--r--   kernel/perf_event.c           |   68
-rw-r--r--   kernel/power/hibernate.c      |   11
-rw-r--r--   kernel/power/suspend_test.c   |    5
-rw-r--r--   kernel/power/swap.c           |   43
-rw-r--r--   kernel/rcutree.c              |   60
-rw-r--r--   kernel/rcutree.h              |   17
-rw-r--r--   kernel/rcutree_plugin.h       |   46
-rw-r--r--   kernel/sched.c                |   65
-rw-r--r--   kernel/sched_fair.c           |   74
-rw-r--r--   kernel/slow-work-proc.c       |  227
-rw-r--r--   kernel/slow-work.c            |  494
-rw-r--r--   kernel/slow-work.h            |   72
-rw-r--r--   kernel/sys.c                  |   25
-rw-r--r--   kernel/sysctl_check.c         |    2
-rw-r--r--   kernel/trace/ftrace.c         |    8
-rw-r--r--   kernel/trace/ring_buffer.c    |   14
-rw-r--r--   kernel/trace/trace.c          |    8
-rw-r--r--   kernel/trace/trace_output.c   |    5
-rw-r--r--   kernel/user.c                 |    2
-rw-r--r--   kernel/workqueue.c            |   17
28 files changed, 1109 insertions, 231 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index b8d4cd8ac0b9..776ffed1556d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ca83b73fba19..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
 		return -EFAULT;
 
 	buffer[nbytes] = 0;     /* nul-terminate */
-	strstrip(buffer);
 	if (cft->write_u64) {
-		u64 val = simple_strtoull(buffer, &end, 0);
+		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
 		if (*end)
 			return -EINVAL;
 		retval = cft->write_u64(cgrp, cft, val);
 	} else {
-		s64 val = simple_strtoll(buffer, &end, 0);
+		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
 		if (*end)
 			return -EINVAL;
 		retval = cft->write_s64(cgrp, cft, val);
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
 	}
 
 	buffer[nbytes] = 0;     /* nul-terminate */
-	strstrip(buffer);
-	retval = cft->write_string(cgrp, cft, buffer);
+	retval = cft->write_string(cgrp, cft, strstrip(buffer));
 	if (!retval)
 		retval = nbytes;
 out:
diff --git a/kernel/exit.c b/kernel/exit.c
index e61891f80123..f7864ac2ecc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -359,10 +359,8 @@ void __set_special_pids(struct pid *pid)
 {
 	struct task_struct *curr = current->group_leader;
 
-	if (task_session(curr) != pid) {
+	if (task_session(curr) != pid)
 		change_pid(curr, PIDTYPE_SID, pid);
-		proc_sid_connector(curr);
-	}
 
 	if (task_pgrp(curr) != pid)
 		change_pid(curr, PIDTYPE_PGID, pid);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4c20fff8c13a..166b8c49257c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,7 +91,7 @@ int nr_processes(void)
 	int cpu;
 	int total = 0;
 
-	for_each_online_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		total += per_cpu(process_counts, cpu);
 
 	return total;
diff --git a/kernel/futex.c b/kernel/futex.c
index 4949d336d88d..fb65e822fc41 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
  */
 static inline int match_futex(union futex_key *key1, union futex_key *key2)
 {
-	return (key1->both.word == key2->both.word
+	return (key1 && key2
+		&& key1->both.word == key2->both.word
 		&& key1->both.ptr == key2->both.ptr
 		&& key1->both.offset == key2->both.offset);
 }
@@ -1028,7 +1029,6 @@ static inline
 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 			   struct futex_hash_bucket *hb)
 {
-	drop_futex_key_refs(&q->key);
 	get_futex_key_refs(key);
 	q->key = *key;
 
@@ -1226,6 +1226,7 @@ retry_private:
 		 */
 		if (ret == 1) {
 			WARN_ON(pi_state);
+			drop_count++;
 			task_count++;
 			ret = get_futex_value_locked(&curval2, uaddr2);
 			if (!ret)
@@ -1304,6 +1305,7 @@ retry_private:
 		if (ret == 1) {
 			/* We got the lock. */
 			requeue_pi_wake_futex(this, &key2, hb2);
+			drop_count++;
 			continue;
 		} else if (ret) {
 			/* -EDEADLK */
@@ -1791,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 					     current->timer_slack_ns);
 	}
 
+retry:
 	/* Prepare to wait on uaddr. */
 	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
 	if (ret)
@@ -1808,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		goto out_put_key;
 
 	/*
-	 * We expect signal_pending(current), but another thread may
-	 * have handled it for us already.
+	 * We expect signal_pending(current), but we might be the
+	 * victim of a spurious wakeup as well.
 	 */
+	if (!signal_pending(current)) {
+		put_futex_key(fshared, &q.key);
+		goto retry;
+	}
+
 	ret = -ERESTARTSYS;
 	if (!abs_time)
 		goto out_put_key;
@@ -2118,9 +2126,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 		 */
 		plist_del(&q->list, &q->list.plist);
 
+		/* Handle spurious wakeups gracefully */
+		ret = -EWOULDBLOCK;
 		if (timeout && !timeout->task)
 			ret = -ETIMEDOUT;
-		else
+		else if (signal_pending(current))
 			ret = -ERESTARTNOINTR;
 	}
 	return ret;
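The futex_wait() hunks above make the kernel-side wait path loop back and re-queue itself when it is woken without a pending signal, instead of leaking a spurious wakeup to userspace. The same retry discipline is needed by callers of FUTEX_WAIT as well. The sketch below is plain userspace C, not kernel code; the variable name futex_word and the one-second timeout are illustrative only.

/* Minimal userspace sketch of the retry-on-spurious-wakeup pattern the
 * futex_wait() hunks above enforce on the kernel side. */
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static uint32_t futex_word;		/* a waker would set this to 1 and call FUTEX_WAKE */

static int futex_wait_retry(uint32_t *uaddr, uint32_t expected)
{
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };

	for (;;) {
		long ret = syscall(SYS_futex, uaddr, FUTEX_WAIT, expected,
				   &timeout, NULL, 0);
		if (ret == 0)
			return 0;	/* woken; caller must still recheck the value */
		if (errno == EAGAIN)
			return 0;	/* value already differed from 'expected' */
		if (errno == EINTR)
			continue;	/* signal or spurious wakeup: go back to sleep */
		return -1;		/* ETIMEDOUT or a real error */
	}
}

int main(void)
{
	if (futex_wait_retry(&futex_word, 0))
		perror("futex_wait_retry");
	return 0;
}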
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 114e704760fe..bd7273e6282e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -121,7 +121,9 @@ static void poll_all_shared_irqs(void)
 		if (!(status & IRQ_SPURIOUS_DISABLED))
 			continue;
 
+		local_irq_disable();
 		try_one_irq(i, desc);
+		local_irq_enable();
 	}
 }
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5fe709982caa..ab7ae57773e1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 EXPORT_SYMBOL(kthread_create);
 
 /**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @k: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @k to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- */
-void kthread_bind(struct task_struct *k, unsigned int cpu)
-{
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
-		WARN_ON(1);
-		return;
-	}
-	set_task_cpu(k, cpu);
-	k->cpus_allowed = cpumask_of_cpu(cpu);
-	k->rt.nr_cpus_allowed = 1;
-	k->flags |= PF_THREAD_BOUND;
-}
-EXPORT_SYMBOL(kthread_bind);
-
-/**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().
  *
diff --git a/kernel/params.c b/kernel/params.c
index 9da58eabdcb2..d656c276508d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -218,15 +218,11 @@ int param_set_charp(const char *val, struct kernel_param *kp)
 		return -ENOSPC;
 	}
 
-	if (kp->flags & KPARAM_KMALLOCED)
-		kfree(*(char **)kp->arg);
-
 	/* This is a hack. We can't need to strdup in early boot, and we
 	 * don't need to; this mangled commandline is preserved. */
 	if (slab_is_available()) {
-		kp->flags |= KPARAM_KMALLOCED;
 		*(char **)kp->arg = kstrdup(val, GFP_KERNEL);
-		if (!kp->arg)
+		if (!*(char **)kp->arg)
 			return -ENOMEM;
 	} else
 		*(const char **)kp->arg = val;
@@ -304,6 +300,7 @@ static int param_array(const char *name,
 		       unsigned int min, unsigned int max,
 		       void *elem, int elemsize,
 		       int (*set)(const char *, struct kernel_param *kp),
+		       u16 flags,
 		       unsigned int *num)
 {
 	int ret;
@@ -313,6 +310,7 @@ static int param_array(const char *name,
 	/* Get the name right for errors. */
 	kp.name = name;
 	kp.arg = elem;
+	kp.flags = flags;
 
 	/* No equals sign? */
 	if (!val) {
@@ -358,7 +356,8 @@ int param_array_set(const char *val, struct kernel_param *kp)
 	unsigned int temp_num;
 
 	return param_array(kp->name, val, 1, arr->max, arr->elem,
-			   arr->elemsize, arr->set, arr->num ?: &temp_num);
+			   arr->elemsize, arr->set, kp->flags,
+			   arr->num ?: &temp_num);
 }
 
 int param_array_get(char *buffer, struct kernel_param *kp)
@@ -605,11 +604,7 @@ void module_param_sysfs_remove(struct module *mod)
 
 void destroy_params(const struct kernel_param *params, unsigned num)
 {
-	unsigned int i;
-
-	for (i = 0; i < num; i++)
-		if (params[i].flags & KPARAM_KMALLOCED)
-			kfree(*(char **)params[i].arg);
+	/* FIXME: This should free kmalloced charp parameters.  It doesn't. */
 }
 
 static void __init kernel_add_sysfs_param(const char *name,
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..7f29643c8985 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	u64 interrupts, freq;
 
 	spin_lock(&ctx->lock);
-	list_for_each_entry(event, &ctx->group_list, group_entry) {
+	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
@@ -3959,8 +3959,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	regs = task_pt_regs(current);
 
 	if (regs) {
-		if (perf_event_overflow(event, 0, &data, regs))
-			ret = HRTIMER_NORESTART;
+		if (!(event->attr.exclude_idle && current->pid == 0))
+			if (perf_event_overflow(event, 0, &data, regs))
+				ret = HRTIMER_NORESTART;
 	}
 
 	period = max_t(u64, 10000, event->hw.sample_period);
@@ -3969,6 +3970,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	return ret;
 }
 
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swevent_hrtimer;
+	if (hwc->sample_period) {
+		u64 period;
+
+		if (hwc->remaining) {
+			if (hwc->remaining < 0)
+				period = 10000;
+			else
+				period = hwc->remaining;
+			hwc->remaining = 0;
+		} else {
+			period = max_t(u64, 10000, hwc->sample_period);
+		}
+		__hrtimer_start_range_ns(&hwc->hrtimer,
+				ns_to_ktime(period), 0,
+				HRTIMER_MODE_REL, 0);
+	}
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->sample_period) {
+		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+		hwc->remaining = ktime_to_ns(remaining);
+
+		hrtimer_cancel(&hwc->hrtimer);
+	}
+}
+
 /*
  * Software event: cpu wall time clock
  */
@@ -3991,22 +4028,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
 	int cpu = raw_smp_processor_id();
 
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void cpu_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	cpu_clock_perf_event_update(event);
 }
 
@@ -4043,22 +4072,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
 	now = event->ctx->time;
 
 	atomic64_set(&hwc->prev_count, now);
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void task_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	task_clock_perf_event_update(event, event->ctx->time);
 
 }
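The perf_event.c hunks factor the hrtimer setup into perf_swevent_start_hrtimer()/perf_swevent_cancel_hrtimer() so that a disabled software event remembers how much of its sampling period was left and resumes with that remainder rather than a full period. A rough userspace analogue of that save-and-resume bookkeeping is sketched below in plain POSIX C; the struct and function names are hypothetical and the kernel's 10 µs minimum-period clamp is omitted.

/* Userspace analogue of the "remember the remaining period across
 * cancel/start" bookkeeping added to the perf hrtimer helpers above. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct sample_timer {
	uint64_t period_ns;	/* full sampling period */
	uint64_t remaining_ns;	/* left over from the last cancel, 0 if none */
	uint64_t deadline_ns;	/* absolute CLOCK_MONOTONIC deadline while armed */
};

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void timer_start(struct sample_timer *t)
{
	/* Resume with whatever was left, otherwise arm a full period. */
	uint64_t period = t->remaining_ns ? t->remaining_ns : t->period_ns;
	t->remaining_ns = 0;
	t->deadline_ns = now_ns() + period;
}

static void timer_cancel(struct sample_timer *t)
{
	/* Save the unexpired part so the next start doesn't begin from scratch. */
	uint64_t now = now_ns();
	t->remaining_ns = t->deadline_ns > now ? t->deadline_ns - now : 0;
}

int main(void)
{
	struct sample_timer t = { .period_ns = 10 * 1000 * 1000 };	/* 10 ms */

	timer_start(&t);
	timer_cancel(&t);
	printf("remaining after immediate cancel: %llu ns\n",
	       (unsigned long long)t.remaining_ns);
	timer_start(&t);	/* resumes with ~10 ms minus the elapsed time */
	return 0;
}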
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 04b3a83d686f..04a9e90d248f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -693,21 +693,22 @@ static int software_resume(void)
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
+		swsusp_close(FMODE_READ);
 		goto Unlock;
 	}
 
 	pm_prepare_console();
 	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
 	if (error)
-		goto Finish;
+		goto close_finish;
 
 	error = usermodehelper_disable();
 	if (error)
-		goto Finish;
+		goto close_finish;
 
 	error = create_basic_memory_bitmaps();
 	if (error)
-		goto Finish;
+		goto close_finish;
 
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = prepare_processes();
@@ -719,6 +720,7 @@ static int software_resume(void)
 	pr_debug("PM: Reading hibernation image.\n");
 
 	error = swsusp_read(&flags);
+	swsusp_close(FMODE_READ);
 	if (!error)
 		hibernation_restore(flags & SF_PLATFORM_MODE);
 
@@ -737,6 +739,9 @@ static int software_resume(void)
 	mutex_unlock(&pm_mutex);
 	pr_debug("PM: Resume from disk failed.\n");
 	return error;
+close_finish:
+	swsusp_close(FMODE_READ);
+	goto Finish;
 }
 
 late_initcall(software_resume);
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 17d8bb1acf9c..25596e450ac7 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -19,7 +19,7 @@
  * The time it takes is system-specific though, so when we test this
  * during system bootup we allow a LOT of time.
  */
-#define TEST_SUSPEND_SECONDS	5
+#define TEST_SUSPEND_SECONDS	10
 
 static unsigned long suspend_test_start_time;
 
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label)
 	 * has some performance issues. The stack dump of a WARN_ON
 	 * is more likely to get the right attention than a printk...
 	 */
-	WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
+	WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
+	     "Component: %s, time: %u\n", label, msec);
 }
 
 /*
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b101cdc4df3f..890f6b11b1d3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -314,7 +314,6 @@ static int save_image(struct swap_map_handle *handle,
 {
 	unsigned int m;
 	int ret;
-	int error = 0;
 	int nr_pages;
 	int err2;
 	struct bio *bio;
@@ -329,26 +328,27 @@ static int save_image(struct swap_map_handle *handle,
 	nr_pages = 0;
 	bio = NULL;
 	do_gettimeofday(&start);
-	do {
+	while (1) {
 		ret = snapshot_read_next(snapshot, PAGE_SIZE);
-		if (ret > 0) {
-			error = swap_write_page(handle, data_of(*snapshot),
-						&bio);
-			if (error)
+		if (ret <= 0)
+			break;
+		ret = swap_write_page(handle, data_of(*snapshot), &bio);
+		if (ret)
 			break;
 		if (!(nr_pages % m))
 			printk("\b\b\b\b%3d%%", nr_pages / m);
 		nr_pages++;
 	}
-	} while (ret > 0);
 	err2 = wait_on_bio_chain(&bio);
 	do_gettimeofday(&stop);
-	if (!error)
-		error = err2;
-	if (!error)
+	if (!ret)
+		ret = err2;
+	if (!ret)
 		printk("\b\b\b\bdone\n");
+	else
+		printk("\n");
 	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
-	return error;
+	return ret;
 }
 
 /**
@@ -536,7 +536,8 @@ static int load_image(struct swap_map_handle *handle,
 		snapshot_write_finalize(snapshot);
 		if (!snapshot_image_loaded(snapshot))
 			error = -ENODATA;
-	}
+	} else
+		printk("\n");
 	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
 	return error;
 }
@@ -572,8 +573,6 @@ int swsusp_read(unsigned int *flags_p)
 		error = load_image(&handle, &snapshot, header->pages - 1);
 	release_swap_reader(&handle);
 
-	blkdev_put(resume_bdev, FMODE_READ);
-
 	if (!error)
 		pr_debug("PM: Image successfully loaded\n");
 	else
@@ -596,7 +595,7 @@ int swsusp_check(void)
 		error = bio_read_page(swsusp_resume_block,
 					swsusp_header, NULL);
 		if (error)
-			return error;
+			goto put;
 
 		if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) {
 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
@@ -604,8 +603,10 @@ int swsusp_check(void)
 			error = bio_write_page(swsusp_resume_block,
 					swsusp_header, NULL);
 		} else {
-			return -EINVAL;
+			error = -EINVAL;
 		}
+
+put:
 	if (error)
 		blkdev_put(resume_bdev, FMODE_READ);
 	else
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 705f02ac7433..f3077c0ab181 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -59,7 +59,7 @@
 		NUM_RCU_LVL_2, \
 		NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -657,14 +657,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * irqs disabled.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		spin_lock(&rnp->lock);	/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);		/* irqs already disabled. */
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -706,6 +709,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -913,7 +917,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+		/*
+		 * If there was a task blocking the current grace period,
+		 * and if all CPUs have checked in, we need to propagate
+		 * the quiescent state up the rcu_node hierarchy.  But that
+		 * is inconvenient at the moment due to deadlock issues if
+		 * this should end the current grace period.  So set the
+		 * offlined CPU's bit in ->qsmask in order to force the
+		 * next force_quiescent_state() invocation to clean up this
+		 * mess in a deadlock-free manner.
+		 */
+		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+			rnp->qsmask |= mask;
+
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
@@ -958,7 +975,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Thottle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
@@ -1011,6 +1028,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;
 
+	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+		rdp->qlen_last_fqs_check = 0;
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+		rdp->qlen_last_fqs_check = rdp->qlen;
+
 	local_irq_restore(flags);
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1142,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
 
@@ -1158,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 		/* Update state, record completion counter. */
 		spin_lock(&rnp->lock);
-		if (lastcomp == rsp->completed) {
+		if (lastcomp == rsp->completed &&
+		    rsp->signaled == RCU_SAVE_DYNTICK) {
 			rsp->signaled = RCU_FORCE_QS;
 			dyntick_record_completed(rsp, lastcomp);
 		}
@@ -1224,7 +1250,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* If there are callbacks ready, invoke them. */
-	rcu_do_batch(rdp);
+	rcu_do_batch(rsp, rdp);
 }
 
 /*
@@ -1288,10 +1314,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
 	}
 
-	/* Force the grace period if too many callbacks or too long waiting. */
-	if (unlikely(++rdp->qlen > qhimark)) {
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so.  Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 		rdp->blimit = LONG_MAX;
-		force_quiescent_state(rsp, 0);
+		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+		    *rdp->nxttail[RCU_DONE_TAIL] != head)
+			force_quiescent_state(rsp, 0);
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->qlen_last_fqs_check = rdp->qlen;
 	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
@@ -1523,6 +1559,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->beenonline = 1;	 /* We have now been online. */
 	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
+	rdp->qlen_last_fqs_check = 0;
+	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
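The __call_rcu() and rcu_do_batch() hunks add hysteresis so that crossing qhimark once does not make every subsequent enqueue invoke force_quiescent_state(): the trigger point follows qlen_last_fqs_check upward and is only re-armed once the queue drains or another CPU has forced a quiescent state. The same rate-limiting idea, stripped of the RCU specifics, is sketched below in stand-alone C with hypothetical names.

/* Stand-alone sketch of the hysteresis added around force_quiescent_state():
 * the expensive flush runs only when the queue has grown by a full threshold
 * since the last time it was considered, not on every enqueue past a fixed mark. */
#include <stdio.h>

#define QHIMARK 10000

struct cb_queue {
	long qlen;		/* callbacks currently queued */
	long qlen_last_check;	/* qlen when we last considered flushing */
};

static void expensive_flush(struct cb_queue *q)
{
	printf("flush at qlen=%ld\n", q->qlen);
}

static void enqueue(struct cb_queue *q)
{
	if (++q->qlen > q->qlen_last_check + QHIMARK) {
		expensive_flush(q);
		q->qlen_last_check = q->qlen;	/* don't retrigger until it grows again */
	}
}

static void drain(struct cb_queue *q, long n)
{
	q->qlen -= n;
	if (q->qlen == 0)
		q->qlen_last_check = 0;		/* queue emptied: re-arm the trigger */
}

int main(void)
{
	struct cb_queue q = { 0, 0 };

	for (int i = 0; i < 25000; i++)
		enqueue(&q);			/* flushes twice, not 15000 times */
	drain(&q, q.qlen);
	return 0;
}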
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index b40ac5706040..1899023b0962 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -167,6 +167,10 @@ struct rcu_data {
 	struct rcu_head *nxtlist;
 	struct rcu_head **nxttail[RCU_NEXT_SIZE];
 	long		qlen;		/* # of queued callbacks */
+	long		qlen_last_fqs_check;
+					/* qlen at last check for QS forcing */
+	unsigned long	n_force_qs_snap;
+					/* did other CPU force QS recently? */
 	long		blimit;		/* Upper limit on a processed batch */
 
 #ifdef CONFIG_NO_HZ
@@ -197,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT		0	/* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
-#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
+#define RCU_GP_IDLE		0	/* No grace period in progress. */
+#define RCU_GP_INIT		1	/* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
+#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */
@@ -302,9 +307,9 @@ static void rcu_print_task_stall(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp);
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp);
 static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c0cb783aa16a..ef2a58c2b9d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  * parent is to remove the need for rcu_read_unlock_special() to
  * make more than two attempts to acquire the target rcu_node's lock.
  *
+ * Returns 1 if there was previously a task blocking the current grace
+ * period on the specified rcu_node structure.
+ *
  * The caller must hold rnp->lock with irqs disabled.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 				     struct rcu_node *rnp,
 				     struct rcu_data *rdp)
 {
 	int i;
 	struct list_head *lp;
 	struct list_head *lp_root;
+	int retval = rcu_preempted_readers(rnp);
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	struct task_struct *tp;
 
 	if (rnp == rnp_root) {
 		WARN_ONCE(1, "Last CPU thought to be offlined?");
-		return;  /* Shouldn't happen: at least one CPU online. */
+		return 0;  /* Shouldn't happen: at least one CPU online. */
 	}
 	WARN_ON_ONCE(rnp != rdp->mynode &&
 		     (!list_empty(&rnp->blocked_tasks[0]) ||
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 			spin_unlock(&rnp_root->lock); /* irqs remain disabled */
 		}
 	}
+
+	return retval;
 }
 
 /*
@@ -393,6 +399,17 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
+ * Wait for an rcu-preempt grace period.  We are supposed to expedite the
+ * grace period, but this is the crude slow compatability hack, so just
+ * invoke synchronize_rcu().
+ */
+void synchronize_rcu_expedited(void)
+{
+	synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
  * Check to see if there is any immediate preemptable-RCU-related work
  * to be done.
  */
@@ -521,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 
 /*
  * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 				     struct rcu_node *rnp,
 				     struct rcu_data *rdp)
 {
+	return 0;
 }
 
 /*
@@ -565,6 +585,16 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
+ * Wait for an rcu-preempt grace period, but make it happen quickly.
+ * But because preemptable RCU does not exist, map to rcu-sched.
+ */
+void synchronize_rcu_expedited(void)
+{
+	synchronize_sched_expedited();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
  * Because preemptable RCU does not exist, it never has any work to do.
  */
 static int rcu_preempt_pending(int cpu)
diff --git a/kernel/sched.c b/kernel/sched.c
index e88689522e66..3c11ae0a948d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -1564,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1578,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1618,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1627,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
 		/*
 		 * If there are currently no tasks on the cpu pretend there
@@ -1652,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
 
@@ -1996,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	set_task_cpu(p, cpu);
+	p->cpus_allowed = cpumask_of_cpu(cpu);
+	p->rt.nr_cpus_allowed = 1;
+	p->flags |= PF_THREAD_BOUND;
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -2008,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) &&
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 			(&p->se == cfs_rq_of(&p->se)->next ||
 			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
@@ -9407,6 +9436,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
 
@@ -9532,13 +9565,13 @@ void __init sched_init(void)
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	perf_event_init();
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4e777b47eeda..37087a7fac22 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		 * re-elected due to buddy favours.
 		 */
 		clear_buddies(cfs_rq, curr);
+		return;
+	}
+
+	/*
+	 * Ensure that a task that missed wakeup preemption by a
+	 * narrow margin doesn't have to wait for a full slice.
+	 * This also mitigates buddy induced latencies under load.
+	 */
+	if (!sched_feat(WAKEUP_PREEMPT))
+		return;
+
+	if (delta_exec < sysctl_sched_min_granularity)
+		return;
+
+	if (cfs_rq->nr_running > 1) {
+		struct sched_entity *se = __pick_next_entity(cfs_rq);
+		s64 delta = curr->vruntime - se->vruntime;
+
+		if (delta > ideal_runtime)
+			resched_task(rq_of(cfs_rq)->curr);
 	}
 }
 
@@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *se = __pick_next_entity(cfs_rq);
+	struct sched_entity *left = se;
 
-	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
-		return cfs_rq->next;
+	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+		se = cfs_rq->next;
 
-	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
-		return cfs_rq->last;
+	/*
+	 * Prefer last buddy, try to return the CPU to a preempted task.
+	 */
+	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
+		se = cfs_rq->last;
+
+	clear_buddies(cfs_rq, se);
 
 	return se;
 }
@@ -1568,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int sync = wake_flags & WF_SYNC;
+	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
 	update_curr(cfs_rq);
 
@@ -1582,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	if (unlikely(se == pse))
 		return;
 
-	/*
-	 * Only set the backward buddy when the current task is still on the
-	 * rq. This can happen when a wakeup gets interleaved with schedule on
-	 * the ->pre_schedule() or idle_balance() point, either of which can
-	 * drop the rq lock.
-	 *
-	 * Also, during early boot the idle thread is in the fair class, for
-	 * obvious reasons its a bad idea to schedule back to the idle thread.
-	 */
-	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
-		set_last_buddy(se);
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
+	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
 		set_next_buddy(pse);
 
 	/*
@@ -1639,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 	BUG_ON(!pse);
 
-	if (wakeup_preempt_entity(se, pse) == 1)
+	if (wakeup_preempt_entity(se, pse) == 1) {
 		resched_task(curr);
+		/*
+		 * Only set the backward buddy when the current task is still
+		 * on the rq. This can happen when a wakeup gets interleaved
+		 * with schedule on the ->pre_schedule() or idle_balance()
+		 * point, either of which can * drop the rq lock.
+		 *
+		 * Also, during early boot the idle thread is in the fair class,
+		 * for obvious reasons its a bad idea to schedule back to it.
+		 */
+		if (unlikely(!se->on_rq || curr == rq->idle))
+			return;
+		if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+			set_last_buddy(se);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1654,16 +1684,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
-		/*
-		 * If se was a buddy, clear it so that it will have to earn
-		 * the favour again.
-		 *
-		 * If se was not a buddy, clear the buddies because neither
-		 * was elegible to run, let them earn it again.
-		 *
-		 * IOW. unconditionally clear buddies.
-		 */
-		__clear_buddies(cfs_rq, NULL);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c
new file mode 100644
index 000000000000..3988032571f5
--- /dev/null
+++ b/kernel/slow-work-proc.c
@@ -0,0 +1,227 @@
+/* Slow work debugging
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slow-work.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/seq_file.h>
+#include "slow-work.h"
+
+#define ITERATOR_SHIFT		(BITS_PER_LONG - 4)
+#define ITERATOR_SELECTOR	(0xfUL << ITERATOR_SHIFT)
+#define ITERATOR_COUNTER	(~ITERATOR_SELECTOR)
+
+void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
+{
+	seq_puts(m, "Slow-work: New thread");
+}
+
+/*
+ * Render the time mark field on a work item into a 5-char time with units plus
+ * a space
+ */
+static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
+{
+	struct timespec now, diff;
+
+	now = CURRENT_TIME;
+	diff = timespec_sub(now, work->mark);
+
+	if (diff.tv_sec < 0)
+		seq_puts(m, "  -ve ");
+	else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
+		seq_printf(m, "%3luns ", diff.tv_nsec);
+	else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
+		seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
+	else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
+		seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
+	else if (diff.tv_sec <= 1)
+		seq_puts(m, "   1s ");
+	else if (diff.tv_sec < 60)
+		seq_printf(m, "%4lus ", diff.tv_sec);
+	else if (diff.tv_sec < 60 * 60)
+		seq_printf(m, "%4lum ", diff.tv_sec / 60);
+	else if (diff.tv_sec < 60 * 60 * 24)
+		seq_printf(m, "%4luh ", diff.tv_sec / 3600);
+	else
+		seq_puts(m, "exces ");
+}
+
+/*
+ * Describe a slow work item for /proc
+ */
+static int slow_work_runqueue_show(struct seq_file *m, void *v)
+{
+	struct slow_work *work;
+	struct list_head *p = v;
+	unsigned long id;
+
+	switch ((unsigned long) v) {
+	case 1:
+		seq_puts(m, "THR PID   ITEM ADDR        FL MARK  DESC\n");
+		return 0;
+	case 2:
+		seq_puts(m, "=== ===== ================ == ===== ==========\n");
+		return 0;
+
+	case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
+		id = (unsigned long) v - 3;
+
+		read_lock(&slow_work_execs_lock);
+		work = slow_work_execs[id];
+		if (work) {
+			smp_read_barrier_depends();
+
+			seq_printf(m, "%3lu %5d %16p %2lx ",
+				   id, slow_work_pids[id], work, work->flags);
+			slow_work_print_mark(m, work);
+
+			if (work->ops->desc)
+				work->ops->desc(work, m);
+			seq_putc(m, '\n');
+		}
+		read_unlock(&slow_work_execs_lock);
+		return 0;
+
+	default:
+		work = list_entry(p, struct slow_work, link);
+		seq_printf(m, "%3s - %16p %2lx ",
+			   work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
+			   work, work->flags);
+		slow_work_print_mark(m, work);
+
+		if (work->ops->desc)
+			work->ops->desc(work, m);
+		seq_putc(m, '\n');
+		return 0;
+	}
+}
+
+/*
+ * map the iterator to a work item
+ */
+static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
+{
+	struct list_head *p;
+	unsigned long count, id;
+
+	switch (*_pos >> ITERATOR_SHIFT) {
+	case 0x0:
+		if (*_pos == 0)
+			*_pos = 1;
+		if (*_pos < 3)
+			return (void *)(unsigned long) *_pos;
+		if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
+			for (id = *_pos - 3;
+			     id < SLOW_WORK_THREAD_LIMIT;
+			     id++, (*_pos)++)
+				if (slow_work_execs[id])
+					return (void *)(unsigned long) *_pos;
+		*_pos = 0x1UL << ITERATOR_SHIFT;
+
+	case 0x1:
+		count = *_pos & ITERATOR_COUNTER;
+		list_for_each(p, &slow_work_queue) {
+			if (count == 0)
+				return p;
+			count--;
+		}
+		*_pos = 0x2UL << ITERATOR_SHIFT;
+
+	case 0x2:
+		count = *_pos & ITERATOR_COUNTER;
+		list_for_each(p, &vslow_work_queue) {
+			if (count == 0)
+				return p;
+			count--;
+		}
+		*_pos = 0x3UL << ITERATOR_SHIFT;
+
+	default:
+		return NULL;
+	}
+}
+
+/*
+ * set up the iterator to start reading from the first line
+ */
+static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
+{
+	spin_lock_irq(&slow_work_queue_lock);
+	return slow_work_runqueue_index(m, _pos);
+}
+
+/*
+ * move to the next line
+ */
+static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+	struct list_head *p = v;
+	unsigned long selector = *_pos >> ITERATOR_SHIFT;
+
+	(*_pos)++;
+	switch (selector) {
+	case 0x0:
+		return slow_work_runqueue_index(m, _pos);
+
+	case 0x1:
+		if (*_pos >> ITERATOR_SHIFT == 0x1) {
+			p = p->next;
+			if (p != &slow_work_queue)
+				return p;
+		}
+		*_pos = 0x2UL << ITERATOR_SHIFT;
+		p = &vslow_work_queue;
+
+	case 0x2:
+		if (*_pos >> ITERATOR_SHIFT == 0x2) {
+			p = p->next;
+			if (p != &vslow_work_queue)
+				return p;
+		}
+		*_pos = 0x3UL << ITERATOR_SHIFT;
+
+	default:
+		return NULL;
+	}
+}
+
+/*
+ * clean up after reading
+ */
+static void slow_work_runqueue_stop(struct seq_file *m, void *v)
+{
+	spin_unlock_irq(&slow_work_queue_lock);
+}
+
+static const struct seq_operations slow_work_runqueue_ops = {
+	.start		= slow_work_runqueue_start,
+	.stop		= slow_work_runqueue_stop,
+	.next		= slow_work_runqueue_next,
+	.show		= slow_work_runqueue_show,
+};
+
+/*
+ * open "/proc/slow_work_rq" to list queue contents
+ */
+static int slow_work_runqueue_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &slow_work_runqueue_ops);
+}
+
+const struct file_operations slow_work_runqueue_fops = {
+	.owner		= THIS_MODULE,
+	.open		= slow_work_runqueue_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
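slow-work-proc.c drives its /proc file through the full seq_file iterator (start/next/stop/show) so the queues can be walked under slow_work_queue_lock without building the whole report in one buffer. For a file that only ever prints a single record, the same plumbing collapses to single_open(). The sketch below is a minimal, hypothetical module against the 2.6.3x-era proc interface this series targets (current kernels pass a struct proc_ops instead of file_operations); it is an illustration of the pattern, not part of the patch.

/* Minimal single_open()-based /proc file, shown only to illustrate the
 * seq_file plumbing used by slow-work-proc.c above.  2.6.3x-era interface;
 * names are hypothetical. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello from /proc/slow_work_example\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	if (!proc_create("slow_work_example", 0444, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("slow_work_example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");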
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..da94f3c101af 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,11 +16,8 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/freezer.h> 17#include <linux/freezer.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19#include <linux/proc_fs.h>
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of 20#include "slow-work.h"
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24 21
25static void slow_work_cull_timeout(unsigned long); 22static void slow_work_cull_timeout(unsigned long);
26static void slow_work_oom_timeout(unsigned long); 23static void slow_work_oom_timeout(unsigned long);
@@ -46,7 +43,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
46 43
47#ifdef CONFIG_SYSCTL 44#ifdef CONFIG_SYSCTL
48static const int slow_work_min_min_threads = 2; 45static const int slow_work_min_min_threads = 2;
49static int slow_work_max_max_threads = 255; 46static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
50static const int slow_work_min_vslow = 1; 47static const int slow_work_min_vslow = 1;
51static const int slow_work_max_vslow = 99; 48static const int slow_work_max_vslow = 99;
52 49
@@ -98,6 +95,32 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
98static struct slow_work slow_work_new_thread; /* new thread starter */ 95static struct slow_work slow_work_new_thread; /* new thread starter */
99 96
100/* 97/*
98 * slow work ID allocation (use slow_work_queue_lock)
99 */
100static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
101
102/*
103 * Unregistration tracking to prevent put_ref() from disappearing during module
104 * unload
105 */
106#ifdef CONFIG_MODULES
107static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
108static struct module *slow_work_unreg_module;
109static struct slow_work *slow_work_unreg_work_item;
110static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
111static DEFINE_MUTEX(slow_work_unreg_sync_lock);
112#endif
113
114/*
115 * Data for tracking currently executing items for indication through /proc
116 */
117#ifdef CONFIG_SLOW_WORK_PROC
118struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
119pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
120DEFINE_RWLOCK(slow_work_execs_lock);
121#endif
122
123/*
101 * The queues of work items and the lock governing access to them. These are 124 * The queues of work items and the lock governing access to them. These are
102 * shared between all the CPUs. It doesn't make sense to have per-CPU queues 125 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
103 * as the number of threads bears no relation to the number of CPUs. 126 * as the number of threads bears no relation to the number of CPUs.
@@ -105,9 +128,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */
105 * There are two queues of work items: one for slow work items, and one for 128 * There are two queues of work items: one for slow work items, and one for
106 * very slow work items. 129 * very slow work items.
107 */ 130 */
108static LIST_HEAD(slow_work_queue); 131LIST_HEAD(slow_work_queue);
109static LIST_HEAD(vslow_work_queue); 132LIST_HEAD(vslow_work_queue);
110static DEFINE_SPINLOCK(slow_work_queue_lock); 133DEFINE_SPINLOCK(slow_work_queue_lock);
134
135/*
136 * The following are two wait queues that get pinged when a work item is placed
137 * on an empty queue. These allow work items that are hogging a thread by
138 * sleeping in a way that could be deferred to yield their thread and enqueue
139 * themselves.
140 */
141static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
142static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
111 143
112/* 144/*
113 * The thread controls. A variable used to signal to the threads that they 145 * The thread controls. A variable used to signal to the threads that they
@@ -126,6 +158,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
126static int slow_work_user_count; 158static int slow_work_user_count;
127static DEFINE_MUTEX(slow_work_user_lock); 159static DEFINE_MUTEX(slow_work_user_lock);
128 160
161static inline int slow_work_get_ref(struct slow_work *work)
162{
163 if (work->ops->get_ref)
164 return work->ops->get_ref(work);
165
166 return 0;
167}
168
169static inline void slow_work_put_ref(struct slow_work *work)
170{
171 if (work->ops->put_ref)
172 work->ops->put_ref(work);
173}
174
129/* 175/*
130 * Calculate the maximum number of active threads in the pool that are 176 * Calculate the maximum number of active threads in the pool that are
131 * permitted to process very slow work items. 177 * permitted to process very slow work items.
@@ -149,8 +195,11 @@ static unsigned slow_work_calc_vsmax(void)
149 * Attempt to execute stuff queued on a slow thread. Return true if we managed 195 * Attempt to execute stuff queued on a slow thread. Return true if we managed
150 * it, false if there was nothing to do. 196 * it, false if there was nothing to do.
151 */ 197 */
152static bool slow_work_execute(void) 198static noinline bool slow_work_execute(int id)
153{ 199{
200#ifdef CONFIG_MODULES
201 struct module *module;
202#endif
154 struct slow_work *work = NULL; 203 struct slow_work *work = NULL;
155 unsigned vsmax; 204 unsigned vsmax;
156 bool very_slow; 205 bool very_slow;
@@ -186,6 +235,16 @@ static bool slow_work_execute(void)
186 } else { 235 } else {
187 very_slow = false; /* avoid the compiler warning */ 236 very_slow = false; /* avoid the compiler warning */
188 } 237 }
238
239#ifdef CONFIG_MODULES
240 if (work)
241 slow_work_thread_processing[id] = work->owner;
242#endif
243 if (work) {
244 slow_work_mark_time(work);
245 slow_work_begin_exec(id, work);
246 }
247
189 spin_unlock_irq(&slow_work_queue_lock); 248 spin_unlock_irq(&slow_work_queue_lock);
190 249
191 if (!work) 250 if (!work)
@@ -194,12 +253,19 @@ static bool slow_work_execute(void)
194 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) 253 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
195 BUG(); 254 BUG();
196 255
197 work->ops->execute(work); 256 /* don't execute if the work is in the process of being cancelled */
257 if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
258 work->ops->execute(work);
198 259
199 if (very_slow) 260 if (very_slow)
200 atomic_dec(&vslow_work_executing_count); 261 atomic_dec(&vslow_work_executing_count);
201 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); 262 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
202 263
264 /* wake up anyone waiting for this work to be complete */
265 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
266
267 slow_work_end_exec(id, work);
268
203 /* if someone tried to enqueue the item whilst we were executing it, 269 /* if someone tried to enqueue the item whilst we were executing it,
204 * then it'll be left unenqueued to avoid multiple threads trying to 270 * then it'll be left unenqueued to avoid multiple threads trying to
205 * execute it simultaneously 271 * execute it simultaneously
@@ -219,7 +285,18 @@ static bool slow_work_execute(void)
219 spin_unlock_irq(&slow_work_queue_lock); 285 spin_unlock_irq(&slow_work_queue_lock);
220 } 286 }
221 287
222 work->ops->put_ref(work); 288 /* sort out the race between module unloading and put_ref() */
289 slow_work_put_ref(work);
290
291#ifdef CONFIG_MODULES
292 module = slow_work_thread_processing[id];
293 slow_work_thread_processing[id] = NULL;
294 smp_mb();
295 if (slow_work_unreg_work_item == work ||
296 slow_work_unreg_module == module)
297 wake_up_all(&slow_work_unreg_wq);
298#endif
299
223 return true; 300 return true;
224 301
225auto_requeue: 302auto_requeue:
@@ -227,15 +304,61 @@ auto_requeue:
227 * - we transfer our ref on the item back to the appropriate queue 304 * - we transfer our ref on the item back to the appropriate queue
228 * - don't wake another thread up as we're awake already 305 * - don't wake another thread up as we're awake already
229 */ 306 */
307 slow_work_mark_time(work);
230 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 308 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
231 list_add_tail(&work->link, &vslow_work_queue); 309 list_add_tail(&work->link, &vslow_work_queue);
232 else 310 else
233 list_add_tail(&work->link, &slow_work_queue); 311 list_add_tail(&work->link, &slow_work_queue);
234 spin_unlock_irq(&slow_work_queue_lock); 312 spin_unlock_irq(&slow_work_queue_lock);
313 slow_work_thread_processing[id] = NULL;
235 return true; 314 return true;
236} 315}
237 316
238/** 317/**
318 * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
 319 * @work: The work item under execution that wants to sleep
 320 * @_timeout: Scheduler sleep timeout
321 *
322 * Allow a requeueable work item to sleep on a slow-work processor thread until
323 * that thread is needed to do some other work or the sleep is interrupted by
324 * some other event.
325 *
326 * The caller must set up a wake up event before calling this and must have set
327 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
328 * condition before calling this function as no test is made here.
329 *
330 * False is returned if there is nothing on the queue; true is returned if the
331 * work item should be requeued
332 */
333bool slow_work_sleep_till_thread_needed(struct slow_work *work,
334 signed long *_timeout)
335{
336 wait_queue_head_t *wfo_wq;
337 struct list_head *queue;
338
339 DEFINE_WAIT(wait);
340
341 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
342 wfo_wq = &vslow_work_queue_waits_for_occupation;
343 queue = &vslow_work_queue;
344 } else {
345 wfo_wq = &slow_work_queue_waits_for_occupation;
346 queue = &slow_work_queue;
347 }
348
349 if (!list_empty(queue))
350 return true;
351
352 add_wait_queue_exclusive(wfo_wq, &wait);
353 if (list_empty(queue))
354 *_timeout = schedule_timeout(*_timeout);
355 finish_wait(wfo_wq, &wait);
356
357 return !list_empty(queue);
358}
359EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
360
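A rough usage sketch for the helper above, under the constraints its comment lists: the caller sets up its own wake-up event and sleep state first. The wait queue, my_event_pending() and the surrounding ->execute() routine are hypothetical, not taken from this patch.

#include <linux/slow-work.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_event_wq);
static bool my_event_pending(void);	/* hypothetical condition test */

/* fragment of a requeueable item's ->execute() operation */
static void my_waiting_execute(struct slow_work *work)
{
	signed long timeout = 5 * HZ;
	DEFINE_WAIT(wait);

	prepare_to_wait(&my_event_wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!my_event_pending() &&
	    slow_work_sleep_till_thread_needed(work, &timeout)) {
		/* the thread is wanted for other queued work: stop
		 * sleeping here and ask to be requeued instead */
		finish_wait(&my_event_wq, &wait);
		slow_work_enqueue(work);
		return;
	}
	finish_wait(&my_event_wq, &wait);

	/* ... either the event arrived or the timeout expired ... */
}
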
361/**
239 * slow_work_enqueue - Schedule a slow work item for processing 362 * slow_work_enqueue - Schedule a slow work item for processing
240 * @work: The work item to queue 363 * @work: The work item to queue
241 * 364 *
@@ -260,16 +383,22 @@ auto_requeue:
260 * allowed to pick items to execute. This ensures that very slow items won't 383 * allowed to pick items to execute. This ensures that very slow items won't
261 * overly block ones that are just ordinarily slow. 384 * overly block ones that are just ordinarily slow.
262 * 385 *
263 * Returns 0 if successful, -EAGAIN if not. 386 * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is
 387 * attempted to be queued)
264 */ 388 */
265int slow_work_enqueue(struct slow_work *work) 389int slow_work_enqueue(struct slow_work *work)
266{ 390{
391 wait_queue_head_t *wfo_wq;
392 struct list_head *queue;
267 unsigned long flags; 393 unsigned long flags;
394 int ret;
395
396 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
397 return -ECANCELED;
268 398
269 BUG_ON(slow_work_user_count <= 0); 399 BUG_ON(slow_work_user_count <= 0);
270 BUG_ON(!work); 400 BUG_ON(!work);
271 BUG_ON(!work->ops); 401 BUG_ON(!work->ops);
272 BUG_ON(!work->ops->get_ref);
273 402
274 /* when honouring an enqueue request, we only promise that we will run 403 /* when honouring an enqueue request, we only promise that we will run
275 * the work function in the future; we do not promise to run it once 404 * the work function in the future; we do not promise to run it once
@@ -280,8 +409,19 @@ int slow_work_enqueue(struct slow_work *work)
280 * maintaining our promise 409 * maintaining our promise
281 */ 410 */
282 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { 411 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
412 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
413 wfo_wq = &vslow_work_queue_waits_for_occupation;
414 queue = &vslow_work_queue;
415 } else {
416 wfo_wq = &slow_work_queue_waits_for_occupation;
417 queue = &slow_work_queue;
418 }
419
283 spin_lock_irqsave(&slow_work_queue_lock, flags); 420 spin_lock_irqsave(&slow_work_queue_lock, flags);
284 421
422 if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
423 goto cancelled;
424
285 /* we promise that we will not attempt to execute the work 425 /* we promise that we will not attempt to execute the work
286 * function in more than one thread simultaneously 426 * function in more than one thread simultaneously
287 * 427 *
@@ -299,25 +439,221 @@ int slow_work_enqueue(struct slow_work *work)
299 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { 439 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
300 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); 440 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
301 } else { 441 } else {
302 if (work->ops->get_ref(work) < 0) 442 ret = slow_work_get_ref(work);
303 goto cant_get_ref; 443 if (ret < 0)
304 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 444 goto failed;
305 list_add_tail(&work->link, &vslow_work_queue); 445 slow_work_mark_time(work);
306 else 446 list_add_tail(&work->link, queue);
307 list_add_tail(&work->link, &slow_work_queue);
308 wake_up(&slow_work_thread_wq); 447 wake_up(&slow_work_thread_wq);
448
449 /* if someone who could be requeued is sleeping on a
450 * thread, then ask them to yield their thread */
451 if (work->link.prev == queue)
452 wake_up(wfo_wq);
309 } 453 }
310 454
311 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 455 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
312 } 456 }
313 return 0; 457 return 0;
314 458
315cant_get_ref: 459cancelled:
460 ret = -ECANCELED;
461failed:
316 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 462 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
317 return -EAGAIN; 463 return ret;
318} 464}
319EXPORT_SYMBOL(slow_work_enqueue); 465EXPORT_SYMBOL(slow_work_enqueue);
320 466
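A minimal sketch of a client of the reworked enqueue path: get_ref/put_ref are now optional (the wrappers above fall back to no-ops) and .owner ties the item to a module for the unload synchronisation added later in this patch. All my_* names are illustrative, and slow_work_init() is assumed to be the initialiser declared in <linux/slow-work.h>.

#include <linux/module.h>
#include <linux/slow-work.h>

static void my_run(struct slow_work *work)
{
	/* the potentially slow operation; runs in a kslowd thread */
}

static const struct slow_work_ops my_ops = {
	.owner		= THIS_MODULE,
	.execute	= my_run,
	/* .get_ref/.put_ref only if the item's lifetime needs them */
};

static struct slow_work my_work;

static int my_kick(void)
{
	slow_work_init(&my_work, &my_ops);
	return slow_work_enqueue(&my_work);	/* 0, -EAGAIN or -ECANCELED */
}
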
467static int slow_work_wait(void *word)
468{
469 schedule();
470 return 0;
471}
472
473/**
474 * slow_work_cancel - Cancel a slow work item
475 * @work: The work item to cancel
476 *
477 * This function will cancel a previously enqueued work item. If we cannot
 478 * cancel the work item, it is guaranteed to have run when this function
479 * returns.
480 */
481void slow_work_cancel(struct slow_work *work)
482{
483 bool wait = true, put = false;
484
485 set_bit(SLOW_WORK_CANCELLING, &work->flags);
486 smp_mb();
487
488 /* if the work item is a delayed work item with an active timer, we
489 * need to wait for the timer to finish _before_ getting the spinlock,
490 * lest we deadlock against the timer routine
491 *
492 * the timer routine will leave DELAYED set if it notices the
493 * CANCELLING flag in time
494 */
495 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
496 struct delayed_slow_work *dwork =
497 container_of(work, struct delayed_slow_work, work);
498 del_timer_sync(&dwork->timer);
499 }
500
501 spin_lock_irq(&slow_work_queue_lock);
502
503 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
504 /* the timer routine aborted or never happened, so we are left
505 * holding the timer's reference on the item and should just
506 * drop the pending flag and wait for any ongoing execution to
507 * finish */
508 struct delayed_slow_work *dwork =
509 container_of(work, struct delayed_slow_work, work);
510
511 BUG_ON(timer_pending(&dwork->timer));
512 BUG_ON(!list_empty(&work->link));
513
514 clear_bit(SLOW_WORK_DELAYED, &work->flags);
515 put = true;
516 clear_bit(SLOW_WORK_PENDING, &work->flags);
517
518 } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
519 !list_empty(&work->link)) {
520 /* the link in the pending queue holds a reference on the item
521 * that we will need to release */
522 list_del_init(&work->link);
523 wait = false;
524 put = true;
525 clear_bit(SLOW_WORK_PENDING, &work->flags);
526
527 } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
528 /* the executor is holding our only reference on the item, so
529 * we merely need to wait for it to finish executing */
530 clear_bit(SLOW_WORK_PENDING, &work->flags);
531 }
532
533 spin_unlock_irq(&slow_work_queue_lock);
534
535 /* the EXECUTING flag is set by the executor whilst the spinlock is set
536 * and before the item is dequeued - so assuming the above doesn't
537 * actually dequeue it, simply waiting for the EXECUTING flag to be
538 * released here should be sufficient */
539 if (wait)
540 wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
541 TASK_UNINTERRUPTIBLE);
542
543 clear_bit(SLOW_WORK_CANCELLING, &work->flags);
544 if (put)
545 slow_work_put_ref(work);
546}
547EXPORT_SYMBOL(slow_work_cancel);
548
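For instance, a teardown path could use the new cancel call to make sure the item is neither queued nor executing before freeing the structure that embeds it. The my_object wrapper here is the same kind of illustrative embedding used in the earlier sketch.

#include <linux/slab.h>
#include <linux/slow-work.h>

struct my_object {
	struct slow_work	work;
	/* ... */
};

static void my_object_destroy(struct my_object *obj)
{
	/* after this returns the item is not queued, not executing, and
	 * any pending delay timer has been stopped */
	slow_work_cancel(&obj->work);
	kfree(obj);
}
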
549/*
550 * Handle expiry of the delay timer, indicating that a delayed slow work item
551 * should now be queued if not cancelled
552 */
553static void delayed_slow_work_timer(unsigned long data)
554{
555 wait_queue_head_t *wfo_wq;
556 struct list_head *queue;
557 struct slow_work *work = (struct slow_work *) data;
558 unsigned long flags;
559 bool queued = false, put = false, first = false;
560
561 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
562 wfo_wq = &vslow_work_queue_waits_for_occupation;
563 queue = &vslow_work_queue;
564 } else {
565 wfo_wq = &slow_work_queue_waits_for_occupation;
566 queue = &slow_work_queue;
567 }
568
569 spin_lock_irqsave(&slow_work_queue_lock, flags);
570 if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
571 clear_bit(SLOW_WORK_DELAYED, &work->flags);
572
573 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
574 /* we discard the reference the timer was holding in
575 * favour of the one the executor holds */
576 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
577 put = true;
578 } else {
579 slow_work_mark_time(work);
580 list_add_tail(&work->link, queue);
581 queued = true;
582 if (work->link.prev == queue)
583 first = true;
584 }
585 }
586
587 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
588 if (put)
589 slow_work_put_ref(work);
590 if (first)
591 wake_up(wfo_wq);
592 if (queued)
593 wake_up(&slow_work_thread_wq);
594}
595
596/**
597 * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
598 * @dwork: The delayed work item to queue
599 * @delay: When to start executing the work, in jiffies from now
600 *
601 * This is similar to slow_work_enqueue(), but it adds a delay before the work
602 * is actually queued for processing.
603 *
604 * The item can have delayed processing requested on it whilst it is being
605 * executed. The delay will begin immediately, and if it expires before the
606 * item finishes executing, the item will be placed back on the queue when it
607 * has done executing.
608 */
609int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
610 unsigned long delay)
611{
612 struct slow_work *work = &dwork->work;
613 unsigned long flags;
614 int ret;
615
616 if (delay == 0)
617 return slow_work_enqueue(&dwork->work);
618
619 BUG_ON(slow_work_user_count <= 0);
620 BUG_ON(!work);
621 BUG_ON(!work->ops);
622
623 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
624 return -ECANCELED;
625
626 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
627 spin_lock_irqsave(&slow_work_queue_lock, flags);
628
629 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
630 goto cancelled;
631
632 /* the timer holds a reference whilst it is pending */
633 ret = work->ops->get_ref(work);
634 if (ret < 0)
635 goto cant_get_ref;
636
637 if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
638 BUG();
639 dwork->timer.expires = jiffies + delay;
640 dwork->timer.data = (unsigned long) work;
641 dwork->timer.function = delayed_slow_work_timer;
642 add_timer(&dwork->timer);
643
644 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
645 }
646
647 return 0;
648
649cancelled:
650 ret = -ECANCELED;
651cant_get_ref:
652 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
653 return ret;
654}
655EXPORT_SYMBOL(delayed_slow_work_enqueue);
656
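A sketch of queueing a delayed item with the call above. The names are illustrative, and delayed_slow_work_init() is assumed to be the matching initialiser in <linux/slow-work.h> that sets up the embedded timer.

#include <linux/slow-work.h>

static struct delayed_slow_work my_dwork;

static int my_schedule_in_two_seconds(const struct slow_work_ops *ops)
{
	delayed_slow_work_init(&my_dwork, ops);

	/* 0 on success, -EAGAIN if the reference couldn't be taken, or
	 * -ECANCELED if a cancellation raced with the enqueue */
	return delayed_slow_work_enqueue(&my_dwork, 2 * HZ);
}
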
321/* 657/*
322 * Schedule a cull of the thread pool at some time in the near future 658 * Schedule a cull of the thread pool at some time in the near future
323 */ 659 */
@@ -368,13 +704,23 @@ static inline bool slow_work_available(int vsmax)
368 */ 704 */
369static int slow_work_thread(void *_data) 705static int slow_work_thread(void *_data)
370{ 706{
371 int vsmax; 707 int vsmax, id;
372 708
373 DEFINE_WAIT(wait); 709 DEFINE_WAIT(wait);
374 710
375 set_freezable(); 711 set_freezable();
376 set_user_nice(current, -5); 712 set_user_nice(current, -5);
377 713
714 /* allocate ourselves an ID */
715 spin_lock_irq(&slow_work_queue_lock);
716 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
717 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
718 __set_bit(id, slow_work_ids);
719 slow_work_set_thread_pid(id, current->pid);
720 spin_unlock_irq(&slow_work_queue_lock);
721
722 sprintf(current->comm, "kslowd%03u", id);
723
378 for (;;) { 724 for (;;) {
379 vsmax = vslow_work_proportion; 725 vsmax = vslow_work_proportion;
380 vsmax *= atomic_read(&slow_work_thread_count); 726 vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +741,7 @@ static int slow_work_thread(void *_data)
395 vsmax *= atomic_read(&slow_work_thread_count); 741 vsmax *= atomic_read(&slow_work_thread_count);
396 vsmax /= 100; 742 vsmax /= 100;
397 743
398 if (slow_work_available(vsmax) && slow_work_execute()) { 744 if (slow_work_available(vsmax) && slow_work_execute(id)) {
399 cond_resched(); 745 cond_resched();
400 if (list_empty(&slow_work_queue) && 746 if (list_empty(&slow_work_queue) &&
401 list_empty(&vslow_work_queue) && 747 list_empty(&vslow_work_queue) &&
@@ -412,6 +758,11 @@ static int slow_work_thread(void *_data)
412 break; 758 break;
413 } 759 }
414 760
761 spin_lock_irq(&slow_work_queue_lock);
762 slow_work_set_thread_pid(id, 0);
763 __clear_bit(id, slow_work_ids);
764 spin_unlock_irq(&slow_work_queue_lock);
765
415 if (atomic_dec_and_test(&slow_work_thread_count)) 766 if (atomic_dec_and_test(&slow_work_thread_count))
416 complete_and_exit(&slow_work_last_thread_exited, 0); 767 complete_and_exit(&slow_work_last_thread_exited, 0);
417 return 0; 768 return 0;
@@ -427,21 +778,6 @@ static void slow_work_cull_timeout(unsigned long data)
427} 778}
428 779
429/* 780/*
430 * Get a reference on slow work thread starter
431 */
432static int slow_work_new_thread_get_ref(struct slow_work *work)
433{
434 return 0;
435}
436
437/*
438 * Drop a reference on slow work thread starter
439 */
440static void slow_work_new_thread_put_ref(struct slow_work *work)
441{
442}
443
444/*
445 * Start a new slow work thread 781 * Start a new slow work thread
446 */ 782 */
447static void slow_work_new_thread_execute(struct slow_work *work) 783static void slow_work_new_thread_execute(struct slow_work *work)
@@ -475,9 +811,11 @@ static void slow_work_new_thread_execute(struct slow_work *work)
475} 811}
476 812
477static const struct slow_work_ops slow_work_new_thread_ops = { 813static const struct slow_work_ops slow_work_new_thread_ops = {
478 .get_ref = slow_work_new_thread_get_ref, 814 .owner = THIS_MODULE,
479 .put_ref = slow_work_new_thread_put_ref,
480 .execute = slow_work_new_thread_execute, 815 .execute = slow_work_new_thread_execute,
816#ifdef CONFIG_SLOW_WORK_PROC
817 .desc = slow_work_new_thread_desc,
818#endif
481}; 819};
482 820
483/* 821/*
@@ -546,12 +884,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
546 884
547/** 885/**
548 * slow_work_register_user - Register a user of the facility 886 * slow_work_register_user - Register a user of the facility
887 * @module: The module about to make use of the facility
549 * 888 *
550 * Register a user of the facility, starting up the initial threads if there 889 * Register a user of the facility, starting up the initial threads if there
551 * aren't any other users at this point. This will return 0 if successful, or 890 * aren't any other users at this point. This will return 0 if successful, or
552 * an error if not. 891 * an error if not.
553 */ 892 */
554int slow_work_register_user(void) 893int slow_work_register_user(struct module *module)
555{ 894{
556 struct task_struct *p; 895 struct task_struct *p;
557 int loop; 896 int loop;
@@ -598,14 +937,79 @@ error:
598} 937}
599EXPORT_SYMBOL(slow_work_register_user); 938EXPORT_SYMBOL(slow_work_register_user);
600 939
940/*
941 * wait for all outstanding items from the calling module to complete
942 * - note that more items may be queued whilst we're waiting
943 */
944static void slow_work_wait_for_items(struct module *module)
945{
946 DECLARE_WAITQUEUE(myself, current);
947 struct slow_work *work;
948 int loop;
949
950 mutex_lock(&slow_work_unreg_sync_lock);
951 add_wait_queue(&slow_work_unreg_wq, &myself);
952
953 for (;;) {
954 spin_lock_irq(&slow_work_queue_lock);
955
956 /* first of all, we wait for the last queued item in each list
957 * to be processed */
958 list_for_each_entry_reverse(work, &vslow_work_queue, link) {
959 if (work->owner == module) {
960 set_current_state(TASK_UNINTERRUPTIBLE);
961 slow_work_unreg_work_item = work;
962 goto do_wait;
963 }
964 }
965 list_for_each_entry_reverse(work, &slow_work_queue, link) {
966 if (work->owner == module) {
967 set_current_state(TASK_UNINTERRUPTIBLE);
968 slow_work_unreg_work_item = work;
969 goto do_wait;
970 }
971 }
972
973 /* then we wait for the items being processed to finish */
974 slow_work_unreg_module = module;
975 smp_mb();
976 for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
977 if (slow_work_thread_processing[loop] == module)
978 goto do_wait;
979 }
980 spin_unlock_irq(&slow_work_queue_lock);
981 break; /* okay, we're done */
982
983 do_wait:
984 spin_unlock_irq(&slow_work_queue_lock);
985 schedule();
986 slow_work_unreg_work_item = NULL;
987 slow_work_unreg_module = NULL;
988 }
989
990 remove_wait_queue(&slow_work_unreg_wq, &myself);
991 mutex_unlock(&slow_work_unreg_sync_lock);
992}
993
601/** 994/**
602 * slow_work_unregister_user - Unregister a user of the facility 995 * slow_work_unregister_user - Unregister a user of the facility
996 * @module: The module whose items should be cleared
603 * 997 *
604 * Unregister a user of the facility, killing all the threads if this was the 998 * Unregister a user of the facility, killing all the threads if this was the
605 * last one. 999 * last one.
1000 *
1001 * This waits for all the work items belonging to the nominated module to go
1002 * away before proceeding.
606 */ 1003 */
607void slow_work_unregister_user(void) 1004void slow_work_unregister_user(struct module *module)
608{ 1005{
1006 /* first of all, wait for all outstanding items from the calling module
1007 * to complete */
1008 if (module)
1009 slow_work_wait_for_items(module);
1010
1011 /* then we can actually go about shutting down the facility if need
1012 * be */
609 mutex_lock(&slow_work_user_lock); 1013 mutex_lock(&slow_work_user_lock);
610 1014
611 BUG_ON(slow_work_user_count <= 0); 1015 BUG_ON(slow_work_user_count <= 0);
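With the module argument added to registration and unregistration, a client's init/exit pair would now look roughly like this; the my_client_* names are illustrative.

#include <linux/module.h>
#include <linux/slow-work.h>

static int __init my_client_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;
	/* ... initialise and enqueue items owned by this module ... */
	return 0;
}

static void __exit my_client_exit(void)
{
	/* waits for every queued or executing item owned by this module,
	 * then drops our use of the facility */
	slow_work_unregister_user(THIS_MODULE);
}

module_init(my_client_init);
module_exit(my_client_exit);
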
@@ -639,6 +1043,10 @@ static int __init init_slow_work(void)
639 if (slow_work_max_max_threads < nr_cpus * 2) 1043 if (slow_work_max_max_threads < nr_cpus * 2)
640 slow_work_max_max_threads = nr_cpus * 2; 1044 slow_work_max_max_threads = nr_cpus * 2;
641#endif 1045#endif
1046#ifdef CONFIG_SLOW_WORK_PROC
1047 proc_create("slow_work_rq", S_IFREG | 0400, NULL,
1048 &slow_work_runqueue_fops);
1049#endif
642 return 0; 1050 return 0;
643} 1051}
644 1052
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
new file mode 100644
index 000000000000..3c2f007f3ad6
--- /dev/null
+++ b/kernel/slow-work.h
@@ -0,0 +1,72 @@
1/* Slow work private definitions
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
13 * things to do */
14#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
15 * OOM */
16
17#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
18
19/*
20 * slow-work.c
21 */
22#ifdef CONFIG_SLOW_WORK_PROC
23extern struct slow_work *slow_work_execs[];
24extern pid_t slow_work_pids[];
25extern rwlock_t slow_work_execs_lock;
26#endif
27
28extern struct list_head slow_work_queue;
29extern struct list_head vslow_work_queue;
30extern spinlock_t slow_work_queue_lock;
31
32/*
33 * slow-work-proc.c
34 */
35#ifdef CONFIG_SLOW_WORK_PROC
36extern const struct file_operations slow_work_runqueue_fops;
37
38extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
39#endif
40
41/*
42 * Helper functions
43 */
44static inline void slow_work_set_thread_pid(int id, pid_t pid)
45{
46#ifdef CONFIG_SLOW_WORK_PROC
47 slow_work_pids[id] = pid;
48#endif
49}
50
51static inline void slow_work_mark_time(struct slow_work *work)
52{
53#ifdef CONFIG_SLOW_WORK_PROC
54 work->mark = CURRENT_TIME;
55#endif
56}
57
58static inline void slow_work_begin_exec(int id, struct slow_work *work)
59{
60#ifdef CONFIG_SLOW_WORK_PROC
61 slow_work_execs[id] = work;
62#endif
63}
64
65static inline void slow_work_end_exec(int id, struct slow_work *work)
66{
67#ifdef CONFIG_SLOW_WORK_PROC
68 write_lock(&slow_work_execs_lock);
69 slow_work_execs[id] = NULL;
70 write_unlock(&slow_work_execs_lock);
71#endif
72}
diff --git a/kernel/sys.c b/kernel/sys.c
index 255475d163e0..ce17760d9c51 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid)
1110 err = session; 1110 err = session;
1111out: 1111out:
1112 write_unlock_irq(&tasklist_lock); 1112 write_unlock_irq(&tasklist_lock);
1113 if (err > 0)
1114 proc_sid_connector(group_leader);
1113 return err; 1115 return err;
1114} 1116}
1115 1117
@@ -1546,24 +1548,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1546 if (arg4 | arg5) 1548 if (arg4 | arg5)
1547 return -EINVAL; 1549 return -EINVAL;
1548 switch (arg2) { 1550 switch (arg2) {
1549 case 0: 1551 case PR_MCE_KILL_CLEAR:
1550 if (arg3 != 0) 1552 if (arg3 != 0)
1551 return -EINVAL; 1553 return -EINVAL;
1552 current->flags &= ~PF_MCE_PROCESS; 1554 current->flags &= ~PF_MCE_PROCESS;
1553 break; 1555 break;
1554 case 1: 1556 case PR_MCE_KILL_SET:
1555 current->flags |= PF_MCE_PROCESS; 1557 current->flags |= PF_MCE_PROCESS;
1556 if (arg3 != 0) 1558 if (arg3 == PR_MCE_KILL_EARLY)
1557 current->flags |= PF_MCE_EARLY; 1559 current->flags |= PF_MCE_EARLY;
1558 else 1560 else if (arg3 == PR_MCE_KILL_LATE)
1559 current->flags &= ~PF_MCE_EARLY; 1561 current->flags &= ~PF_MCE_EARLY;
1562 else if (arg3 == PR_MCE_KILL_DEFAULT)
1563 current->flags &=
1564 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
1565 else
1566 return -EINVAL;
1560 break; 1567 break;
1561 default: 1568 default:
1562 return -EINVAL; 1569 return -EINVAL;
1563 } 1570 }
1564 error = 0; 1571 error = 0;
1565 break; 1572 break;
1566 1573 case PR_MCE_KILL_GET:
1574 if (arg2 | arg3 | arg4 | arg5)
1575 return -EINVAL;
1576 if (current->flags & PF_MCE_PROCESS)
1577 error = (current->flags & PF_MCE_EARLY) ?
1578 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
1579 else
1580 error = PR_MCE_KILL_DEFAULT;
1581 break;
1567 default: 1582 default:
1568 error = -EINVAL; 1583 error = -EINVAL;
1569 break; 1584 break;
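From userspace, the reworked interface is driven as in the sketch below. The PR_MCE_* names come from <linux/prctl.h> on kernels carrying this change; the fallback defines are only for older headers and their values are stated as an assumption.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_MCE_KILL			/* values as in <linux/prctl.h> */
#define PR_MCE_KILL		33
#define PR_MCE_KILL_GET		34
#define PR_MCE_KILL_CLEAR	0
#define PR_MCE_KILL_SET		1
#define PR_MCE_KILL_LATE	0
#define PR_MCE_KILL_EARLY	1
#define PR_MCE_KILL_DEFAULT	2
#endif

int main(void)
{
	/* ask for early kill on memory failure for this process */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
		perror("PR_MCE_KILL");

	/* the new PR_MCE_KILL_GET option reads the policy back */
	printf("mce kill policy: %d\n", prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));
	return 0;
}
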
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b38423ca711a..b6e7aaea4604 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1521 if (!table->ctl_name && table->strategy) 1521 if (!table->ctl_name && table->strategy)
1522 set_fail(&fail, table, "Strategy without ctl_name"); 1522 set_fail(&fail, table, "Strategy without ctl_name");
1523#endif 1523#endif
1524#ifdef CONFIG_PROC_FS 1524#ifdef CONFIG_PROC_SYSCTL
1525 if (table->procname && !table->proc_handler) 1525 if (table->procname && !table->proc_handler)
1526 set_fail(&fail, table, "No proc_handler"); 1526 set_fail(&fail, table, "No proc_handler");
1527#endif 1527#endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 37ba67e33265..6dc4e5ef7a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -740,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
740 out: 740 out:
741 mutex_unlock(&ftrace_profile_lock); 741 mutex_unlock(&ftrace_profile_lock);
742 742
743 filp->f_pos += cnt; 743 *ppos += cnt;
744 744
745 return cnt; 745 return cnt;
746} 746}
@@ -2222,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2222 ret = ftrace_process_regex(parser->buffer, 2222 ret = ftrace_process_regex(parser->buffer,
2223 parser->idx, enable); 2223 parser->idx, enable);
2224 if (ret) 2224 if (ret)
2225 goto out; 2225 goto out_unlock;
2226 2226
2227 trace_parser_clear(parser); 2227 trace_parser_clear(parser);
2228 } 2228 }
2229 2229
2230 ret = read; 2230 ret = read;
2231 2231out_unlock:
2232 mutex_unlock(&ftrace_regex_lock); 2232 mutex_unlock(&ftrace_regex_lock);
2233out: 2233
2234 return ret; 2234 return ret;
2235} 2235}
2236 2236
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..5dd017fea6f5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -483,7 +483,7 @@ struct ring_buffer_iter {
483/* Up this if you want to test the TIME_EXTENTS and normalization */ 483/* Up this if you want to test the TIME_EXTENTS and normalization */
484#define DEBUG_SHIFT 0 484#define DEBUG_SHIFT 0
485 485
486static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) 486static inline u64 rb_time_stamp(struct ring_buffer *buffer)
487{ 487{
488 /* shift to debug/test normalization and TIME_EXTENTS */ 488 /* shift to debug/test normalization and TIME_EXTENTS */
489 return buffer->clock() << DEBUG_SHIFT; 489 return buffer->clock() << DEBUG_SHIFT;
@@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
494 u64 time; 494 u64 time;
495 495
496 preempt_disable_notrace(); 496 preempt_disable_notrace();
497 time = rb_time_stamp(buffer, cpu); 497 time = rb_time_stamp(buffer);
498 preempt_enable_no_resched_notrace(); 498 preempt_enable_no_resched_notrace();
499 499
500 return time; 500 return time;
@@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list)
599} 599}
600 600
601/* 601/*
602 * rb_is_head_page - test if the give page is the head page 602 * rb_is_head_page - test if the given page is the head page
603 * 603 *
604 * Because the reader may move the head_page pointer, we can 604 * Because the reader may move the head_page pointer, we can
605 * not trust what the head page is (it may be pointing to 605 * not trust what the head page is (it may be pointing to
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1193 atomic_inc(&cpu_buffer->record_disabled); 1193 atomic_inc(&cpu_buffer->record_disabled);
1194 synchronize_sched(); 1194 synchronize_sched();
1195 1195
1196 spin_lock_irq(&cpu_buffer->reader_lock);
1196 rb_head_page_deactivate(cpu_buffer); 1197 rb_head_page_deactivate(cpu_buffer);
1197 1198
1198 for (i = 0; i < nr_pages; i++) { 1199 for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1207 return; 1208 return;
1208 1209
1209 rb_reset_cpu(cpu_buffer); 1210 rb_reset_cpu(cpu_buffer);
1211 spin_unlock_irq(&cpu_buffer->reader_lock);
1210 1212
1211 rb_check_pages(cpu_buffer); 1213 rb_check_pages(cpu_buffer);
1212 1214
@@ -1868,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1868 * Nested commits always have zero deltas, so 1870 * Nested commits always have zero deltas, so
1869 * just reread the time stamp 1871 * just reread the time stamp
1870 */ 1872 */
1871 *ts = rb_time_stamp(buffer, cpu_buffer->cpu); 1873 *ts = rb_time_stamp(buffer);
1872 next_page->page->time_stamp = *ts; 1874 next_page->page->time_stamp = *ts;
1873 } 1875 }
1874 1876
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2111 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 2113 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2112 goto out_fail; 2114 goto out_fail;
2113 2115
2114 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); 2116 ts = rb_time_stamp(cpu_buffer->buffer);
2115 2117
2116 /* 2118 /*
2117 * Only the first commit can update the timestamp. 2119 * Only the first commit can update the timestamp.
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2681EXPORT_SYMBOL_GPL(ring_buffer_entries); 2683EXPORT_SYMBOL_GPL(ring_buffer_entries);
2682 2684
2683/** 2685/**
2684 * ring_buffer_overrun_cpu - get the number of overruns in buffer 2686 * ring_buffer_overruns - get the number of overruns in buffer
2685 * @buffer: The ring buffer 2687 * @buffer: The ring buffer
2686 * 2688 *
2687 * Returns the total number of overruns in the ring buffer 2689 * Returns the total number of overruns in the ring buffer
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c820b0310a12..b20d3ec75de9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2440,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2440 return ret; 2440 return ret;
2441 } 2441 }
2442 2442
2443 filp->f_pos += cnt; 2443 *ppos += cnt;
2444 2444
2445 return cnt; 2445 return cnt;
2446} 2446}
@@ -2582,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2582 } 2582 }
2583 mutex_unlock(&trace_types_lock); 2583 mutex_unlock(&trace_types_lock);
2584 2584
2585 filp->f_pos += cnt; 2585 *ppos += cnt;
2586 2586
2587 return cnt; 2587 return cnt;
2588} 2588}
@@ -2764,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2764 if (err) 2764 if (err)
2765 return err; 2765 return err;
2766 2766
2767 filp->f_pos += ret; 2767 *ppos += ret;
2768 2768
2769 return ret; 2769 return ret;
2770} 2770}
@@ -3299,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3299 } 3299 }
3300 } 3300 }
3301 3301
3302 filp->f_pos += cnt; 3302 *ppos += cnt;
3303 3303
3304 /* If check pages failed, return ENOMEM */ 3304 /* If check pages failed, return ENOMEM */
3305 if (tracing_disabled) 3305 if (tracing_disabled)
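The recurring change in the ftrace and trace hunks above swaps direct filp->f_pos updates for the *ppos the VFS passes in. A generic write handler following the corrected pattern looks something like this sketch (my_ctl_write and the buffer size are illustrative, not code from the patch):

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t my_ctl_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char buf[64];

	if (cnt >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	/* ... parse buf and apply the setting ... */

	*ppos += cnt;	/* advance the offset we were handed, not filp->f_pos */
	return cnt;
}
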
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ed17565826b0..b6c12c6a1bcd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
69 * @s: trace sequence descriptor 69 * @s: trace sequence descriptor
70 * @fmt: printf format string 70 * @fmt: printf format string
71 * 71 *
 72 * It returns 0 if the trace does not fit in the buffer's free
73 * space, 1 otherwise.
74 *
72 * The tracer may use either sequence operations or its own 75 * The tracer may use either sequence operations or its own
 73 * copy to user routines. To simplify formatting of a trace 76
74 * trace_seq_printf is used to store strings into a special 77 * trace_seq_printf is used to store strings into a special
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
95 98
96 s->len += ret; 99 s->len += ret;
97 100
98 return len; 101 return 1;
99} 102}
100EXPORT_SYMBOL_GPL(trace_seq_printf); 103EXPORT_SYMBOL_GPL(trace_seq_printf);
101 104
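Under the return convention documented above, an output routine checks for 0 to detect a full buffer. A sketch, assuming the declarations live in <linux/ftrace_event.h> in this tree; my_event_print is hypothetical, while the TRACE_TYPE_* values are the existing print_line_t results.

#include <linux/ftrace_event.h>

static enum print_line_t my_event_print(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!trace_seq_printf(s, "my event on cpu %d\n", iter->cpu))
		return TRACE_TYPE_PARTIAL_LINE;	/* out of buffer space */

	return TRACE_TYPE_HANDLED;
}
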
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
330 */ 330 */
331static void free_user(struct user_struct *up, unsigned long flags) 331static void free_user(struct user_struct *up, unsigned long flags)
332{ 332{
333 spin_unlock_irqrestore(&uidhash_lock, flags);
334 INIT_DELAYED_WORK(&up->work, cleanup_user_struct); 333 INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
335 schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); 334 schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
335 spin_unlock_irqrestore(&uidhash_lock, flags);
336} 336}
337 337
338#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ 338#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 47cdd7e76f2b..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -685,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
685int schedule_on_each_cpu(work_func_t func) 685int schedule_on_each_cpu(work_func_t func)
686{ 686{
687 int cpu; 687 int cpu;
688 int orig = -1;
688 struct work_struct *works; 689 struct work_struct *works;
689 690
690 works = alloc_percpu(struct work_struct); 691 works = alloc_percpu(struct work_struct);
@@ -692,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
692 return -ENOMEM; 693 return -ENOMEM;
693 694
694 get_online_cpus(); 695 get_online_cpus();
696
697 /*
698 * When running in keventd don't schedule a work item on
699 * itself. Can just call directly because the work queue is
700 * already bound. This also is faster.
701 */
702 if (current_is_keventd())
703 orig = raw_smp_processor_id();
704
695 for_each_online_cpu(cpu) { 705 for_each_online_cpu(cpu) {
696 struct work_struct *work = per_cpu_ptr(works, cpu); 706 struct work_struct *work = per_cpu_ptr(works, cpu);
697 707
698 INIT_WORK(work, func); 708 INIT_WORK(work, func);
699 schedule_work_on(cpu, work); 709 if (cpu != orig)
710 schedule_work_on(cpu, work);
700 } 711 }
712 if (orig >= 0)
713 func(per_cpu_ptr(works, orig));
714
701 for_each_online_cpu(cpu) 715 for_each_online_cpu(cpu)
702 flush_work(per_cpu_ptr(works, cpu)); 716 flush_work(per_cpu_ptr(works, cpu));
717
703 put_online_cpus(); 718 put_online_cpus();
704 free_percpu(works); 719 free_percpu(works);
705 return 0; 720 return 0;
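For reference, a caller of the function changed above looks like the sketch below; with this patch, if the call originates in keventd the local CPU's instance runs directly instead of being queued behind the caller. The my_* names are illustrative.

#include <linux/workqueue.h>

static void my_percpu_flush(struct work_struct *work)
{
	/* per-CPU action; runs once on every online CPU */
}

static int my_flush_all_cpus(void)
{
	/* returns 0, or -ENOMEM if the per-CPU work array cannot be
	 * allocated; blocks until every CPU has run the callback */
	return schedule_on_each_cpu(my_percpu_flush);
}
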