Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cgroup.c | 8
-rw-r--r-- | kernel/exit.c | 4
-rw-r--r-- | kernel/fork.c | 2
-rw-r--r-- | kernel/futex.c | 20
-rw-r--r-- | kernel/irq/handle.c | 1
-rw-r--r-- | kernel/lockdep.c | 20
-rw-r--r-- | kernel/mutex-debug.c | 1
-rw-r--r-- | kernel/params.c | 17
-rw-r--r-- | kernel/perf_event.c | 68
-rw-r--r-- | kernel/power/hibernate.c | 11
-rw-r--r-- | kernel/power/suspend_test.c | 5
-rw-r--r-- | kernel/power/swap.c | 43
-rw-r--r-- | kernel/rcutree.c | 44
-rw-r--r-- | kernel/rcutree.h | 10
-rw-r--r-- | kernel/rcutree_plugin.h | 46
-rw-r--r-- | kernel/sched.c | 25
-rw-r--r-- | kernel/sys.c | 25
-rw-r--r-- | kernel/sysctl_check.c | 2
-rw-r--r-- | kernel/time/timekeeping.c | 1
-rw-r--r-- | kernel/trace/ftrace.c | 8
-rw-r--r-- | kernel/trace/ring_buffer.c | 14
-rw-r--r-- | kernel/trace/trace.c | 10
-rw-r--r-- | kernel/trace/trace_events_filter.c | 3
-rw-r--r-- | kernel/trace/trace_output.c | 5
-rw-r--r-- | kernel/workqueue.c | 39
25 files changed, 294 insertions, 138 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ca83b73fba19..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft, | |||
1710 | return -EFAULT; | 1710 | return -EFAULT; |
1711 | 1711 | ||
1712 | buffer[nbytes] = 0; /* nul-terminate */ | 1712 | buffer[nbytes] = 0; /* nul-terminate */ |
1713 | strstrip(buffer); | ||
1714 | if (cft->write_u64) { | 1713 | if (cft->write_u64) { |
1715 | u64 val = simple_strtoull(buffer, &end, 0); | 1714 | u64 val = simple_strtoull(strstrip(buffer), &end, 0); |
1716 | if (*end) | 1715 | if (*end) |
1717 | return -EINVAL; | 1716 | return -EINVAL; |
1718 | retval = cft->write_u64(cgrp, cft, val); | 1717 | retval = cft->write_u64(cgrp, cft, val); |
1719 | } else { | 1718 | } else { |
1720 | s64 val = simple_strtoll(buffer, &end, 0); | 1719 | s64 val = simple_strtoll(strstrip(buffer), &end, 0); |
1721 | if (*end) | 1720 | if (*end) |
1722 | return -EINVAL; | 1721 | return -EINVAL; |
1723 | retval = cft->write_s64(cgrp, cft, val); | 1722 | retval = cft->write_s64(cgrp, cft, val); |
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft, | |||
1753 | } | 1752 | } |
1754 | 1753 | ||
1755 | buffer[nbytes] = 0; /* nul-terminate */ | 1754 | buffer[nbytes] = 0; /* nul-terminate */ |
1756 | strstrip(buffer); | 1755 | retval = cft->write_string(cgrp, cft, strstrip(buffer)); |
1757 | retval = cft->write_string(cgrp, cft, buffer); | ||
1758 | if (!retval) | 1756 | if (!retval) |
1759 | retval = nbytes; | 1757 | retval = nbytes; |
1760 | out: | 1758 | out: |
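The cgroup hunks above fold strstrip() into the parse and use its return value: strstrip() chops trailing whitespace in place, but it can only skip leading whitespace by returning an offset pointer, so the caller has to parse the returned pointer rather than the original buffer. For reference (not part of the patch), a minimal userspace sketch of the same pattern, with a local trim() standing in for the kernel's strstrip():

    /* Userspace sketch: why the pointer returned by a strip helper must be
     * used.  trim() is a stand-in for strstrip(), not the kernel function. */
    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *trim(char *s)
    {
        char *end;

        while (isspace((unsigned char)*s))
            s++;                            /* skip leading spaces */
        end = s + strlen(s);
        while (end > s && isspace((unsigned char)end[-1]))
            *--end = '\0';                  /* chop trailing spaces in place */
        return s;                           /* may differ from the original pointer */
    }

    int main(void)
    {
        char buffer[] = "  42\n";
        char *end;
        unsigned long long val = strtoull(trim(buffer), &end, 0);

        printf("val=%llu rest='%s'\n", val, end);   /* val=42 rest='' */
        return 0;
    }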
diff --git a/kernel/exit.c b/kernel/exit.c
index e61891f80123..f7864ac2ecc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -359,10 +359,8 @@ void __set_special_pids(struct pid *pid) | |||
359 | { | 359 | { |
360 | struct task_struct *curr = current->group_leader; | 360 | struct task_struct *curr = current->group_leader; |
361 | 361 | ||
362 | if (task_session(curr) != pid) { | 362 | if (task_session(curr) != pid) |
363 | change_pid(curr, PIDTYPE_SID, pid); | 363 | change_pid(curr, PIDTYPE_SID, pid); |
364 | proc_sid_connector(curr); | ||
365 | } | ||
366 | 364 | ||
367 | if (task_pgrp(curr) != pid) | 365 | if (task_pgrp(curr) != pid) |
368 | change_pid(curr, PIDTYPE_PGID, pid); | 366 | change_pid(curr, PIDTYPE_PGID, pid); |
diff --git a/kernel/fork.c b/kernel/fork.c
index 4c20fff8c13a..166b8c49257c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,7 +91,7 @@ int nr_processes(void) | |||
91 | int cpu; | 91 | int cpu; |
92 | int total = 0; | 92 | int total = 0; |
93 | 93 | ||
94 | for_each_online_cpu(cpu) | 94 | for_each_possible_cpu(cpu) |
95 | total += per_cpu(process_counts, cpu); | 95 | total += per_cpu(process_counts, cpu); |
96 | 96 | ||
97 | return total; | 97 | return total; |
diff --git a/kernel/futex.c b/kernel/futex.c
index 4949d336d88d..fb65e822fc41 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key) | |||
150 | */ | 150 | */ |
151 | static inline int match_futex(union futex_key *key1, union futex_key *key2) | 151 | static inline int match_futex(union futex_key *key1, union futex_key *key2) |
152 | { | 152 | { |
153 | return (key1->both.word == key2->both.word | 153 | return (key1 && key2 |
154 | && key1->both.word == key2->both.word | ||
154 | && key1->both.ptr == key2->both.ptr | 155 | && key1->both.ptr == key2->both.ptr |
155 | && key1->both.offset == key2->both.offset); | 156 | && key1->both.offset == key2->both.offset); |
156 | } | 157 | } |
@@ -1028,7 +1029,6 @@ static inline | |||
1028 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, | 1029 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
1029 | struct futex_hash_bucket *hb) | 1030 | struct futex_hash_bucket *hb) |
1030 | { | 1031 | { |
1031 | drop_futex_key_refs(&q->key); | ||
1032 | get_futex_key_refs(key); | 1032 | get_futex_key_refs(key); |
1033 | q->key = *key; | 1033 | q->key = *key; |
1034 | 1034 | ||
@@ -1226,6 +1226,7 @@ retry_private: | |||
1226 | */ | 1226 | */ |
1227 | if (ret == 1) { | 1227 | if (ret == 1) { |
1228 | WARN_ON(pi_state); | 1228 | WARN_ON(pi_state); |
1229 | drop_count++; | ||
1229 | task_count++; | 1230 | task_count++; |
1230 | ret = get_futex_value_locked(&curval2, uaddr2); | 1231 | ret = get_futex_value_locked(&curval2, uaddr2); |
1231 | if (!ret) | 1232 | if (!ret) |
@@ -1304,6 +1305,7 @@ retry_private: | |||
1304 | if (ret == 1) { | 1305 | if (ret == 1) { |
1305 | /* We got the lock. */ | 1306 | /* We got the lock. */ |
1306 | requeue_pi_wake_futex(this, &key2, hb2); | 1307 | requeue_pi_wake_futex(this, &key2, hb2); |
1308 | drop_count++; | ||
1307 | continue; | 1309 | continue; |
1308 | } else if (ret) { | 1310 | } else if (ret) { |
1309 | /* -EDEADLK */ | 1311 | /* -EDEADLK */ |
@@ -1791,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
1791 | current->timer_slack_ns); | 1793 | current->timer_slack_ns); |
1792 | } | 1794 | } |
1793 | 1795 | ||
1796 | retry: | ||
1794 | /* Prepare to wait on uaddr. */ | 1797 | /* Prepare to wait on uaddr. */ |
1795 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); | 1798 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); |
1796 | if (ret) | 1799 | if (ret) |
@@ -1808,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
1808 | goto out_put_key; | 1811 | goto out_put_key; |
1809 | 1812 | ||
1810 | /* | 1813 | /* |
1811 | * We expect signal_pending(current), but another thread may | 1814 | * We expect signal_pending(current), but we might be the |
1812 | * have handled it for us already. | 1815 | * victim of a spurious wakeup as well. |
1813 | */ | 1816 | */ |
1817 | if (!signal_pending(current)) { | ||
1818 | put_futex_key(fshared, &q.key); | ||
1819 | goto retry; | ||
1820 | } | ||
1821 | |||
1814 | ret = -ERESTARTSYS; | 1822 | ret = -ERESTARTSYS; |
1815 | if (!abs_time) | 1823 | if (!abs_time) |
1816 | goto out_put_key; | 1824 | goto out_put_key; |
@@ -2118,9 +2126,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
2118 | */ | 2126 | */ |
2119 | plist_del(&q->list, &q->list.plist); | 2127 | plist_del(&q->list, &q->list.plist); |
2120 | 2128 | ||
2129 | /* Handle spurious wakeups gracefully */ | ||
2130 | ret = -EWOULDBLOCK; | ||
2121 | if (timeout && !timeout->task) | 2131 | if (timeout && !timeout->task) |
2122 | ret = -ETIMEDOUT; | 2132 | ret = -ETIMEDOUT; |
2123 | else | 2133 | else if (signal_pending(current)) |
2124 | ret = -ERESTARTNOINTR; | 2134 | ret = -ERESTARTNOINTR; |
2125 | } | 2135 | } |
2126 | return ret; | 2136 | return ret; |
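The futex_wait() hunk above treats a wakeup with no signal pending as spurious and simply goes back to waiting instead of returning -ERESTARTSYS, and the early requeue-PI path now defaults to -EWOULDBLOCK for the same reason. The same discipline applies to any blocking primitive that can wake without its predicate being true. A minimal userspace analogue (not part of the patch), assuming POSIX threads, where spurious wakeups from pthread_cond_wait() are handled by re-checking the predicate in a loop:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool ready;

    static void *waiter(void *arg)
    {
        pthread_mutex_lock(&lock);
        while (!ready)                      /* re-check: the wakeup may be spurious */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        puts("woken for a real event");
        return arg;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        pthread_mutex_lock(&lock);
        ready = true;                       /* the real event */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }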
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a81cf80554db..17c71bb565c6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
14 | #include <linux/sched.h> | ||
14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/random.h> | 17 | #include <linux/random.h> |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b2..9af56723c096 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock) | |||
142 | #ifdef CONFIG_LOCK_STAT | 142 | #ifdef CONFIG_LOCK_STAT |
143 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); | 143 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); |
144 | 144 | ||
145 | static inline u64 lockstat_clock(void) | ||
146 | { | ||
147 | return cpu_clock(smp_processor_id()); | ||
148 | } | ||
149 | |||
145 | static int lock_point(unsigned long points[], unsigned long ip) | 150 | static int lock_point(unsigned long points[], unsigned long ip) |
146 | { | 151 | { |
147 | int i; | 152 | int i; |
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip) | |||
158 | return i; | 163 | return i; |
159 | } | 164 | } |
160 | 165 | ||
161 | static void lock_time_inc(struct lock_time *lt, s64 time) | 166 | static void lock_time_inc(struct lock_time *lt, u64 time) |
162 | { | 167 | { |
163 | if (time > lt->max) | 168 | if (time > lt->max) |
164 | lt->max = time; | 169 | lt->max = time; |
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats) | |||
234 | static void lock_release_holdtime(struct held_lock *hlock) | 239 | static void lock_release_holdtime(struct held_lock *hlock) |
235 | { | 240 | { |
236 | struct lock_class_stats *stats; | 241 | struct lock_class_stats *stats; |
237 | s64 holdtime; | 242 | u64 holdtime; |
238 | 243 | ||
239 | if (!lock_stat) | 244 | if (!lock_stat) |
240 | return; | 245 | return; |
241 | 246 | ||
242 | holdtime = sched_clock() - hlock->holdtime_stamp; | 247 | holdtime = lockstat_clock() - hlock->holdtime_stamp; |
243 | 248 | ||
244 | stats = get_lock_stats(hlock_class(hlock)); | 249 | stats = get_lock_stats(hlock_class(hlock)); |
245 | if (hlock->read) | 250 | if (hlock->read) |
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2792 | hlock->references = references; | 2797 | hlock->references = references; |
2793 | #ifdef CONFIG_LOCK_STAT | 2798 | #ifdef CONFIG_LOCK_STAT |
2794 | hlock->waittime_stamp = 0; | 2799 | hlock->waittime_stamp = 0; |
2795 | hlock->holdtime_stamp = sched_clock(); | 2800 | hlock->holdtime_stamp = lockstat_clock(); |
2796 | #endif | 2801 | #endif |
2797 | 2802 | ||
2798 | if (check == 2 && !mark_irqflags(curr, hlock)) | 2803 | if (check == 2 && !mark_irqflags(curr, hlock)) |
@@ -3322,7 +3327,7 @@ found_it: | |||
3322 | if (hlock->instance != lock) | 3327 | if (hlock->instance != lock) |
3323 | return; | 3328 | return; |
3324 | 3329 | ||
3325 | hlock->waittime_stamp = sched_clock(); | 3330 | hlock->waittime_stamp = lockstat_clock(); |
3326 | 3331 | ||
3327 | contention_point = lock_point(hlock_class(hlock)->contention_point, ip); | 3332 | contention_point = lock_point(hlock_class(hlock)->contention_point, ip); |
3328 | contending_point = lock_point(hlock_class(hlock)->contending_point, | 3333 | contending_point = lock_point(hlock_class(hlock)->contending_point, |
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) | |||
3345 | struct held_lock *hlock, *prev_hlock; | 3350 | struct held_lock *hlock, *prev_hlock; |
3346 | struct lock_class_stats *stats; | 3351 | struct lock_class_stats *stats; |
3347 | unsigned int depth; | 3352 | unsigned int depth; |
3348 | u64 now; | 3353 | u64 now, waittime = 0; |
3349 | s64 waittime = 0; | ||
3350 | int i, cpu; | 3354 | int i, cpu; |
3351 | 3355 | ||
3352 | depth = curr->lockdep_depth; | 3356 | depth = curr->lockdep_depth; |
@@ -3374,7 +3378,7 @@ found_it: | |||
3374 | 3378 | ||
3375 | cpu = smp_processor_id(); | 3379 | cpu = smp_processor_id(); |
3376 | if (hlock->waittime_stamp) { | 3380 | if (hlock->waittime_stamp) { |
3377 | now = sched_clock(); | 3381 | now = lockstat_clock(); |
3378 | waittime = now - hlock->waittime_stamp; | 3382 | waittime = now - hlock->waittime_stamp; |
3379 | hlock->holdtime_stamp = now; | 3383 | hlock->holdtime_stamp = now; |
3380 | } | 3384 | } |
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 50d022e5a560..ec815a960b5d 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/poison.h> | 18 | #include <linux/poison.h> |
19 | #include <linux/sched.h> | ||
19 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
20 | #include <linux/kallsyms.h> | 21 | #include <linux/kallsyms.h> |
21 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
diff --git a/kernel/params.c b/kernel/params.c
index 9da58eabdcb2..d656c276508d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -218,15 +218,11 @@ int param_set_charp(const char *val, struct kernel_param *kp) | |||
218 | return -ENOSPC; | 218 | return -ENOSPC; |
219 | } | 219 | } |
220 | 220 | ||
221 | if (kp->flags & KPARAM_KMALLOCED) | ||
222 | kfree(*(char **)kp->arg); | ||
223 | |||
224 | /* This is a hack. We can't need to strdup in early boot, and we | 221 | /* This is a hack. We can't need to strdup in early boot, and we |
225 | * don't need to; this mangled commandline is preserved. */ | 222 | * don't need to; this mangled commandline is preserved. */ |
226 | if (slab_is_available()) { | 223 | if (slab_is_available()) { |
227 | kp->flags |= KPARAM_KMALLOCED; | ||
228 | *(char **)kp->arg = kstrdup(val, GFP_KERNEL); | 224 | *(char **)kp->arg = kstrdup(val, GFP_KERNEL); |
229 | if (!kp->arg) | 225 | if (!*(char **)kp->arg) |
230 | return -ENOMEM; | 226 | return -ENOMEM; |
231 | } else | 227 | } else |
232 | *(const char **)kp->arg = val; | 228 | *(const char **)kp->arg = val; |
@@ -304,6 +300,7 @@ static int param_array(const char *name, | |||
304 | unsigned int min, unsigned int max, | 300 | unsigned int min, unsigned int max, |
305 | void *elem, int elemsize, | 301 | void *elem, int elemsize, |
306 | int (*set)(const char *, struct kernel_param *kp), | 302 | int (*set)(const char *, struct kernel_param *kp), |
303 | u16 flags, | ||
307 | unsigned int *num) | 304 | unsigned int *num) |
308 | { | 305 | { |
309 | int ret; | 306 | int ret; |
@@ -313,6 +310,7 @@ static int param_array(const char *name, | |||
313 | /* Get the name right for errors. */ | 310 | /* Get the name right for errors. */ |
314 | kp.name = name; | 311 | kp.name = name; |
315 | kp.arg = elem; | 312 | kp.arg = elem; |
313 | kp.flags = flags; | ||
316 | 314 | ||
317 | /* No equals sign? */ | 315 | /* No equals sign? */ |
318 | if (!val) { | 316 | if (!val) { |
@@ -358,7 +356,8 @@ int param_array_set(const char *val, struct kernel_param *kp) | |||
358 | unsigned int temp_num; | 356 | unsigned int temp_num; |
359 | 357 | ||
360 | return param_array(kp->name, val, 1, arr->max, arr->elem, | 358 | return param_array(kp->name, val, 1, arr->max, arr->elem, |
361 | arr->elemsize, arr->set, arr->num ?: &temp_num); | 359 | arr->elemsize, arr->set, kp->flags, |
360 | arr->num ?: &temp_num); | ||
362 | } | 361 | } |
363 | 362 | ||
364 | int param_array_get(char *buffer, struct kernel_param *kp) | 363 | int param_array_get(char *buffer, struct kernel_param *kp) |
@@ -605,11 +604,7 @@ void module_param_sysfs_remove(struct module *mod) | |||
605 | 604 | ||
606 | void destroy_params(const struct kernel_param *params, unsigned num) | 605 | void destroy_params(const struct kernel_param *params, unsigned num) |
607 | { | 606 | { |
608 | unsigned int i; | 607 | /* FIXME: This should free kmalloced charp parameters. It doesn't. */ |
609 | |||
610 | for (i = 0; i < num; i++) | ||
611 | if (params[i].flags & KPARAM_KMALLOCED) | ||
612 | kfree(*(char **)params[i].arg); | ||
613 | } | 608 | } |
614 | 609 | ||
615 | static void __init kernel_add_sysfs_param(const char *name, | 610 | static void __init kernel_add_sysfs_param(const char *name, |
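Besides dropping the KPARAM_KMALLOCED bookkeeping, the param_set_charp() hunk fixes the allocation check: after kstrdup(), the thing that can be NULL is the duplicated string stored through kp->arg, not kp->arg itself. A minimal sketch of the corrected check (not part of the patch; the struct below only mirrors the relevant field, and strdup() stands in for kstrdup()):

    #include <string.h>

    struct param { void *arg; };          /* arg points at a char * slot */

    static int set_charp(const char *val, struct param *kp)
    {
        *(char **)kp->arg = strdup(val);
        if (!*(char **)kp->arg)           /* test the new string... */
            return -1;                    /* ...not kp->arg, which is never NULL */
        return 0;
    }

    int main(void)
    {
        char *dest = NULL;
        struct param kp = { .arg = &dest };

        return set_charp("hello", &kp);
    }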
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..7f29643c8985 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | |||
1355 | u64 interrupts, freq; | 1355 | u64 interrupts, freq; |
1356 | 1356 | ||
1357 | spin_lock(&ctx->lock); | 1357 | spin_lock(&ctx->lock); |
1358 | list_for_each_entry(event, &ctx->group_list, group_entry) { | 1358 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
1359 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 1359 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
1360 | continue; | 1360 | continue; |
1361 | 1361 | ||
@@ -3959,8 +3959,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | |||
3959 | regs = task_pt_regs(current); | 3959 | regs = task_pt_regs(current); |
3960 | 3960 | ||
3961 | if (regs) { | 3961 | if (regs) { |
3962 | if (perf_event_overflow(event, 0, &data, regs)) | 3962 | if (!(event->attr.exclude_idle && current->pid == 0)) |
3963 | ret = HRTIMER_NORESTART; | 3963 | if (perf_event_overflow(event, 0, &data, regs)) |
3964 | ret = HRTIMER_NORESTART; | ||
3964 | } | 3965 | } |
3965 | 3966 | ||
3966 | period = max_t(u64, 10000, event->hw.sample_period); | 3967 | period = max_t(u64, 10000, event->hw.sample_period); |
@@ -3969,6 +3970,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | |||
3969 | return ret; | 3970 | return ret; |
3970 | } | 3971 | } |
3971 | 3972 | ||
3973 | static void perf_swevent_start_hrtimer(struct perf_event *event) | ||
3974 | { | ||
3975 | struct hw_perf_event *hwc = &event->hw; | ||
3976 | |||
3977 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
3978 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
3979 | if (hwc->sample_period) { | ||
3980 | u64 period; | ||
3981 | |||
3982 | if (hwc->remaining) { | ||
3983 | if (hwc->remaining < 0) | ||
3984 | period = 10000; | ||
3985 | else | ||
3986 | period = hwc->remaining; | ||
3987 | hwc->remaining = 0; | ||
3988 | } else { | ||
3989 | period = max_t(u64, 10000, hwc->sample_period); | ||
3990 | } | ||
3991 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3992 | ns_to_ktime(period), 0, | ||
3993 | HRTIMER_MODE_REL, 0); | ||
3994 | } | ||
3995 | } | ||
3996 | |||
3997 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) | ||
3998 | { | ||
3999 | struct hw_perf_event *hwc = &event->hw; | ||
4000 | |||
4001 | if (hwc->sample_period) { | ||
4002 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | ||
4003 | hwc->remaining = ktime_to_ns(remaining); | ||
4004 | |||
4005 | hrtimer_cancel(&hwc->hrtimer); | ||
4006 | } | ||
4007 | } | ||
4008 | |||
3972 | /* | 4009 | /* |
3973 | * Software event: cpu wall time clock | 4010 | * Software event: cpu wall time clock |
3974 | */ | 4011 | */ |
@@ -3991,22 +4028,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event) | |||
3991 | int cpu = raw_smp_processor_id(); | 4028 | int cpu = raw_smp_processor_id(); |
3992 | 4029 | ||
3993 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | 4030 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); |
3994 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 4031 | perf_swevent_start_hrtimer(event); |
3995 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
3996 | if (hwc->sample_period) { | ||
3997 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
3998 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3999 | ns_to_ktime(period), 0, | ||
4000 | HRTIMER_MODE_REL, 0); | ||
4001 | } | ||
4002 | 4032 | ||
4003 | return 0; | 4033 | return 0; |
4004 | } | 4034 | } |
4005 | 4035 | ||
4006 | static void cpu_clock_perf_event_disable(struct perf_event *event) | 4036 | static void cpu_clock_perf_event_disable(struct perf_event *event) |
4007 | { | 4037 | { |
4008 | if (event->hw.sample_period) | 4038 | perf_swevent_cancel_hrtimer(event); |
4009 | hrtimer_cancel(&event->hw.hrtimer); | ||
4010 | cpu_clock_perf_event_update(event); | 4039 | cpu_clock_perf_event_update(event); |
4011 | } | 4040 | } |
4012 | 4041 | ||
@@ -4043,22 +4072,15 @@ static int task_clock_perf_event_enable(struct perf_event *event) | |||
4043 | now = event->ctx->time; | 4072 | now = event->ctx->time; |
4044 | 4073 | ||
4045 | atomic64_set(&hwc->prev_count, now); | 4074 | atomic64_set(&hwc->prev_count, now); |
4046 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 4075 | |
4047 | hwc->hrtimer.function = perf_swevent_hrtimer; | 4076 | perf_swevent_start_hrtimer(event); |
4048 | if (hwc->sample_period) { | ||
4049 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
4050 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
4051 | ns_to_ktime(period), 0, | ||
4052 | HRTIMER_MODE_REL, 0); | ||
4053 | } | ||
4054 | 4077 | ||
4055 | return 0; | 4078 | return 0; |
4056 | } | 4079 | } |
4057 | 4080 | ||
4058 | static void task_clock_perf_event_disable(struct perf_event *event) | 4081 | static void task_clock_perf_event_disable(struct perf_event *event) |
4059 | { | 4082 | { |
4060 | if (event->hw.sample_period) | 4083 | perf_swevent_cancel_hrtimer(event); |
4061 | hrtimer_cancel(&event->hw.hrtimer); | ||
4062 | task_clock_perf_event_update(event, event->ctx->time); | 4084 | task_clock_perf_event_update(event, event->ctx->time); |
4063 | 4085 | ||
4064 | } | 4086 | } |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 04b3a83d686f..04a9e90d248f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -693,21 +693,22 @@ static int software_resume(void) | |||
693 | /* The snapshot device should not be opened while we're running */ | 693 | /* The snapshot device should not be opened while we're running */ |
694 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { | 694 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
695 | error = -EBUSY; | 695 | error = -EBUSY; |
696 | swsusp_close(FMODE_READ); | ||
696 | goto Unlock; | 697 | goto Unlock; |
697 | } | 698 | } |
698 | 699 | ||
699 | pm_prepare_console(); | 700 | pm_prepare_console(); |
700 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); | 701 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); |
701 | if (error) | 702 | if (error) |
702 | goto Finish; | 703 | goto close_finish; |
703 | 704 | ||
704 | error = usermodehelper_disable(); | 705 | error = usermodehelper_disable(); |
705 | if (error) | 706 | if (error) |
706 | goto Finish; | 707 | goto close_finish; |
707 | 708 | ||
708 | error = create_basic_memory_bitmaps(); | 709 | error = create_basic_memory_bitmaps(); |
709 | if (error) | 710 | if (error) |
710 | goto Finish; | 711 | goto close_finish; |
711 | 712 | ||
712 | pr_debug("PM: Preparing processes for restore.\n"); | 713 | pr_debug("PM: Preparing processes for restore.\n"); |
713 | error = prepare_processes(); | 714 | error = prepare_processes(); |
@@ -719,6 +720,7 @@ static int software_resume(void) | |||
719 | pr_debug("PM: Reading hibernation image.\n"); | 720 | pr_debug("PM: Reading hibernation image.\n"); |
720 | 721 | ||
721 | error = swsusp_read(&flags); | 722 | error = swsusp_read(&flags); |
723 | swsusp_close(FMODE_READ); | ||
722 | if (!error) | 724 | if (!error) |
723 | hibernation_restore(flags & SF_PLATFORM_MODE); | 725 | hibernation_restore(flags & SF_PLATFORM_MODE); |
724 | 726 | ||
@@ -737,6 +739,9 @@ static int software_resume(void) | |||
737 | mutex_unlock(&pm_mutex); | 739 | mutex_unlock(&pm_mutex); |
738 | pr_debug("PM: Resume from disk failed.\n"); | 740 | pr_debug("PM: Resume from disk failed.\n"); |
739 | return error; | 741 | return error; |
742 | close_finish: | ||
743 | swsusp_close(FMODE_READ); | ||
744 | goto Finish; | ||
740 | } | 745 | } |
741 | 746 | ||
742 | late_initcall(software_resume); | 747 | late_initcall(software_resume); |
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 17d8bb1acf9c..25596e450ac7 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -19,7 +19,7 @@ | |||
19 | * The time it takes is system-specific though, so when we test this | 19 | * The time it takes is system-specific though, so when we test this |
20 | * during system bootup we allow a LOT of time. | 20 | * during system bootup we allow a LOT of time. |
21 | */ | 21 | */ |
22 | #define TEST_SUSPEND_SECONDS 5 | 22 | #define TEST_SUSPEND_SECONDS 10 |
23 | 23 | ||
24 | static unsigned long suspend_test_start_time; | 24 | static unsigned long suspend_test_start_time; |
25 | 25 | ||
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label) | |||
49 | * has some performance issues. The stack dump of a WARN_ON | 49 | * has some performance issues. The stack dump of a WARN_ON |
50 | * is more likely to get the right attention than a printk... | 50 | * is more likely to get the right attention than a printk... |
51 | */ | 51 | */ |
52 | WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label); | 52 | WARN(msec > (TEST_SUSPEND_SECONDS * 1000), |
53 | "Component: %s, time: %u\n", label, msec); | ||
53 | } | 54 | } |
54 | 55 | ||
55 | /* | 56 | /* |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b101cdc4df3f..890f6b11b1d3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -314,7 +314,6 @@ static int save_image(struct swap_map_handle *handle, | |||
314 | { | 314 | { |
315 | unsigned int m; | 315 | unsigned int m; |
316 | int ret; | 316 | int ret; |
317 | int error = 0; | ||
318 | int nr_pages; | 317 | int nr_pages; |
319 | int err2; | 318 | int err2; |
320 | struct bio *bio; | 319 | struct bio *bio; |
@@ -329,26 +328,27 @@ static int save_image(struct swap_map_handle *handle, | |||
329 | nr_pages = 0; | 328 | nr_pages = 0; |
330 | bio = NULL; | 329 | bio = NULL; |
331 | do_gettimeofday(&start); | 330 | do_gettimeofday(&start); |
332 | do { | 331 | while (1) { |
333 | ret = snapshot_read_next(snapshot, PAGE_SIZE); | 332 | ret = snapshot_read_next(snapshot, PAGE_SIZE); |
334 | if (ret > 0) { | 333 | if (ret <= 0) |
335 | error = swap_write_page(handle, data_of(*snapshot), | 334 | break; |
336 | &bio); | 335 | ret = swap_write_page(handle, data_of(*snapshot), &bio); |
337 | if (error) | 336 | if (ret) |
338 | break; | 337 | break; |
339 | if (!(nr_pages % m)) | 338 | if (!(nr_pages % m)) |
340 | printk("\b\b\b\b%3d%%", nr_pages / m); | 339 | printk("\b\b\b\b%3d%%", nr_pages / m); |
341 | nr_pages++; | 340 | nr_pages++; |
342 | } | 341 | } |
343 | } while (ret > 0); | ||
344 | err2 = wait_on_bio_chain(&bio); | 342 | err2 = wait_on_bio_chain(&bio); |
345 | do_gettimeofday(&stop); | 343 | do_gettimeofday(&stop); |
346 | if (!error) | 344 | if (!ret) |
347 | error = err2; | 345 | ret = err2; |
348 | if (!error) | 346 | if (!ret) |
349 | printk("\b\b\b\bdone\n"); | 347 | printk("\b\b\b\bdone\n"); |
348 | else | ||
349 | printk("\n"); | ||
350 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 350 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
351 | return error; | 351 | return ret; |
352 | } | 352 | } |
353 | 353 | ||
354 | /** | 354 | /** |
@@ -536,7 +536,8 @@ static int load_image(struct swap_map_handle *handle, | |||
536 | snapshot_write_finalize(snapshot); | 536 | snapshot_write_finalize(snapshot); |
537 | if (!snapshot_image_loaded(snapshot)) | 537 | if (!snapshot_image_loaded(snapshot)) |
538 | error = -ENODATA; | 538 | error = -ENODATA; |
539 | } | 539 | } else |
540 | printk("\n"); | ||
540 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 541 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
541 | return error; | 542 | return error; |
542 | } | 543 | } |
@@ -572,8 +573,6 @@ int swsusp_read(unsigned int *flags_p) | |||
572 | error = load_image(&handle, &snapshot, header->pages - 1); | 573 | error = load_image(&handle, &snapshot, header->pages - 1); |
573 | release_swap_reader(&handle); | 574 | release_swap_reader(&handle); |
574 | 575 | ||
575 | blkdev_put(resume_bdev, FMODE_READ); | ||
576 | |||
577 | if (!error) | 576 | if (!error) |
578 | pr_debug("PM: Image successfully loaded\n"); | 577 | pr_debug("PM: Image successfully loaded\n"); |
579 | else | 578 | else |
@@ -596,7 +595,7 @@ int swsusp_check(void) | |||
596 | error = bio_read_page(swsusp_resume_block, | 595 | error = bio_read_page(swsusp_resume_block, |
597 | swsusp_header, NULL); | 596 | swsusp_header, NULL); |
598 | if (error) | 597 | if (error) |
599 | return error; | 598 | goto put; |
600 | 599 | ||
601 | if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { | 600 | if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { |
602 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); | 601 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); |
@@ -604,8 +603,10 @@ int swsusp_check(void) | |||
604 | error = bio_write_page(swsusp_resume_block, | 603 | error = bio_write_page(swsusp_resume_block, |
605 | swsusp_header, NULL); | 604 | swsusp_header, NULL); |
606 | } else { | 605 | } else { |
607 | return -EINVAL; | 606 | error = -EINVAL; |
608 | } | 607 | } |
608 | |||
609 | put: | ||
609 | if (error) | 610 | if (error) |
610 | blkdev_put(resume_bdev, FMODE_READ); | 611 | blkdev_put(resume_bdev, FMODE_READ); |
611 | else | 612 | else |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 705f02ac7433..0536125b0497 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -913,7 +913,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
913 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 913 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
914 | break; | 914 | break; |
915 | } | 915 | } |
916 | rcu_preempt_offline_tasks(rsp, rnp, rdp); | 916 | |
917 | /* | ||
918 | * If there was a task blocking the current grace period, | ||
919 | * and if all CPUs have checked in, we need to propagate | ||
920 | * the quiescent state up the rcu_node hierarchy. But that | ||
921 | * is inconvenient at the moment due to deadlock issues if | ||
922 | * this should end the current grace period. So set the | ||
923 | * offlined CPU's bit in ->qsmask in order to force the | ||
924 | * next force_quiescent_state() invocation to clean up this | ||
925 | * mess in a deadlock-free manner. | ||
926 | */ | ||
927 | if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask) | ||
928 | rnp->qsmask |= mask; | ||
929 | |||
917 | mask = rnp->grpmask; | 930 | mask = rnp->grpmask; |
918 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 931 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
919 | rnp = rnp->parent; | 932 | rnp = rnp->parent; |
@@ -958,7 +971,7 @@ static void rcu_offline_cpu(int cpu) | |||
958 | * Invoke any RCU callbacks that have made it to the end of their grace | 971 | * Invoke any RCU callbacks that have made it to the end of their grace |
959 | * period. Thottle as specified by rdp->blimit. | 972 | * period. Thottle as specified by rdp->blimit. |
960 | */ | 973 | */ |
961 | static void rcu_do_batch(struct rcu_data *rdp) | 974 | static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) |
962 | { | 975 | { |
963 | unsigned long flags; | 976 | unsigned long flags; |
964 | struct rcu_head *next, *list, **tail; | 977 | struct rcu_head *next, *list, **tail; |
@@ -1011,6 +1024,13 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
1011 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) | 1024 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) |
1012 | rdp->blimit = blimit; | 1025 | rdp->blimit = blimit; |
1013 | 1026 | ||
1027 | /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ | ||
1028 | if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { | ||
1029 | rdp->qlen_last_fqs_check = 0; | ||
1030 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1031 | } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) | ||
1032 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1033 | |||
1014 | local_irq_restore(flags); | 1034 | local_irq_restore(flags); |
1015 | 1035 | ||
1016 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1036 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
@@ -1224,7 +1244,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1224 | } | 1244 | } |
1225 | 1245 | ||
1226 | /* If there are callbacks ready, invoke them. */ | 1246 | /* If there are callbacks ready, invoke them. */ |
1227 | rcu_do_batch(rdp); | 1247 | rcu_do_batch(rsp, rdp); |
1228 | } | 1248 | } |
1229 | 1249 | ||
1230 | /* | 1250 | /* |
@@ -1288,10 +1308,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1288 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ | 1308 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ |
1289 | } | 1309 | } |
1290 | 1310 | ||
1291 | /* Force the grace period if too many callbacks or too long waiting. */ | 1311 | /* |
1292 | if (unlikely(++rdp->qlen > qhimark)) { | 1312 | * Force the grace period if too many callbacks or too long waiting. |
1313 | * Enforce hysteresis, and don't invoke force_quiescent_state() | ||
1314 | * if some other CPU has recently done so. Also, don't bother | ||
1315 | * invoking force_quiescent_state() if the newly enqueued callback | ||
1316 | * is the only one waiting for a grace period to complete. | ||
1317 | */ | ||
1318 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { | ||
1293 | rdp->blimit = LONG_MAX; | 1319 | rdp->blimit = LONG_MAX; |
1294 | force_quiescent_state(rsp, 0); | 1320 | if (rsp->n_force_qs == rdp->n_force_qs_snap && |
1321 | *rdp->nxttail[RCU_DONE_TAIL] != head) | ||
1322 | force_quiescent_state(rsp, 0); | ||
1323 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1324 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1295 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) | 1325 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) |
1296 | force_quiescent_state(rsp, 1); | 1326 | force_quiescent_state(rsp, 1); |
1297 | local_irq_restore(flags); | 1327 | local_irq_restore(flags); |
@@ -1523,6 +1553,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | |||
1523 | rdp->beenonline = 1; /* We have now been online. */ | 1553 | rdp->beenonline = 1; /* We have now been online. */ |
1524 | rdp->preemptable = preemptable; | 1554 | rdp->preemptable = preemptable; |
1525 | rdp->passed_quiesc_completed = lastcomp - 1; | 1555 | rdp->passed_quiesc_completed = lastcomp - 1; |
1556 | rdp->qlen_last_fqs_check = 0; | ||
1557 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1526 | rdp->blimit = blimit; | 1558 | rdp->blimit = blimit; |
1527 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1559 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
1528 | 1560 | ||
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index b40ac5706040..1823c6e20609 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -167,6 +167,10 @@ struct rcu_data { | |||
167 | struct rcu_head *nxtlist; | 167 | struct rcu_head *nxtlist; |
168 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; | 168 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; |
169 | long qlen; /* # of queued callbacks */ | 169 | long qlen; /* # of queued callbacks */ |
170 | long qlen_last_fqs_check; | ||
171 | /* qlen at last check for QS forcing */ | ||
172 | unsigned long n_force_qs_snap; | ||
173 | /* did other CPU force QS recently? */ | ||
170 | long blimit; /* Upper limit on a processed batch */ | 174 | long blimit; /* Upper limit on a processed batch */ |
171 | 175 | ||
172 | #ifdef CONFIG_NO_HZ | 176 | #ifdef CONFIG_NO_HZ |
@@ -302,9 +306,9 @@ static void rcu_print_task_stall(struct rcu_node *rnp); | |||
302 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 306 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
303 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); | 307 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); |
304 | #ifdef CONFIG_HOTPLUG_CPU | 308 | #ifdef CONFIG_HOTPLUG_CPU |
305 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 309 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
306 | struct rcu_node *rnp, | 310 | struct rcu_node *rnp, |
307 | struct rcu_data *rdp); | 311 | struct rcu_data *rdp); |
308 | static void rcu_preempt_offline_cpu(int cpu); | 312 | static void rcu_preempt_offline_cpu(int cpu); |
309 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 313 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
310 | static void rcu_preempt_check_callbacks(int cpu); | 314 | static void rcu_preempt_check_callbacks(int cpu); |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c0cb783aa16a..ef2a58c2b9d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
304 | * parent is to remove the need for rcu_read_unlock_special() to | 304 | * parent is to remove the need for rcu_read_unlock_special() to |
305 | * make more than two attempts to acquire the target rcu_node's lock. | 305 | * make more than two attempts to acquire the target rcu_node's lock. |
306 | * | 306 | * |
307 | * Returns 1 if there was previously a task blocking the current grace | ||
308 | * period on the specified rcu_node structure. | ||
309 | * | ||
307 | * The caller must hold rnp->lock with irqs disabled. | 310 | * The caller must hold rnp->lock with irqs disabled. |
308 | */ | 311 | */ |
309 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 312 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
310 | struct rcu_node *rnp, | 313 | struct rcu_node *rnp, |
311 | struct rcu_data *rdp) | 314 | struct rcu_data *rdp) |
312 | { | 315 | { |
313 | int i; | 316 | int i; |
314 | struct list_head *lp; | 317 | struct list_head *lp; |
315 | struct list_head *lp_root; | 318 | struct list_head *lp_root; |
319 | int retval = rcu_preempted_readers(rnp); | ||
316 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 320 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
317 | struct task_struct *tp; | 321 | struct task_struct *tp; |
318 | 322 | ||
319 | if (rnp == rnp_root) { | 323 | if (rnp == rnp_root) { |
320 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | 324 | WARN_ONCE(1, "Last CPU thought to be offlined?"); |
321 | return; /* Shouldn't happen: at least one CPU online. */ | 325 | return 0; /* Shouldn't happen: at least one CPU online. */ |
322 | } | 326 | } |
323 | WARN_ON_ONCE(rnp != rdp->mynode && | 327 | WARN_ON_ONCE(rnp != rdp->mynode && |
324 | (!list_empty(&rnp->blocked_tasks[0]) || | 328 | (!list_empty(&rnp->blocked_tasks[0]) || |
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
342 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ | 346 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ |
343 | } | 347 | } |
344 | } | 348 | } |
349 | |||
350 | return retval; | ||
345 | } | 351 | } |
346 | 352 | ||
347 | /* | 353 | /* |
@@ -393,6 +399,17 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
393 | EXPORT_SYMBOL_GPL(call_rcu); | 399 | EXPORT_SYMBOL_GPL(call_rcu); |
394 | 400 | ||
395 | /* | 401 | /* |
402 | * Wait for an rcu-preempt grace period. We are supposed to expedite the | ||
403 | * grace period, but this is the crude slow compatability hack, so just | ||
404 | * invoke synchronize_rcu(). | ||
405 | */ | ||
406 | void synchronize_rcu_expedited(void) | ||
407 | { | ||
408 | synchronize_rcu(); | ||
409 | } | ||
410 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
411 | |||
412 | /* | ||
396 | * Check to see if there is any immediate preemptable-RCU-related work | 413 | * Check to see if there is any immediate preemptable-RCU-related work |
397 | * to be done. | 414 | * to be done. |
398 | */ | 415 | */ |
@@ -521,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
521 | 538 | ||
522 | /* | 539 | /* |
523 | * Because preemptable RCU does not exist, it never needs to migrate | 540 | * Because preemptable RCU does not exist, it never needs to migrate |
524 | * tasks that were blocked within RCU read-side critical sections. | 541 | * tasks that were blocked within RCU read-side critical sections, and |
542 | * such non-existent tasks cannot possibly have been blocking the current | ||
543 | * grace period. | ||
525 | */ | 544 | */ |
526 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 545 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
527 | struct rcu_node *rnp, | 546 | struct rcu_node *rnp, |
528 | struct rcu_data *rdp) | 547 | struct rcu_data *rdp) |
529 | { | 548 | { |
549 | return 0; | ||
530 | } | 550 | } |
531 | 551 | ||
532 | /* | 552 | /* |
@@ -565,6 +585,16 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
565 | EXPORT_SYMBOL_GPL(call_rcu); | 585 | EXPORT_SYMBOL_GPL(call_rcu); |
566 | 586 | ||
567 | /* | 587 | /* |
588 | * Wait for an rcu-preempt grace period, but make it happen quickly. | ||
589 | * But because preemptable RCU does not exist, map to rcu-sched. | ||
590 | */ | ||
591 | void synchronize_rcu_expedited(void) | ||
592 | { | ||
593 | synchronize_sched_expedited(); | ||
594 | } | ||
595 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
596 | |||
597 | /* | ||
568 | * Because preemptable RCU does not exist, it never has any work to do. | 598 | * Because preemptable RCU does not exist, it never has any work to do. |
569 | */ | 599 | */ |
570 | static int rcu_preempt_pending(int cpu) | 600 | static int rcu_preempt_pending(int cpu) |
diff --git a/kernel/sched.c b/kernel/sched.c
index 5cb7d637e33a..28dd4f490bfc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1564,11 +1564,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) | |||
1564 | 1564 | ||
1565 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1565 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1566 | 1566 | ||
1567 | struct update_shares_data { | 1567 | static __read_mostly unsigned long *update_shares_data; |
1568 | unsigned long rq_weight[NR_CPUS]; | ||
1569 | }; | ||
1570 | |||
1571 | static DEFINE_PER_CPU(struct update_shares_data, update_shares_data); | ||
1572 | 1568 | ||
1573 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | 1569 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); |
1574 | 1570 | ||
@@ -1578,12 +1574,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares); | |||
1578 | static void update_group_shares_cpu(struct task_group *tg, int cpu, | 1574 | static void update_group_shares_cpu(struct task_group *tg, int cpu, |
1579 | unsigned long sd_shares, | 1575 | unsigned long sd_shares, |
1580 | unsigned long sd_rq_weight, | 1576 | unsigned long sd_rq_weight, |
1581 | struct update_shares_data *usd) | 1577 | unsigned long *usd_rq_weight) |
1582 | { | 1578 | { |
1583 | unsigned long shares, rq_weight; | 1579 | unsigned long shares, rq_weight; |
1584 | int boost = 0; | 1580 | int boost = 0; |
1585 | 1581 | ||
1586 | rq_weight = usd->rq_weight[cpu]; | 1582 | rq_weight = usd_rq_weight[cpu]; |
1587 | if (!rq_weight) { | 1583 | if (!rq_weight) { |
1588 | boost = 1; | 1584 | boost = 1; |
1589 | rq_weight = NICE_0_LOAD; | 1585 | rq_weight = NICE_0_LOAD; |
@@ -1618,7 +1614,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1618 | static int tg_shares_up(struct task_group *tg, void *data) | 1614 | static int tg_shares_up(struct task_group *tg, void *data) |
1619 | { | 1615 | { |
1620 | unsigned long weight, rq_weight = 0, shares = 0; | 1616 | unsigned long weight, rq_weight = 0, shares = 0; |
1621 | struct update_shares_data *usd; | 1617 | unsigned long *usd_rq_weight; |
1622 | struct sched_domain *sd = data; | 1618 | struct sched_domain *sd = data; |
1623 | unsigned long flags; | 1619 | unsigned long flags; |
1624 | int i; | 1620 | int i; |
@@ -1627,11 +1623,11 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1627 | return 0; | 1623 | return 0; |
1628 | 1624 | ||
1629 | local_irq_save(flags); | 1625 | local_irq_save(flags); |
1630 | usd = &__get_cpu_var(update_shares_data); | 1626 | usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id()); |
1631 | 1627 | ||
1632 | for_each_cpu(i, sched_domain_span(sd)) { | 1628 | for_each_cpu(i, sched_domain_span(sd)) { |
1633 | weight = tg->cfs_rq[i]->load.weight; | 1629 | weight = tg->cfs_rq[i]->load.weight; |
1634 | usd->rq_weight[i] = weight; | 1630 | usd_rq_weight[i] = weight; |
1635 | 1631 | ||
1636 | /* | 1632 | /* |
1637 | * If there are currently no tasks on the cpu pretend there | 1633 | * If there are currently no tasks on the cpu pretend there |
@@ -1652,7 +1648,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1652 | shares = tg->shares; | 1648 | shares = tg->shares; |
1653 | 1649 | ||
1654 | for_each_cpu(i, sched_domain_span(sd)) | 1650 | for_each_cpu(i, sched_domain_span(sd)) |
1655 | update_group_shares_cpu(tg, i, shares, rq_weight, usd); | 1651 | update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight); |
1656 | 1652 | ||
1657 | local_irq_restore(flags); | 1653 | local_irq_restore(flags); |
1658 | 1654 | ||
@@ -6756,9 +6752,6 @@ EXPORT_SYMBOL(yield); | |||
6756 | /* | 6752 | /* |
6757 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so | 6753 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
6758 | * that process accounting knows that this is a task in IO wait state. | 6754 | * that process accounting knows that this is a task in IO wait state. |
6759 | * | ||
6760 | * But don't do that if it is a deliberate, throttling IO wait (this task | ||
6761 | * has set its backing_dev_info: the queue against which it should throttle) | ||
6762 | */ | 6755 | */ |
6763 | void __sched io_schedule(void) | 6756 | void __sched io_schedule(void) |
6764 | { | 6757 | { |
@@ -9442,6 +9435,10 @@ void __init sched_init(void) | |||
9442 | #endif /* CONFIG_USER_SCHED */ | 9435 | #endif /* CONFIG_USER_SCHED */ |
9443 | #endif /* CONFIG_GROUP_SCHED */ | 9436 | #endif /* CONFIG_GROUP_SCHED */ |
9444 | 9437 | ||
9438 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP | ||
9439 | update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), | ||
9440 | __alignof__(unsigned long)); | ||
9441 | #endif | ||
9445 | for_each_possible_cpu(i) { | 9442 | for_each_possible_cpu(i) { |
9446 | struct rq *rq; | 9443 | struct rq *rq; |
9447 | 9444 | ||
diff --git a/kernel/sys.c b/kernel/sys.c
index 255475d163e0..ce17760d9c51 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid) | |||
1110 | err = session; | 1110 | err = session; |
1111 | out: | 1111 | out: |
1112 | write_unlock_irq(&tasklist_lock); | 1112 | write_unlock_irq(&tasklist_lock); |
1113 | if (err > 0) | ||
1114 | proc_sid_connector(group_leader); | ||
1113 | return err; | 1115 | return err; |
1114 | } | 1116 | } |
1115 | 1117 | ||
@@ -1546,24 +1548,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
1546 | if (arg4 | arg5) | 1548 | if (arg4 | arg5) |
1547 | return -EINVAL; | 1549 | return -EINVAL; |
1548 | switch (arg2) { | 1550 | switch (arg2) { |
1549 | case 0: | 1551 | case PR_MCE_KILL_CLEAR: |
1550 | if (arg3 != 0) | 1552 | if (arg3 != 0) |
1551 | return -EINVAL; | 1553 | return -EINVAL; |
1552 | current->flags &= ~PF_MCE_PROCESS; | 1554 | current->flags &= ~PF_MCE_PROCESS; |
1553 | break; | 1555 | break; |
1554 | case 1: | 1556 | case PR_MCE_KILL_SET: |
1555 | current->flags |= PF_MCE_PROCESS; | 1557 | current->flags |= PF_MCE_PROCESS; |
1556 | if (arg3 != 0) | 1558 | if (arg3 == PR_MCE_KILL_EARLY) |
1557 | current->flags |= PF_MCE_EARLY; | 1559 | current->flags |= PF_MCE_EARLY; |
1558 | else | 1560 | else if (arg3 == PR_MCE_KILL_LATE) |
1559 | current->flags &= ~PF_MCE_EARLY; | 1561 | current->flags &= ~PF_MCE_EARLY; |
1562 | else if (arg3 == PR_MCE_KILL_DEFAULT) | ||
1563 | current->flags &= | ||
1564 | ~(PF_MCE_EARLY|PF_MCE_PROCESS); | ||
1565 | else | ||
1566 | return -EINVAL; | ||
1560 | break; | 1567 | break; |
1561 | default: | 1568 | default: |
1562 | return -EINVAL; | 1569 | return -EINVAL; |
1563 | } | 1570 | } |
1564 | error = 0; | 1571 | error = 0; |
1565 | break; | 1572 | break; |
1566 | 1573 | case PR_MCE_KILL_GET: | |
1574 | if (arg2 | arg3 | arg4 | arg5) | ||
1575 | return -EINVAL; | ||
1576 | if (current->flags & PF_MCE_PROCESS) | ||
1577 | error = (current->flags & PF_MCE_EARLY) ? | ||
1578 | PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE; | ||
1579 | else | ||
1580 | error = PR_MCE_KILL_DEFAULT; | ||
1581 | break; | ||
1567 | default: | 1582 | default: |
1568 | error = -EINVAL; | 1583 | error = -EINVAL; |
1569 | break; | 1584 | break; |
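The prctl() hunks above replace the bare 0/1 sub-codes with the PR_MCE_KILL_CLEAR/PR_MCE_KILL_SET names, validate arg3 against the EARLY/LATE/DEFAULT policies, and add a PR_MCE_KILL_GET option to read the policy back. A short sketch of the userspace view (not part of the patch), assuming kernel headers that already carry these PR_MCE_KILL* constants:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
        /* Ask for early kill on memory failure for this process. */
        if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
            perror("PR_MCE_KILL_SET");

        /* Read the policy back: EARLY, LATE or DEFAULT. */
        int policy = prctl(PR_MCE_KILL_GET, 0, 0, 0, 0);

        printf("mce kill policy: %d\n", policy);
        return 0;
    }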
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b38423ca711a..b6e7aaea4604 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) | |||
1521 | if (!table->ctl_name && table->strategy) | 1521 | if (!table->ctl_name && table->strategy) |
1522 | set_fail(&fail, table, "Strategy without ctl_name"); | 1522 | set_fail(&fail, table, "Strategy without ctl_name"); |
1523 | #endif | 1523 | #endif |
1524 | #ifdef CONFIG_PROC_FS | 1524 | #ifdef CONFIG_PROC_SYSCTL |
1525 | if (table->procname && !table->proc_handler) | 1525 | if (table->procname && !table->proc_handler) |
1526 | set_fail(&fail, table, "No proc_handler"); | 1526 | set_fail(&fail, table, "No proc_handler"); |
1527 | #endif | 1527 | #endif |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fb0f46fa1ecd..c3a4e2907eaa 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/percpu.h> | 13 | #include <linux/percpu.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/sched.h> | ||
16 | #include <linux/sysdev.h> | 17 | #include <linux/sysdev.h> |
17 | #include <linux/clocksource.h> | 18 | #include <linux/clocksource.h> |
18 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 37ba67e33265..6dc4e5ef7a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -740,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf, | |||
740 | out: | 740 | out: |
741 | mutex_unlock(&ftrace_profile_lock); | 741 | mutex_unlock(&ftrace_profile_lock); |
742 | 742 | ||
743 | filp->f_pos += cnt; | 743 | *ppos += cnt; |
744 | 744 | ||
745 | return cnt; | 745 | return cnt; |
746 | } | 746 | } |
@@ -2222,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
2222 | ret = ftrace_process_regex(parser->buffer, | 2222 | ret = ftrace_process_regex(parser->buffer, |
2223 | parser->idx, enable); | 2223 | parser->idx, enable); |
2224 | if (ret) | 2224 | if (ret) |
2225 | goto out; | 2225 | goto out_unlock; |
2226 | 2226 | ||
2227 | trace_parser_clear(parser); | 2227 | trace_parser_clear(parser); |
2228 | } | 2228 | } |
2229 | 2229 | ||
2230 | ret = read; | 2230 | ret = read; |
2231 | 2231 | out_unlock: | |
2232 | mutex_unlock(&ftrace_regex_lock); | 2232 | mutex_unlock(&ftrace_regex_lock); |
2233 | out: | 2233 | |
2234 | return ret; | 2234 | return ret; |
2235 | } | 2235 | } |
2236 | 2236 | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..5dd017fea6f5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -483,7 +483,7 @@ struct ring_buffer_iter { | |||
483 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 483 | /* Up this if you want to test the TIME_EXTENTS and normalization */ |
484 | #define DEBUG_SHIFT 0 | 484 | #define DEBUG_SHIFT 0 |
485 | 485 | ||
486 | static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) | 486 | static inline u64 rb_time_stamp(struct ring_buffer *buffer) |
487 | { | 487 | { |
488 | /* shift to debug/test normalization and TIME_EXTENTS */ | 488 | /* shift to debug/test normalization and TIME_EXTENTS */ |
489 | return buffer->clock() << DEBUG_SHIFT; | 489 | return buffer->clock() << DEBUG_SHIFT; |
@@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) | |||
494 | u64 time; | 494 | u64 time; |
495 | 495 | ||
496 | preempt_disable_notrace(); | 496 | preempt_disable_notrace(); |
497 | time = rb_time_stamp(buffer, cpu); | 497 | time = rb_time_stamp(buffer); |
498 | preempt_enable_no_resched_notrace(); | 498 | preempt_enable_no_resched_notrace(); |
499 | 499 | ||
500 | return time; | 500 | return time; |
@@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list) | |||
599 | } | 599 | } |
600 | 600 | ||
601 | /* | 601 | /* |
602 | * rb_is_head_page - test if the give page is the head page | 602 | * rb_is_head_page - test if the given page is the head page |
603 | * | 603 | * |
604 | * Because the reader may move the head_page pointer, we can | 604 | * Because the reader may move the head_page pointer, we can |
605 | * not trust what the head page is (it may be pointing to | 605 | * not trust what the head page is (it may be pointing to |
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
1193 | atomic_inc(&cpu_buffer->record_disabled); | 1193 | atomic_inc(&cpu_buffer->record_disabled); |
1194 | synchronize_sched(); | 1194 | synchronize_sched(); |
1195 | 1195 | ||
1196 | spin_lock_irq(&cpu_buffer->reader_lock); | ||
1196 | rb_head_page_deactivate(cpu_buffer); | 1197 | rb_head_page_deactivate(cpu_buffer); |
1197 | 1198 | ||
1198 | for (i = 0; i < nr_pages; i++) { | 1199 | for (i = 0; i < nr_pages; i++) { |
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
1207 | return; | 1208 | return; |
1208 | 1209 | ||
1209 | rb_reset_cpu(cpu_buffer); | 1210 | rb_reset_cpu(cpu_buffer); |
1211 | spin_unlock_irq(&cpu_buffer->reader_lock); | ||
1210 | 1212 | ||
1211 | rb_check_pages(cpu_buffer); | 1213 | rb_check_pages(cpu_buffer); |
1212 | 1214 | ||
@@ -1868,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, | |||
1868 | * Nested commits always have zero deltas, so | 1870 | * Nested commits always have zero deltas, so |
1869 | * just reread the time stamp | 1871 | * just reread the time stamp |
1870 | */ | 1872 | */ |
1871 | *ts = rb_time_stamp(buffer, cpu_buffer->cpu); | 1873 | *ts = rb_time_stamp(buffer); |
1872 | next_page->page->time_stamp = *ts; | 1874 | next_page->page->time_stamp = *ts; |
1873 | } | 1875 | } |
1874 | 1876 | ||
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2111 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) | 2113 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) |
2112 | goto out_fail; | 2114 | goto out_fail; |
2113 | 2115 | ||
2114 | ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); | 2116 | ts = rb_time_stamp(cpu_buffer->buffer); |
2115 | 2117 | ||
2116 | /* | 2118 | /* |
2117 | * Only the first commit can update the timestamp. | 2119 | * Only the first commit can update the timestamp. |
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer) | |||
2681 | EXPORT_SYMBOL_GPL(ring_buffer_entries); | 2683 | EXPORT_SYMBOL_GPL(ring_buffer_entries); |
2682 | 2684 | ||
2683 | /** | 2685 | /** |
2684 | * ring_buffer_overrun_cpu - get the number of overruns in buffer | 2686 | * ring_buffer_overruns - get the number of overruns in buffer |
2685 | * @buffer: The ring buffer | 2687 | * @buffer: The ring buffer |
2686 | * | 2688 | * |
2687 | * Returns the total number of overruns in the ring buffer | 2689 | * Returns the total number of overruns in the ring buffer |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..b20d3ec75de9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1393 | 1393 | ||
1394 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 1394 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) |
1395 | { | 1395 | { |
1396 | return trace_array_printk(&global_trace, ip, fmt, args); | 1396 | return trace_array_vprintk(&global_trace, ip, fmt, args); |
1397 | } | 1397 | } |
1398 | EXPORT_SYMBOL_GPL(trace_vprintk); | 1398 | EXPORT_SYMBOL_GPL(trace_vprintk); |
1399 | 1399 | ||
@@ -2440,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2440 | return ret; | 2440 | return ret; |
2441 | } | 2441 | } |
2442 | 2442 | ||
2443 | filp->f_pos += cnt; | 2443 | *ppos += cnt; |
2444 | 2444 | ||
2445 | return cnt; | 2445 | return cnt; |
2446 | } | 2446 | } |
@@ -2582,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2582 | } | 2582 | } |
2583 | mutex_unlock(&trace_types_lock); | 2583 | mutex_unlock(&trace_types_lock); |
2584 | 2584 | ||
2585 | filp->f_pos += cnt; | 2585 | *ppos += cnt; |
2586 | 2586 | ||
2587 | return cnt; | 2587 | return cnt; |
2588 | } | 2588 | } |
@@ -2764,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2764 | if (err) | 2764 | if (err) |
2765 | return err; | 2765 | return err; |
2766 | 2766 | ||
2767 | filp->f_pos += ret; | 2767 | *ppos += ret; |
2768 | 2768 | ||
2769 | return ret; | 2769 | return ret; |
2770 | } | 2770 | } |
@@ -3299,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3299 | } | 3299 | } |
3300 | } | 3300 | } |
3301 | 3301 | ||
3302 | filp->f_pos += cnt; | 3302 | *ppos += cnt; |
3303 | 3303 | ||
3304 | /* If check pages failed, return ENOMEM */ | 3304 | /* If check pages failed, return ENOMEM */ |
3305 | if (tracing_disabled) | 3305 | if (tracing_disabled) |
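All of the tracing write handlers above (and ftrace_profile_write() earlier) switch from poking filp->f_pos to advancing the *ppos argument, which is the file_operations contract: the VFS copies *ppos back to the caller's file offset after the write, and pwrite()-style callers pass a private offset that never goes through f_pos. A hypothetical handler following that rule (not part of the patch; assumes the usual kernel build environment):

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    static ssize_t example_write(struct file *filp, const char __user *ubuf,
                                 size_t cnt, loff_t *ppos)
    {
        char buf[64];

        if (cnt >= sizeof(buf))
            return -EINVAL;
        if (copy_from_user(buf, ubuf, cnt))
            return -EFAULT;
        buf[cnt] = '\0';
        /* ... parse buf and apply the setting ... */

        *ppos += cnt;           /* advance the offset handed in, not filp->f_pos */
        return cnt;
    }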
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927f..98a6cc5c64ed 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps) | |||
933 | 933 | ||
934 | while (!list_empty(&ps->postfix)) { | 934 | while (!list_empty(&ps->postfix)) { |
935 | elt = list_first_entry(&ps->postfix, struct postfix_elt, list); | 935 | elt = list_first_entry(&ps->postfix, struct postfix_elt, list); |
936 | kfree(elt->operand); | ||
937 | list_del(&elt->list); | 936 | list_del(&elt->list); |
937 | kfree(elt->operand); | ||
938 | kfree(elt); | ||
938 | } | 939 | } |
939 | } | 940 | } |
940 | 941 | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ed17565826b0..b6c12c6a1bcd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
69 | * @s: trace sequence descriptor | 69 | * @s: trace sequence descriptor |
70 | * @fmt: printf format string | 70 | * @fmt: printf format string |
71 | * | 71 | * |
72 | * It returns 0 if the trace oversizes the buffer's free | ||
73 | * space, 1 otherwise. | ||
74 | * | ||
72 | * The tracer may use either sequence operations or its own | 75 | * The tracer may use either sequence operations or its own |
73 | * copy to user routines. To simplify formating of a trace | 76 | * copy to user routines. To simplify formating of a trace |
74 | * trace_seq_printf is used to store strings into a special | 77 | * trace_seq_printf is used to store strings into a special |
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | |||
95 | 98 | ||
96 | s->len += ret; | 99 | s->len += ret; |
97 | 100 | ||
98 | return len; | 101 | return 1; |
99 | } | 102 | } |
100 | EXPORT_SYMBOL_GPL(trace_seq_printf); | 103 | EXPORT_SYMBOL_GPL(trace_seq_printf); |
101 | 104 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..12328147132c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork, | |||
640 | EXPORT_SYMBOL(schedule_delayed_work); | 640 | EXPORT_SYMBOL(schedule_delayed_work); |
641 | 641 | ||
642 | /** | 642 | /** |
643 | * flush_delayed_work - block until a dwork_struct's callback has terminated | ||
644 | * @dwork: the delayed work which is to be flushed | ||
645 | * | ||
646 | * Any timeout is cancelled, and any pending work is run immediately. | ||
647 | */ | ||
648 | void flush_delayed_work(struct delayed_work *dwork) | ||
649 | { | ||
650 | if (del_timer_sync(&dwork->timer)) { | ||
651 | struct cpu_workqueue_struct *cwq; | ||
652 | cwq = wq_per_cpu(keventd_wq, get_cpu()); | ||
653 | __queue_work(cwq, &dwork->work); | ||
654 | put_cpu(); | ||
655 | } | ||
656 | flush_work(&dwork->work); | ||
657 | } | ||
658 | EXPORT_SYMBOL(flush_delayed_work); | ||
659 | |||
660 | /** | ||
643 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 661 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
644 | * @cpu: cpu to use | 662 | * @cpu: cpu to use |
645 | * @dwork: job to be done | 663 | * @dwork: job to be done |
@@ -667,21 +685,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on); | |||
667 | int schedule_on_each_cpu(work_func_t func) | 685 | int schedule_on_each_cpu(work_func_t func) |
668 | { | 686 | { |
669 | int cpu; | 687 | int cpu; |
688 | int orig = -1; | ||
670 | struct work_struct *works; | 689 | struct work_struct *works; |
671 | 690 | ||
672 | works = alloc_percpu(struct work_struct); | 691 | works = alloc_percpu(struct work_struct); |
673 | if (!works) | 692 | if (!works) |
674 | return -ENOMEM; | 693 | return -ENOMEM; |
675 | 694 | ||
695 | /* | ||
696 | * when running in keventd don't schedule a work item on itself. | ||
697 | * Can just call directly because the work queue is already bound. | ||
698 | * This also is faster. | ||
699 | * Make this a generic parameter for other workqueues? | ||
700 | */ | ||
701 | if (current_is_keventd()) { | ||
702 | orig = raw_smp_processor_id(); | ||
703 | INIT_WORK(per_cpu_ptr(works, orig), func); | ||
704 | func(per_cpu_ptr(works, orig)); | ||
705 | } | ||
706 | |||
676 | get_online_cpus(); | 707 | get_online_cpus(); |
677 | for_each_online_cpu(cpu) { | 708 | for_each_online_cpu(cpu) { |
678 | struct work_struct *work = per_cpu_ptr(works, cpu); | 709 | struct work_struct *work = per_cpu_ptr(works, cpu); |
679 | 710 | ||
711 | if (cpu == orig) | ||
712 | continue; | ||
680 | INIT_WORK(work, func); | 713 | INIT_WORK(work, func); |
681 | schedule_work_on(cpu, work); | 714 | schedule_work_on(cpu, work); |
682 | } | 715 | } |
683 | for_each_online_cpu(cpu) | 716 | for_each_online_cpu(cpu) { |
684 | flush_work(per_cpu_ptr(works, cpu)); | 717 | if (cpu != orig) |
718 | flush_work(per_cpu_ptr(works, cpu)); | ||
719 | } | ||
685 | put_online_cpus(); | 720 | put_online_cpus(); |
686 | free_percpu(works); | 721 | free_percpu(works); |
687 | return 0; | 722 | return 0; |
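The new flush_delayed_work() above cancels a still-armed timer, queues the work immediately on the local CPU's keventd queue, and then flushes it, so the caller knows the handler has run to completion. A sketch of a typical teardown-time caller (not part of the patch; the driver and field names are hypothetical):

    #include <linux/workqueue.h>

    struct mydev {
        struct delayed_work poll_work;
        /* ... device state the work handler touches ... */
    };

    static void mydev_teardown(struct mydev *dev)
    {
        /* Run a still-pending poll now, then wait for it to finish. */
        flush_delayed_work(&dev->poll_work);
        /* It is now safe to free the state poll_work uses. */
    }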