Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                    |  8
-rw-r--r--  kernel/exit.c                      |  4
-rw-r--r--  kernel/futex.c                     | 24
-rw-r--r--  kernel/lockdep.c                   | 20
-rw-r--r--  kernel/perf_event.c                |  2
-rw-r--r--  kernel/power/suspend_test.c        |  5
-rw-r--r--  kernel/rcutree.c                   | 44
-rw-r--r--  kernel/rcutree.h                   | 10
-rw-r--r--  kernel/rcutree_plugin.h            | 46
-rw-r--r--  kernel/sched.c                     | 13
-rw-r--r--  kernel/sched_fair.c                | 27
-rw-r--r--  kernel/sys.c                       | 25
-rw-r--r--  kernel/sysctl_check.c              |  2
-rw-r--r--  kernel/trace/trace.c               |  2
-rw-r--r--  kernel/trace/trace_events_filter.c |  3
-rw-r--r--  kernel/workqueue.c                 | 39

16 files changed, 204 insertions(+), 70 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ca83b73fba19..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
         return -EFAULT;
 
     buffer[nbytes] = 0;     /* nul-terminate */
-    strstrip(buffer);
     if (cft->write_u64) {
-        u64 val = simple_strtoull(buffer, &end, 0);
+        u64 val = simple_strtoull(strstrip(buffer), &end, 0);
         if (*end)
             return -EINVAL;
         retval = cft->write_u64(cgrp, cft, val);
     } else {
-        s64 val = simple_strtoll(buffer, &end, 0);
+        s64 val = simple_strtoll(strstrip(buffer), &end, 0);
         if (*end)
             return -EINVAL;
         retval = cft->write_s64(cgrp, cft, val);
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
     }
 
     buffer[nbytes] = 0;     /* nul-terminate */
-    strstrip(buffer);
-    retval = cft->write_string(cgrp, cft, buffer);
+    retval = cft->write_string(cgrp, cft, strstrip(buffer));
     if (!retval)
         retval = nbytes;
 out:
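The conversion above relies on strstrip() doing two things: trimming trailing whitespace in place and returning a pointer past any leading whitespace. The old code ignored the return value, and since the kernel's simple_strtoull(), unlike userspace strtoull(), does not skip leading blanks, input such as "  1" was rejected with -EINVAL. A minimal userspace sketch of the strip-then-use-the-return-value pattern; strstrip_demo() is an illustrative stand-in, not the kernel helper:

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Trim trailing whitespace in place, return pointer past leading. */
    static char *strstrip_demo(char *s)
    {
        size_t len = strlen(s);

        while (len && isspace((unsigned char)s[len - 1]))
            s[--len] = '\0';
        while (*s && isspace((unsigned char)*s))
            s++;
        return s;
    }

    int main(void)
    {
        char buf[] = "  42  ";
        char *end;
        unsigned long long val = strtoull(strstrip_demo(buf), &end, 0);

        /* *end is '\0' only if the stripped string was fully consumed,
         * mirroring the "if (*end) return -EINVAL;" check above. */
        printf("val=%llu trailing=%d\n", val, *end != '\0');
        return 0;
    }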
diff --git a/kernel/exit.c b/kernel/exit.c
index e61891f80123..f7864ac2ecc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -359,10 +359,8 @@ void __set_special_pids(struct pid *pid)
 {
     struct task_struct *curr = current->group_leader;
 
-    if (task_session(curr) != pid) {
+    if (task_session(curr) != pid)
         change_pid(curr, PIDTYPE_SID, pid);
-        proc_sid_connector(curr);
-    }
 
     if (task_pgrp(curr) != pid)
         change_pid(curr, PIDTYPE_PGID, pid);
diff --git a/kernel/futex.c b/kernel/futex.c
index 4949d336d88d..642f3bbaacc7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
  */
 static inline int match_futex(union futex_key *key1, union futex_key *key2)
 {
-    return (key1->both.word == key2->both.word
+    return (key1 && key2
+        && key1->both.word == key2->both.word
         && key1->both.ptr == key2->both.ptr
         && key1->both.offset == key2->both.offset);
 }
@@ -1028,7 +1029,6 @@ static inline
 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
                struct futex_hash_bucket *hb)
 {
-    drop_futex_key_refs(&q->key);
     get_futex_key_refs(key);
     q->key = *key;
 
@@ -1226,6 +1226,7 @@ retry_private:
          */
         if (ret == 1) {
             WARN_ON(pi_state);
+            drop_count++;
             task_count++;
             ret = get_futex_value_locked(&curval2, uaddr2);
             if (!ret)
@@ -1304,6 +1305,7 @@ retry_private:
             if (ret == 1) {
                 /* We got the lock. */
                 requeue_pi_wake_futex(this, &key2, hb2);
+                drop_count++;
                 continue;
             } else if (ret) {
                 /* -EDEADLK */
@@ -1791,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
                          current->timer_slack_ns);
     }
 
+retry:
     /* Prepare to wait on uaddr. */
     ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
     if (ret)
@@ -1808,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
         goto out_put_key;
 
     /*
-     * We expect signal_pending(current), but another thread may
-     * have handled it for us already.
+     * We expect signal_pending(current), but we might be the
+     * victim of a spurious wakeup as well.
      */
+    if (!signal_pending(current)) {
+        put_futex_key(fshared, &q.key);
+        goto retry;
+    }
+
     ret = -ERESTARTSYS;
     if (!abs_time)
         goto out_put_key;
@@ -2118,9 +2126,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
          */
         plist_del(&q->list, &q->list.plist);
 
+        /* Handle spurious wakeups gracefully */
+        ret = -EAGAIN;
         if (timeout && !timeout->task)
             ret = -ETIMEDOUT;
-        else
+        else if (signal_pending(current))
             ret = -ERESTARTNOINTR;
     }
     return ret;
@@ -2198,6 +2208,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
     debug_rt_mutex_init_waiter(&rt_waiter);
     rt_waiter.task = NULL;
 
+retry:
     key2 = FUTEX_KEY_INIT;
     ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
     if (unlikely(ret != 0))
@@ -2292,6 +2303,9 @@ out_put_keys:
 out_key2:
     put_futex_key(fshared, &key2);
 
+    /* Spurious wakeup ? */
+    if (ret == -EAGAIN)
+        goto retry;
 out:
     if (to) {
         hrtimer_cancel(&to->timer);
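The futex_wait() and futex_wait_requeue_pi() changes above encode the same rule: a waiter can be woken with no value change, no timeout, and no signal pending, so a wakeup must be treated as a hint and the wait retried until a real cause is found. Userspace condition waits follow the same discipline; a minimal pthread sketch (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool ready;

    static void wait_for_ready(void)
    {
        pthread_mutex_lock(&lock);
        while (!ready)          /* tolerate spurious wakeups */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
    }

    static void set_ready(void)
    {
        pthread_mutex_lock(&lock);
        ready = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }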
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b2..9af56723c096 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+    return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
     int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
     return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
     if (time > lt->max)
         lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
     struct lock_class_stats *stats;
-    s64 holdtime;
+    u64 holdtime;
 
     if (!lock_stat)
         return;
 
-    holdtime = sched_clock() - hlock->holdtime_stamp;
+    holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
     stats = get_lock_stats(hlock_class(hlock));
     if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
     hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
     hlock->waittime_stamp = 0;
-    hlock->holdtime_stamp = sched_clock();
+    hlock->holdtime_stamp = lockstat_clock();
 #endif
 
     if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
     if (hlock->instance != lock)
         return;
 
-    hlock->waittime_stamp = sched_clock();
+    hlock->waittime_stamp = lockstat_clock();
 
     contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
     contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
     struct held_lock *hlock, *prev_hlock;
     struct lock_class_stats *stats;
     unsigned int depth;
-    u64 now;
-    s64 waittime = 0;
+    u64 now, waittime = 0;
     int i, cpu;
 
     depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
 
     cpu = smp_processor_id();
     if (hlock->waittime_stamp) {
-        now = sched_clock();
+        now = lockstat_clock();
         waittime = now - hlock->waittime_stamp;
         hlock->holdtime_stamp = now;
     }
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..afb7ef3dbc44 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
     u64 interrupts, freq;
 
     spin_lock(&ctx->lock);
-    list_for_each_entry(event, &ctx->group_list, group_entry) {
+    list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
         if (event->state != PERF_EVENT_STATE_ACTIVE)
             continue;
 
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 17d8bb1acf9c..25596e450ac7 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -19,7 +19,7 @@
  * The time it takes is system-specific though, so when we test this
  * during system bootup we allow a LOT of time.
  */
-#define TEST_SUSPEND_SECONDS    5
+#define TEST_SUSPEND_SECONDS    10
 
 static unsigned long suspend_test_start_time;
 
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label)
      * has some performance issues.  The stack dump of a WARN_ON
      * is more likely to get the right attention than a printk...
      */
-    WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
+    WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
+         "Component: %s, time: %u\n", label, msec);
 }
 
 /*
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 705f02ac7433..0536125b0497 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -913,7 +913,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
             spin_unlock(&rnp->lock); /* irqs remain disabled. */
             break;
         }
-        rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+        /*
+         * If there was a task blocking the current grace period,
+         * and if all CPUs have checked in, we need to propagate
+         * the quiescent state up the rcu_node hierarchy.  But that
+         * is inconvenient at the moment due to deadlock issues if
+         * this should end the current grace period.  So set the
+         * offlined CPU's bit in ->qsmask in order to force the
+         * next force_quiescent_state() invocation to clean up this
+         * mess in a deadlock-free manner.
+         */
+        if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+            rnp->qsmask |= mask;
+
         mask = rnp->grpmask;
         spin_unlock(&rnp->lock); /* irqs remain disabled. */
         rnp = rnp->parent;
@@ -958,7 +971,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Thottle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
     unsigned long flags;
     struct rcu_head *next, *list, **tail;
@@ -1011,6 +1024,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
     if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
         rdp->blimit = blimit;
 
+    /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+    if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+        rdp->qlen_last_fqs_check = 0;
+        rdp->n_force_qs_snap = rsp->n_force_qs;
+    } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+        rdp->qlen_last_fqs_check = rdp->qlen;
+
     local_irq_restore(flags);
 
     /* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1224,7 +1244,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
     }
 
     /* If there are callbacks ready, invoke them. */
-    rcu_do_batch(rdp);
+    rcu_do_batch(rsp, rdp);
 }
 
 /*
@@ -1288,10 +1308,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
         rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
     }
 
-    /* Force the grace period if too many callbacks or too long waiting. */
-    if (unlikely(++rdp->qlen > qhimark)) {
+    /*
+     * Force the grace period if too many callbacks or too long waiting.
+     * Enforce hysteresis, and don't invoke force_quiescent_state()
+     * if some other CPU has recently done so.  Also, don't bother
+     * invoking force_quiescent_state() if the newly enqueued callback
+     * is the only one waiting for a grace period to complete.
+     */
+    if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
         rdp->blimit = LONG_MAX;
-        force_quiescent_state(rsp, 0);
+        if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+            *rdp->nxttail[RCU_DONE_TAIL] != head)
+            force_quiescent_state(rsp, 0);
+        rdp->n_force_qs_snap = rsp->n_force_qs;
+        rdp->qlen_last_fqs_check = rdp->qlen;
     } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
         force_quiescent_state(rsp, 1);
     local_irq_restore(flags);
@@ -1523,6 +1553,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
     rdp->beenonline = 1;     /* We have now been online. */
     rdp->preemptable = preemptable;
     rdp->passed_quiesc_completed = lastcomp - 1;
+    rdp->qlen_last_fqs_check = 0;
+    rdp->n_force_qs_snap = rsp->n_force_qs;
     rdp->blimit = blimit;
     spin_unlock(&rnp->lock);        /* irqs remain disabled. */
 
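The __call_rcu() hunk above is a hysteresis scheme: a CPU considers forcing a grace period only once its callback queue has grown qhimark beyond the level recorded at the last check, and even then skips the call when some other CPU has forced one since the last snapshot. Extracted as a standalone predicate, the shape of the check looks roughly like this (a sketch reusing the patch's field names, not kernel code):

    struct cb_state {
        long qlen;                      /* callbacks currently queued */
        long qlen_last_fqs_check;       /* qlen at the last check */
        unsigned long n_force_qs_snap;  /* rsp->n_force_qs last seen */
    };

    static int should_force_qs(struct cb_state *s, unsigned long n_force_qs,
                               long qhimark)
    {
        /* Hysteresis: re-arm only after qhimark more callbacks queue up. */
        if (s->qlen <= s->qlen_last_fqs_check + qhimark)
            return 0;
        /* Skip if another CPU forced a grace period since our snapshot. */
        return n_force_qs == s->n_force_qs_snap;
    }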
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index b40ac5706040..1823c6e20609 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -167,6 +167,10 @@ struct rcu_data {
     struct rcu_head *nxtlist;
     struct rcu_head **nxttail[RCU_NEXT_SIZE];
     long qlen;              /* # of queued callbacks */
+    long qlen_last_fqs_check;
+                            /* qlen at last check for QS forcing */
+    unsigned long n_force_qs_snap;
+                            /* did other CPU force QS recently? */
     long blimit;            /* Upper limit on a processed batch */
 
 #ifdef CONFIG_NO_HZ
@@ -302,9 +306,9 @@ static void rcu_print_task_stall(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                      struct rcu_node *rnp,
-                      struct rcu_data *rdp);
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                     struct rcu_node *rnp,
+                     struct rcu_data *rdp);
 static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c0cb783aa16a..ef2a58c2b9d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  * parent is to remove the need for rcu_read_unlock_special() to
  * make more than two attempts to acquire the target rcu_node's lock.
  *
+ * Returns 1 if there was previously a task blocking the current grace
+ * period on the specified rcu_node structure.
+ *
  * The caller must hold rnp->lock with irqs disabled.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                      struct rcu_node *rnp,
-                      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                     struct rcu_node *rnp,
+                     struct rcu_data *rdp)
 {
     int i;
     struct list_head *lp;
     struct list_head *lp_root;
+    int retval = rcu_preempted_readers(rnp);
     struct rcu_node *rnp_root = rcu_get_root(rsp);
     struct task_struct *tp;
 
     if (rnp == rnp_root) {
         WARN_ONCE(1, "Last CPU thought to be offlined?");
-        return;  /* Shouldn't happen: at least one CPU online. */
+        return 0;  /* Shouldn't happen: at least one CPU online. */
     }
     WARN_ON_ONCE(rnp != rdp->mynode &&
              (!list_empty(&rnp->blocked_tasks[0]) ||
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
             spin_unlock(&rnp_root->lock); /* irqs remain disabled */
         }
     }
+
+    return retval;
 }
 
 /*
@@ -393,6 +399,17 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
+ * Wait for an rcu-preempt grace period.  We are supposed to expedite the
+ * grace period, but this is the crude slow compatability hack, so just
+ * invoke synchronize_rcu().
+ */
+void synchronize_rcu_expedited(void)
+{
+    synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
  * Check to see if there is any immediate preemptable-RCU-related work
  * to be done.
  */
@@ -521,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 
 /*
  * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                      struct rcu_node *rnp,
-                      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+                     struct rcu_node *rnp,
+                     struct rcu_data *rdp)
 {
+    return 0;
 }
 
 /*
@@ -565,6 +585,16 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
+ * Wait for an rcu-preempt grace period, but make it happen quickly.
+ * But because preemptable RCU does not exist, map to rcu-sched.
+ */
+void synchronize_rcu_expedited(void)
+{
+    synchronize_sched_expedited();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
  * Because preemptable RCU does not exist, it never has any work to do.
  */
 static int rcu_preempt_pending(int cpu)
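Both definitions above give every configuration a synchronize_rcu_expedited() entry point: the preemptible-RCU build falls back to plain synchronize_rcu() for now, while the non-preemptible build maps it onto synchronize_sched_expedited(). A hedged sketch of a typical update-side caller; the cfg type, the global pointer, and the assumption that an update-side lock is held are all illustrative:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct cfg { int value; };
    static struct cfg *live_cfg;        /* read under rcu_read_lock() */

    /* Caller is assumed to hold a (hypothetical) update-side lock. */
    static void replace_cfg(struct cfg *next)
    {
        struct cfg *old = live_cfg;

        rcu_assign_pointer(live_cfg, next);
        synchronize_rcu_expedited();    /* expedited grace period */
        kfree(old);                     /* no reader can still see it */
    }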
diff --git a/kernel/sched.c b/kernel/sched.c
index 76c0e9691fc0..e88689522e66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
@@ -2311,7 +2312,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
     int cpu, orig_cpu, this_cpu, success = 0;
     unsigned long flags;
-    struct rq *rq;
+    struct rq *rq, *orig_rq;
 
     if (!sched_feat(SYNC_WAKEUPS))
         wake_flags &= ~WF_SYNC;
@@ -2319,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
     this_cpu = get_cpu();
 
     smp_wmb();
-    rq = task_rq_lock(p, &flags);
+    rq = orig_rq = task_rq_lock(p, &flags);
     update_rq_clock(rq);
     if (!(p->state & state))
         goto out;
@@ -2350,6 +2351,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
     set_task_cpu(p, cpu);
 
     rq = task_rq_lock(p, &flags);
+
+    if (rq != orig_rq)
+        update_rq_clock(rq);
+
     WARN_ON(p->state != TASK_WAKING);
     cpu = task_cpu(p);
 
@@ -3656,6 +3661,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
@@ -6718,9 +6724,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4e777b47eeda..c32c3e643daa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -861,12 +861,21 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
     struct sched_entity *se = __pick_next_entity(cfs_rq);
+    struct sched_entity *buddy;
 
-    if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
-        return cfs_rq->next;
+    if (cfs_rq->next) {
+        buddy = cfs_rq->next;
+        cfs_rq->next = NULL;
+        if (wakeup_preempt_entity(buddy, se) < 1)
+            return buddy;
+    }
 
-    if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
-        return cfs_rq->last;
+    if (cfs_rq->last) {
+        buddy = cfs_rq->last;
+        cfs_rq->last = NULL;
+        if (wakeup_preempt_entity(buddy, se) < 1)
+            return buddy;
+    }
 
     return se;
 }
@@ -1654,16 +1663,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
     do {
         se = pick_next_entity(cfs_rq);
-        /*
-         * If se was a buddy, clear it so that it will have to earn
-         * the favour again.
-         *
-         * If se was not a buddy, clear the buddies because neither
-         * was elegible to run, let them earn it again.
-         *
-         * IOW. unconditionally clear buddies.
-         */
-        __clear_buddies(cfs_rq, NULL);
         set_next_entity(cfs_rq, se);
         cfs_rq = group_cfs_rq(se);
     } while (cfs_rq);
diff --git a/kernel/sys.c b/kernel/sys.c
index 255475d163e0..ce17760d9c51 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid)
     err = session;
 out:
     write_unlock_irq(&tasklist_lock);
+    if (err > 0)
+        proc_sid_connector(group_leader);
     return err;
 }
 
@@ -1546,24 +1548,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
             if (arg4 | arg5)
                 return -EINVAL;
             switch (arg2) {
-            case 0:
+            case PR_MCE_KILL_CLEAR:
                 if (arg3 != 0)
                     return -EINVAL;
                 current->flags &= ~PF_MCE_PROCESS;
                 break;
-            case 1:
+            case PR_MCE_KILL_SET:
                 current->flags |= PF_MCE_PROCESS;
-                if (arg3 != 0)
+                if (arg3 == PR_MCE_KILL_EARLY)
                     current->flags |= PF_MCE_EARLY;
-                else
+                else if (arg3 == PR_MCE_KILL_LATE)
                     current->flags &= ~PF_MCE_EARLY;
+                else if (arg3 == PR_MCE_KILL_DEFAULT)
+                    current->flags &=
+                        ~(PF_MCE_EARLY|PF_MCE_PROCESS);
+                else
+                    return -EINVAL;
                 break;
             default:
                 return -EINVAL;
             }
             error = 0;
             break;
-
+        case PR_MCE_KILL_GET:
+            if (arg2 | arg3 | arg4 | arg5)
+                return -EINVAL;
+            if (current->flags & PF_MCE_PROCESS)
+                error = (current->flags & PF_MCE_EARLY) ?
+                    PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
+            else
+                error = PR_MCE_KILL_DEFAULT;
+            break;
         default:
             error = -EINVAL;
             break;
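From userspace, the reworked interface is driven through the PR_MCE_KILL* constants in linux/prctl.h, with the new PR_MCE_KILL_GET option returning the current policy as the prctl() return value. A small sketch (assumes headers recent enough to define these constants):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
        /* Opt this process in to early machine-check kills. */
        if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
            perror("PR_MCE_KILL");

        /* PR_MCE_KILL_GET returns EARLY/LATE/DEFAULT directly. */
        printf("mce kill policy: %d\n", prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));
        return 0;
    }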
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b38423ca711a..b6e7aaea4604 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
         if (!table->ctl_name && table->strategy)
             set_fail(&fail, table, "Strategy without ctl_name");
 #endif
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_PROC_SYSCTL
         if (table->procname && !table->proc_handler)
             set_fail(&fail, table, "No proc_handler");
 #endif
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..c820b0310a12 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
 
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
-    return trace_array_printk(&global_trace, ip, fmt, args);
+    return trace_array_vprintk(&global_trace, ip, fmt, args);
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927f..98a6cc5c64ed 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps)
 
     while (!list_empty(&ps->postfix)) {
         elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
-        kfree(elt->operand);
         list_del(&elt->list);
+        kfree(elt->operand);
+        kfree(elt);
     }
 }
 
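The fix above does two things: it moves list_del() ahead of the kfree() calls, and it frees the postfix_elt itself, which previously leaked on every filter teardown. The same teardown is often written with the _safe list iterator; a sketch with an illustrative element type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct elt_like {
        struct list_head list;
        char *operand;
    };

    static void clear_all(struct list_head *head)
    {
        struct elt_like *elt, *tmp;

        /* _safe variant: 'tmp' keeps iteration valid across list_del(). */
        list_for_each_entry_safe(elt, tmp, head, list) {
            list_del(&elt->list);
            kfree(elt->operand);
            kfree(elt);         /* the element itself, leaked before */
        }
    }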
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..12328147132c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+    if (del_timer_sync(&dwork->timer)) {
+        struct cpu_workqueue_struct *cwq;
+        cwq = wq_per_cpu(keventd_wq, get_cpu());
+        __queue_work(cwq, &dwork->work);
+        put_cpu();
+    }
+    flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
@@ -667,21 +685,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
     int cpu;
+    int orig = -1;
     struct work_struct *works;
 
     works = alloc_percpu(struct work_struct);
     if (!works)
         return -ENOMEM;
 
+    /*
+     * when running in keventd don't schedule a work item on itself.
+     * Can just call directly because the work queue is already bound.
+     * This also is faster.
+     * Make this a generic parameter for other workqueues?
+     */
+    if (current_is_keventd()) {
+        orig = raw_smp_processor_id();
+        INIT_WORK(per_cpu_ptr(works, orig), func);
+        func(per_cpu_ptr(works, orig));
+    }
+
     get_online_cpus();
     for_each_online_cpu(cpu) {
         struct work_struct *work = per_cpu_ptr(works, cpu);
 
+        if (cpu == orig)
+            continue;
         INIT_WORK(work, func);
         schedule_work_on(cpu, work);
     }
-    for_each_online_cpu(cpu)
-        flush_work(per_cpu_ptr(works, cpu));
+    for_each_online_cpu(cpu) {
+        if (cpu != orig)
+            flush_work(per_cpu_ptr(works, cpu));
+    }
     put_online_cpus();
     free_percpu(works);
     return 0;
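A hedged usage sketch for the new flush_delayed_work(): unlike cancel_delayed_work_sync(), a pending-but-not-yet-expired item is promoted to run immediately rather than being discarded, and the call then waits for the callback to finish, so no queued work is lost on teardown (names below are illustrative):

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
        /* ... do the deferred work ... */
    }

    static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

    static void my_teardown(void)
    {
        /* Runs a still-pending item now instead of dropping it, then
         * blocks until my_work_fn() has completed. */
        flush_delayed_work(&my_dwork);
    }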