Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c  23
-rw-r--r--  kernel/exit.c  6
-rw-r--r--  kernel/fork.c  12
-rw-r--r--  kernel/futex.c  160
-rw-r--r--  kernel/hrtimer.c  55
-rw-r--r--  kernel/irq/handle.c  1
-rw-r--r--  kernel/irq/spurious.c  2
-rw-r--r--  kernel/kprobes.c  4
-rw-r--r--  kernel/kthread.c  23
-rw-r--r--  kernel/lockdep.c  20
-rw-r--r--  kernel/module.c  8
-rw-r--r--  kernel/mutex-debug.c  1
-rw-r--r--  kernel/panic.c  3
-rw-r--r--  kernel/params.c  17
-rw-r--r--  kernel/perf_event.c  352
-rw-r--r--  kernel/power/hibernate.c  11
-rw-r--r--  kernel/power/suspend_test.c  5
-rw-r--r--  kernel/power/swap.c  43
-rw-r--r--  kernel/rcupdate.c  140
-rw-r--r--  kernel/rcutorture.c  4
-rw-r--r--  kernel/rcutree.c  390
-rw-r--r--  kernel/rcutree.h  97
-rw-r--r--  kernel/rcutree_plugin.h  149
-rw-r--r--  kernel/rcutree_trace.c  14
-rw-r--r--  kernel/relay.c  2
-rw-r--r--  kernel/res_counter.c  18
-rw-r--r--  kernel/sched.c  100
-rw-r--r--  kernel/sched_clock.c  4
-rw-r--r--  kernel/sched_fair.c  74
-rw-r--r--  kernel/sys.c  25
-rw-r--r--  kernel/sysctl_check.c  2
-rw-r--r--  kernel/time/clocksource.c  4
-rw-r--r--  kernel/time/tick-sched.c  9
-rw-r--r--  kernel/time/timekeeping.c  1
-rw-r--r--  kernel/time/timer_list.c  2
-rw-r--r--  kernel/time/timer_stats.c  2
-rw-r--r--  kernel/trace/blktrace.c  39
-rw-r--r--  kernel/trace/ftrace.c  58
-rw-r--r--  kernel/trace/kmemtrace.c  2
-rw-r--r--  kernel/trace/ring_buffer.c  14
-rw-r--r--  kernel/trace/trace.c  12
-rw-r--r--  kernel/trace/trace_branch.c  8
-rw-r--r--  kernel/trace/trace_event_profile.c  15
-rw-r--r--  kernel/trace/trace_events.c  7
-rw-r--r--  kernel/trace/trace_events_filter.c  3
-rw-r--r--  kernel/trace/trace_hw_branches.c  8
-rw-r--r--  kernel/trace/trace_output.c  23
-rw-r--r--  kernel/trace/trace_syscalls.c  4
-rw-r--r--  kernel/user.c  2
-rw-r--r--  kernel/workqueue.c  35
50 files changed, 1236 insertions, 777 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7ccba4bc5e3b..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -703,7 +703,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
703static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); 703static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
704static int cgroup_populate_dir(struct cgroup *cgrp); 704static int cgroup_populate_dir(struct cgroup *cgrp);
705static const struct inode_operations cgroup_dir_inode_operations; 705static const struct inode_operations cgroup_dir_inode_operations;
706static struct file_operations proc_cgroupstats_operations; 706static const struct file_operations proc_cgroupstats_operations;
707 707
708static struct backing_dev_info cgroup_backing_dev_info = { 708static struct backing_dev_info cgroup_backing_dev_info = {
709 .name = "cgroup", 709 .name = "cgroup",
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
1710 return -EFAULT; 1710 return -EFAULT;
1711 1711
1712 buffer[nbytes] = 0; /* nul-terminate */ 1712 buffer[nbytes] = 0; /* nul-terminate */
1713 strstrip(buffer);
1714 if (cft->write_u64) { 1713 if (cft->write_u64) {
1715 u64 val = simple_strtoull(buffer, &end, 0); 1714 u64 val = simple_strtoull(strstrip(buffer), &end, 0);
1716 if (*end) 1715 if (*end)
1717 return -EINVAL; 1716 return -EINVAL;
1718 retval = cft->write_u64(cgrp, cft, val); 1717 retval = cft->write_u64(cgrp, cft, val);
1719 } else { 1718 } else {
1720 s64 val = simple_strtoll(buffer, &end, 0); 1719 s64 val = simple_strtoll(strstrip(buffer), &end, 0);
1721 if (*end) 1720 if (*end)
1722 return -EINVAL; 1721 return -EINVAL;
1723 retval = cft->write_s64(cgrp, cft, val); 1722 retval = cft->write_s64(cgrp, cft, val);
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
1753 } 1752 }
1754 1753
1755 buffer[nbytes] = 0; /* nul-terminate */ 1754 buffer[nbytes] = 0; /* nul-terminate */
1756 strstrip(buffer); 1755 retval = cft->write_string(cgrp, cft, strstrip(buffer));
1757 retval = cft->write_string(cgrp, cft, buffer);
1758 if (!retval) 1756 if (!retval)
1759 retval = nbytes; 1757 retval = nbytes;
1760out: 1758out:
@@ -1863,7 +1861,7 @@ static int cgroup_seqfile_release(struct inode *inode, struct file *file)
1863 return single_release(inode, file); 1861 return single_release(inode, file);
1864} 1862}
1865 1863
1866static struct file_operations cgroup_seqfile_operations = { 1864static const struct file_operations cgroup_seqfile_operations = {
1867 .read = seq_read, 1865 .read = seq_read,
1868 .write = cgroup_file_write, 1866 .write = cgroup_file_write,
1869 .llseek = seq_lseek, 1867 .llseek = seq_lseek,
@@ -1922,7 +1920,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
1922 return simple_rename(old_dir, old_dentry, new_dir, new_dentry); 1920 return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1923} 1921}
1924 1922
1925static struct file_operations cgroup_file_operations = { 1923static const struct file_operations cgroup_file_operations = {
1926 .read = cgroup_file_read, 1924 .read = cgroup_file_read,
1927 .write = cgroup_file_write, 1925 .write = cgroup_file_write,
1928 .llseek = generic_file_llseek, 1926 .llseek = generic_file_llseek,
@@ -3369,7 +3367,7 @@ static int cgroup_open(struct inode *inode, struct file *file)
3369 return single_open(file, proc_cgroup_show, pid); 3367 return single_open(file, proc_cgroup_show, pid);
3370} 3368}
3371 3369
3372struct file_operations proc_cgroup_operations = { 3370const struct file_operations proc_cgroup_operations = {
3373 .open = cgroup_open, 3371 .open = cgroup_open,
3374 .read = seq_read, 3372 .read = seq_read,
3375 .llseek = seq_lseek, 3373 .llseek = seq_lseek,
@@ -3398,7 +3396,7 @@ static int cgroupstats_open(struct inode *inode, struct file *file)
3398 return single_open(file, proc_cgroupstats_show, NULL); 3396 return single_open(file, proc_cgroupstats_show, NULL);
3399} 3397}
3400 3398
3401static struct file_operations proc_cgroupstats_operations = { 3399static const struct file_operations proc_cgroupstats_operations = {
3402 .open = cgroupstats_open, 3400 .open = cgroupstats_open,
3403 .read = seq_read, 3401 .read = seq_read,
3404 .llseek = seq_lseek, 3402 .llseek = seq_lseek,
@@ -3708,8 +3706,10 @@ static void check_for_release(struct cgroup *cgrp)
3708void __css_put(struct cgroup_subsys_state *css) 3706void __css_put(struct cgroup_subsys_state *css)
3709{ 3707{
3710 struct cgroup *cgrp = css->cgroup; 3708 struct cgroup *cgrp = css->cgroup;
3709 int val;
3711 rcu_read_lock(); 3710 rcu_read_lock();
3712 if (atomic_dec_return(&css->refcnt) == 1) { 3711 val = atomic_dec_return(&css->refcnt);
3712 if (val == 1) {
3713 if (notify_on_release(cgrp)) { 3713 if (notify_on_release(cgrp)) {
3714 set_bit(CGRP_RELEASABLE, &cgrp->flags); 3714 set_bit(CGRP_RELEASABLE, &cgrp->flags);
3715 check_for_release(cgrp); 3715 check_for_release(cgrp);
@@ -3717,6 +3717,7 @@ void __css_put(struct cgroup_subsys_state *css)
3717 cgroup_wakeup_rmdir_waiter(cgrp); 3717 cgroup_wakeup_rmdir_waiter(cgrp);
3718 } 3718 }
3719 rcu_read_unlock(); 3719 rcu_read_unlock();
3720 WARN_ON_ONCE(val < 1);
3720} 3721}
3721 3722
3722/* 3723/*
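
Editor's note: the __css_put() hunk above captures the atomic_dec_return() result in a local so the same value feeds both the release check and the new WARN_ON_ONCE(). A minimal user-space sketch of that read-once-and-sanity-check pattern, using C11 atomics and an invented obj type (not part of the patch):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct obj {
        atomic_int refcnt;          /* base reference keeps this at >= 1 */
    };

    static void obj_put(struct obj *o)
    {
        /*
         * atomic_fetch_sub() returns the value *before* the subtraction,
         * so "old == 2" here corresponds to "dec_return == 1" in the hunk.
         */
        int old = atomic_fetch_sub(&o->refcnt, 1);

        if (old == 2)
            printf("last external reference dropped\n");

        /* The count must never fall below the base reference. */
        assert(old >= 2);
    }

    int main(void)
    {
        struct obj o = { .refcnt = 3 }; /* base ref + two external refs */

        obj_put(&o);                    /* 3 -> 2 */
        obj_put(&o);                    /* 2 -> 1, prints the message */
        return 0;
    }
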
diff --git a/kernel/exit.c b/kernel/exit.c
index 5859f598c951..f7864ac2ecc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -359,10 +359,8 @@ void __set_special_pids(struct pid *pid)
359{ 359{
360 struct task_struct *curr = current->group_leader; 360 struct task_struct *curr = current->group_leader;
361 361
362 if (task_session(curr) != pid) { 362 if (task_session(curr) != pid)
363 change_pid(curr, PIDTYPE_SID, pid); 363 change_pid(curr, PIDTYPE_SID, pid);
364 proc_sid_connector(curr);
365 }
366 364
367 if (task_pgrp(curr) != pid) 365 if (task_pgrp(curr) != pid)
368 change_pid(curr, PIDTYPE_PGID, pid); 366 change_pid(curr, PIDTYPE_PGID, pid);
@@ -991,8 +989,6 @@ NORET_TYPE void do_exit(long code)
991 tsk->mempolicy = NULL; 989 tsk->mempolicy = NULL;
992#endif 990#endif
993#ifdef CONFIG_FUTEX 991#ifdef CONFIG_FUTEX
994 if (unlikely(!list_empty(&tsk->pi_state_list)))
995 exit_pi_state_list(tsk);
996 if (unlikely(current->pi_state_cache)) 992 if (unlikely(current->pi_state_cache))
997 kfree(current->pi_state_cache); 993 kfree(current->pi_state_cache);
998#endif 994#endif
diff --git a/kernel/fork.c b/kernel/fork.c
index 266c6af6ef1b..166b8c49257c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,7 +91,7 @@ int nr_processes(void)
91 int cpu; 91 int cpu;
92 int total = 0; 92 int total = 0;
93 93
94 for_each_online_cpu(cpu) 94 for_each_possible_cpu(cpu)
95 total += per_cpu(process_counts, cpu); 95 total += per_cpu(process_counts, cpu);
96 96
97 return total; 97 return total;
@@ -570,12 +570,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
570 570
571 /* Get rid of any futexes when releasing the mm */ 571 /* Get rid of any futexes when releasing the mm */
572#ifdef CONFIG_FUTEX 572#ifdef CONFIG_FUTEX
573 if (unlikely(tsk->robust_list)) 573 if (unlikely(tsk->robust_list)) {
574 exit_robust_list(tsk); 574 exit_robust_list(tsk);
575 tsk->robust_list = NULL;
576 }
575#ifdef CONFIG_COMPAT 577#ifdef CONFIG_COMPAT
576 if (unlikely(tsk->compat_robust_list)) 578 if (unlikely(tsk->compat_robust_list)) {
577 compat_exit_robust_list(tsk); 579 compat_exit_robust_list(tsk);
580 tsk->compat_robust_list = NULL;
581 }
578#endif 582#endif
583 if (unlikely(!list_empty(&tsk->pi_state_list)))
584 exit_pi_state_list(tsk);
579#endif 585#endif
580 586
581 /* Get rid of any cached register state */ 587 /* Get rid of any cached register state */
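
Editor's note: the fork.c hunks move the PI-state cleanup into mm_release() and clear the robust-list pointers once they have been walked, so no later exit path can process them a second time. A rough user-space sketch of that consume-and-clear idiom, with an invented task/node pair (not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    struct task {
        struct node *pending;       /* one-shot cleanup list */
    };

    static void drain_pending(struct task *t)
    {
        struct node *n = t->pending;

        t->pending = NULL;          /* clear it: later callers see nothing to do */

        while (n) {
            struct node *next = n->next;

            free(n);
            n = next;
        }
    }

    int main(void)
    {
        struct task t = { .pending = calloc(1, sizeof(struct node)) };

        drain_pending(&t);          /* walks and frees the list */
        drain_pending(&t);          /* safe no-op: the pointer was cleared */
        printf("pending=%p\n", (void *)t.pending);
        return 0;
    }
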
diff --git a/kernel/futex.c b/kernel/futex.c
index 248dd119a86e..fb65e822fc41 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -89,36 +89,36 @@ struct futex_pi_state {
89 union futex_key key; 89 union futex_key key;
90}; 90};
91 91
92/* 92/**
93 * We use this hashed waitqueue instead of a normal wait_queue_t, so 93 * struct futex_q - The hashed futex queue entry, one per waiting task
94 * @task: the task waiting on the futex
95 * @lock_ptr: the hash bucket lock
96 * @key: the key the futex is hashed on
97 * @pi_state: optional priority inheritance state
98 * @rt_waiter: rt_waiter storage for use with requeue_pi
99 * @requeue_pi_key: the requeue_pi target futex key
100 * @bitset: bitset for the optional bitmasked wakeup
101 *
102 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
94 * we can wake only the relevant ones (hashed queues may be shared). 103 * we can wake only the relevant ones (hashed queues may be shared).
95 * 104 *
96 * A futex_q has a woken state, just like tasks have TASK_RUNNING. 105 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
97 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. 106 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
98 * The order of wakup is always to make the first condition true, then 107 * The order of wakup is always to make the first condition true, then
99 * wake up q->waiter, then make the second condition true. 108 * the second.
109 *
110 * PI futexes are typically woken before they are removed from the hash list via
111 * the rt_mutex code. See unqueue_me_pi().
100 */ 112 */
101struct futex_q { 113struct futex_q {
102 struct plist_node list; 114 struct plist_node list;
103 /* Waiter reference */
104 struct task_struct *task;
105 115
106 /* Which hash list lock to use: */ 116 struct task_struct *task;
107 spinlock_t *lock_ptr; 117 spinlock_t *lock_ptr;
108
109 /* Key which the futex is hashed on: */
110 union futex_key key; 118 union futex_key key;
111
112 /* Optional priority inheritance state: */
113 struct futex_pi_state *pi_state; 119 struct futex_pi_state *pi_state;
114
115 /* rt_waiter storage for requeue_pi: */
116 struct rt_mutex_waiter *rt_waiter; 120 struct rt_mutex_waiter *rt_waiter;
117
118 /* The expected requeue pi target futex key: */
119 union futex_key *requeue_pi_key; 121 union futex_key *requeue_pi_key;
120
121 /* Bitset for the optional bitmasked wakeup */
122 u32 bitset; 122 u32 bitset;
123}; 123};
124 124
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
150 */ 150 */
151static inline int match_futex(union futex_key *key1, union futex_key *key2) 151static inline int match_futex(union futex_key *key1, union futex_key *key2)
152{ 152{
153 return (key1->both.word == key2->both.word 153 return (key1 && key2
154 && key1->both.word == key2->both.word
154 && key1->both.ptr == key2->both.ptr 155 && key1->both.ptr == key2->both.ptr
155 && key1->both.offset == key2->both.offset); 156 && key1->both.offset == key2->both.offset);
156} 157}
@@ -198,11 +199,12 @@ static void drop_futex_key_refs(union futex_key *key)
198} 199}
199 200
200/** 201/**
201 * get_futex_key - Get parameters which are the keys for a futex. 202 * get_futex_key() - Get parameters which are the keys for a futex
202 * @uaddr: virtual address of the futex 203 * @uaddr: virtual address of the futex
203 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED 204 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
204 * @key: address where result is stored. 205 * @key: address where result is stored.
205 * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE) 206 * @rw: mapping needs to be read/write (values: VERIFY_READ,
207 * VERIFY_WRITE)
206 * 208 *
207 * Returns a negative error code or 0 209 * Returns a negative error code or 0
208 * The key words are stored in *key on success. 210 * The key words are stored in *key on success.
@@ -288,8 +290,8 @@ void put_futex_key(int fshared, union futex_key *key)
288 drop_futex_key_refs(key); 290 drop_futex_key_refs(key);
289} 291}
290 292
291/* 293/**
292 * fault_in_user_writeable - fault in user address and verify RW access 294 * fault_in_user_writeable() - Fault in user address and verify RW access
293 * @uaddr: pointer to faulting user space address 295 * @uaddr: pointer to faulting user space address
294 * 296 *
295 * Slow path to fixup the fault we just took in the atomic write 297 * Slow path to fixup the fault we just took in the atomic write
@@ -309,8 +311,8 @@ static int fault_in_user_writeable(u32 __user *uaddr)
309 311
310/** 312/**
311 * futex_top_waiter() - Return the highest priority waiter on a futex 313 * futex_top_waiter() - Return the highest priority waiter on a futex
312 * @hb: the hash bucket the futex_q's reside in 314 * @hb: the hash bucket the futex_q's reside in
313 * @key: the futex key (to distinguish it from other futex futex_q's) 315 * @key: the futex key (to distinguish it from other futex futex_q's)
314 * 316 *
315 * Must be called with the hb lock held. 317 * Must be called with the hb lock held.
316 */ 318 */
@@ -588,7 +590,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
588} 590}
589 591
590/** 592/**
591 * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex 593 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
592 * @uaddr: the pi futex user address 594 * @uaddr: the pi futex user address
593 * @hb: the pi futex hash bucket 595 * @hb: the pi futex hash bucket
594 * @key: the futex key associated with uaddr and hb 596 * @key: the futex key associated with uaddr and hb
@@ -915,8 +917,8 @@ retry:
915 hb1 = hash_futex(&key1); 917 hb1 = hash_futex(&key1);
916 hb2 = hash_futex(&key2); 918 hb2 = hash_futex(&key2);
917 919
918 double_lock_hb(hb1, hb2);
919retry_private: 920retry_private:
921 double_lock_hb(hb1, hb2);
920 op_ret = futex_atomic_op_inuser(op, uaddr2); 922 op_ret = futex_atomic_op_inuser(op, uaddr2);
921 if (unlikely(op_ret < 0)) { 923 if (unlikely(op_ret < 0)) {
922 924
@@ -1011,9 +1013,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1011 1013
1012/** 1014/**
1013 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue 1015 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1014 * q: the futex_q 1016 * @q: the futex_q
1015 * key: the key of the requeue target futex 1017 * @key: the key of the requeue target futex
1016 * hb: the hash_bucket of the requeue target futex 1018 * @hb: the hash_bucket of the requeue target futex
1017 * 1019 *
1018 * During futex_requeue, with requeue_pi=1, it is possible to acquire the 1020 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1019 * target futex if it is uncontended or via a lock steal. Set the futex_q key 1021 * target futex if it is uncontended or via a lock steal. Set the futex_q key
@@ -1027,7 +1029,6 @@ static inline
1027void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, 1029void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1028 struct futex_hash_bucket *hb) 1030 struct futex_hash_bucket *hb)
1029{ 1031{
1030 drop_futex_key_refs(&q->key);
1031 get_futex_key_refs(key); 1032 get_futex_key_refs(key);
1032 q->key = *key; 1033 q->key = *key;
1033 1034
@@ -1225,6 +1226,7 @@ retry_private:
1225 */ 1226 */
1226 if (ret == 1) { 1227 if (ret == 1) {
1227 WARN_ON(pi_state); 1228 WARN_ON(pi_state);
1229 drop_count++;
1228 task_count++; 1230 task_count++;
1229 ret = get_futex_value_locked(&curval2, uaddr2); 1231 ret = get_futex_value_locked(&curval2, uaddr2);
1230 if (!ret) 1232 if (!ret)
@@ -1303,6 +1305,7 @@ retry_private:
1303 if (ret == 1) { 1305 if (ret == 1) {
1304 /* We got the lock. */ 1306 /* We got the lock. */
1305 requeue_pi_wake_futex(this, &key2, hb2); 1307 requeue_pi_wake_futex(this, &key2, hb2);
1308 drop_count++;
1306 continue; 1309 continue;
1307 } else if (ret) { 1310 } else if (ret) {
1308 /* -EDEADLK */ 1311 /* -EDEADLK */
@@ -1350,6 +1353,25 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1350 return hb; 1353 return hb;
1351} 1354}
1352 1355
1356static inline void
1357queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1358{
1359 spin_unlock(&hb->lock);
1360 drop_futex_key_refs(&q->key);
1361}
1362
1363/**
1364 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1365 * @q: The futex_q to enqueue
1366 * @hb: The destination hash bucket
1367 *
1368 * The hb->lock must be held by the caller, and is released here. A call to
1369 * queue_me() is typically paired with exactly one call to unqueue_me(). The
1370 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1371 * or nothing if the unqueue is done as part of the wake process and the unqueue
1372 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
1373 * an example).
1374 */
1353static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) 1375static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1354{ 1376{
1355 int prio; 1377 int prio;
@@ -1373,19 +1395,17 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1373 spin_unlock(&hb->lock); 1395 spin_unlock(&hb->lock);
1374} 1396}
1375 1397
1376static inline void 1398/**
1377queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) 1399 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1378{ 1400 * @q: The futex_q to unqueue
1379 spin_unlock(&hb->lock); 1401 *
1380 drop_futex_key_refs(&q->key); 1402 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1381} 1403 * be paired with exactly one earlier call to queue_me().
1382 1404 *
1383/* 1405 * Returns:
1384 * queue_me and unqueue_me must be called as a pair, each 1406 * 1 - if the futex_q was still queued (and we removed unqueued it)
1385 * exactly once. They are called with the hashed spinlock held. 1407 * 0 - if the futex_q was already removed by the waking thread
1386 */ 1408 */
1387
1388/* Return 1 if we were still queued (ie. 0 means we were woken) */
1389static int unqueue_me(struct futex_q *q) 1409static int unqueue_me(struct futex_q *q)
1390{ 1410{
1391 spinlock_t *lock_ptr; 1411 spinlock_t *lock_ptr;
@@ -1638,17 +1658,14 @@ out:
1638static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, 1658static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1639 struct hrtimer_sleeper *timeout) 1659 struct hrtimer_sleeper *timeout)
1640{ 1660{
1641 queue_me(q, hb);
1642
1643 /* 1661 /*
1644 * There might have been scheduling since the queue_me(), as we 1662 * The task state is guaranteed to be set before another task can
1645 * cannot hold a spinlock across the get_user() in case it 1663 * wake it. set_current_state() is implemented using set_mb() and
1646 * faults, and we cannot just set TASK_INTERRUPTIBLE state when 1664 * queue_me() calls spin_unlock() upon completion, both serializing
1647 * queueing ourselves into the futex hash. This code thus has to 1665 * access to the hash list and forcing another memory barrier.
1648 * rely on the futex_wake() code removing us from hash when it
1649 * wakes us up.
1650 */ 1666 */
1651 set_current_state(TASK_INTERRUPTIBLE); 1667 set_current_state(TASK_INTERRUPTIBLE);
1668 queue_me(q, hb);
1652 1669
1653 /* Arm the timer */ 1670 /* Arm the timer */
1654 if (timeout) { 1671 if (timeout) {
@@ -1658,8 +1675,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1658 } 1675 }
1659 1676
1660 /* 1677 /*
1661 * !plist_node_empty() is safe here without any lock. 1678 * If we have been removed from the hash list, then another task
1662 * q.lock_ptr != 0 is not safe, because of ordering against wakeup. 1679 * has tried to wake us, and we can skip the call to schedule().
1663 */ 1680 */
1664 if (likely(!plist_node_empty(&q->list))) { 1681 if (likely(!plist_node_empty(&q->list))) {
1665 /* 1682 /*
@@ -1776,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1776 current->timer_slack_ns); 1793 current->timer_slack_ns);
1777 } 1794 }
1778 1795
1796retry:
1779 /* Prepare to wait on uaddr. */ 1797 /* Prepare to wait on uaddr. */
1780 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); 1798 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
1781 if (ret) 1799 if (ret)
@@ -1793,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1793 goto out_put_key; 1811 goto out_put_key;
1794 1812
1795 /* 1813 /*
1796 * We expect signal_pending(current), but another thread may 1814 * We expect signal_pending(current), but we might be the
1797 * have handled it for us already. 1815 * victim of a spurious wakeup as well.
1798 */ 1816 */
1817 if (!signal_pending(current)) {
1818 put_futex_key(fshared, &q.key);
1819 goto retry;
1820 }
1821
1799 ret = -ERESTARTSYS; 1822 ret = -ERESTARTSYS;
1800 if (!abs_time) 1823 if (!abs_time)
1801 goto out_put_key; 1824 goto out_put_key;
@@ -2102,11 +2125,12 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2102 * Unqueue the futex_q and determine which it was. 2125 * Unqueue the futex_q and determine which it was.
2103 */ 2126 */
2104 plist_del(&q->list, &q->list.plist); 2127 plist_del(&q->list, &q->list.plist);
2105 drop_futex_key_refs(&q->key);
2106 2128
2129 /* Handle spurious wakeups gracefully */
2130 ret = -EWOULDBLOCK;
2107 if (timeout && !timeout->task) 2131 if (timeout && !timeout->task)
2108 ret = -ETIMEDOUT; 2132 ret = -ETIMEDOUT;
2109 else 2133 else if (signal_pending(current))
2110 ret = -ERESTARTNOINTR; 2134 ret = -ERESTARTNOINTR;
2111 } 2135 }
2112 return ret; 2136 return ret;
@@ -2114,12 +2138,12 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2114 2138
2115/** 2139/**
2116 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 2140 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2117 * @uaddr: the futex we initialyl wait on (non-pi) 2141 * @uaddr: the futex we initially wait on (non-pi)
2118 * @fshared: whether the futexes are shared (1) or not (0). They must be 2142 * @fshared: whether the futexes are shared (1) or not (0). They must be
2119 * the same type, no requeueing from private to shared, etc. 2143 * the same type, no requeueing from private to shared, etc.
2120 * @val: the expected value of uaddr 2144 * @val: the expected value of uaddr
2121 * @abs_time: absolute timeout 2145 * @abs_time: absolute timeout
2122 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all. 2146 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2123 * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) 2147 * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
2124 * @uaddr2: the pi futex we will take prior to returning to user-space 2148 * @uaddr2: the pi futex we will take prior to returning to user-space
2125 * 2149 *
@@ -2246,7 +2270,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2246 res = fixup_owner(uaddr2, fshared, &q, !ret); 2270 res = fixup_owner(uaddr2, fshared, &q, !ret);
2247 /* 2271 /*
2248 * If fixup_owner() returned an error, proprogate that. If it 2272 * If fixup_owner() returned an error, proprogate that. If it
2249 * acquired the lock, clear our -ETIMEDOUT or -EINTR. 2273 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2250 */ 2274 */
2251 if (res) 2275 if (res)
2252 ret = (res < 0) ? res : 0; 2276 ret = (res < 0) ? res : 0;
@@ -2302,9 +2326,9 @@ out:
2302 */ 2326 */
2303 2327
2304/** 2328/**
2305 * sys_set_robust_list - set the robust-futex list head of a task 2329 * sys_set_robust_list() - Set the robust-futex list head of a task
2306 * @head: pointer to the list-head 2330 * @head: pointer to the list-head
2307 * @len: length of the list-head, as userspace expects 2331 * @len: length of the list-head, as userspace expects
2308 */ 2332 */
2309SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, 2333SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2310 size_t, len) 2334 size_t, len)
@@ -2323,10 +2347,10 @@ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2323} 2347}
2324 2348
2325/** 2349/**
2326 * sys_get_robust_list - get the robust-futex list head of a task 2350 * sys_get_robust_list() - Get the robust-futex list head of a task
2327 * @pid: pid of the process [zero for current task] 2351 * @pid: pid of the process [zero for current task]
2328 * @head_ptr: pointer to a list-head pointer, the kernel fills it in 2352 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2329 * @len_ptr: pointer to a length field, the kernel fills in the header size 2353 * @len_ptr: pointer to a length field, the kernel fills in the header size
2330 */ 2354 */
2331SYSCALL_DEFINE3(get_robust_list, int, pid, 2355SYSCALL_DEFINE3(get_robust_list, int, pid,
2332 struct robust_list_head __user * __user *, head_ptr, 2356 struct robust_list_head __user * __user *, head_ptr,
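
Editor's note: several of the futex hunks above (the retry label in futex_wait() and the -EWOULDBLOCK default in handle_early_requeue_pi_wakeup()) make the wait paths tolerate wakeups that arrive with neither a timeout expiry nor a pending signal. The user-space analogue of that discipline, shown only as a reminder of the pattern and not as part of the patch, is the mandatory predicate loop around pthread_cond_wait():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int ready;               /* the condition we are actually waiting for */

    static void *waiter(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!ready)              /* a wakeup without ready == 1 is spurious */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        printf("woken for real\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        pthread_mutex_lock(&lock);
        ready = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }
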
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e5d98ce50f89..3e1c36e7998f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -509,13 +509,14 @@ static inline int hrtimer_hres_active(void)
509 * next event 509 * next event
510 * Called with interrupts disabled and base->lock held 510 * Called with interrupts disabled and base->lock held
511 */ 511 */
512static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) 512static void
513hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
513{ 514{
514 int i; 515 int i;
515 struct hrtimer_clock_base *base = cpu_base->clock_base; 516 struct hrtimer_clock_base *base = cpu_base->clock_base;
516 ktime_t expires; 517 ktime_t expires, expires_next;
517 518
518 cpu_base->expires_next.tv64 = KTIME_MAX; 519 expires_next.tv64 = KTIME_MAX;
519 520
520 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { 521 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
521 struct hrtimer *timer; 522 struct hrtimer *timer;
@@ -531,10 +532,15 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
531 */ 532 */
532 if (expires.tv64 < 0) 533 if (expires.tv64 < 0)
533 expires.tv64 = 0; 534 expires.tv64 = 0;
534 if (expires.tv64 < cpu_base->expires_next.tv64) 535 if (expires.tv64 < expires_next.tv64)
535 cpu_base->expires_next = expires; 536 expires_next = expires;
536 } 537 }
537 538
539 if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
540 return;
541
542 cpu_base->expires_next.tv64 = expires_next.tv64;
543
538 if (cpu_base->expires_next.tv64 != KTIME_MAX) 544 if (cpu_base->expires_next.tv64 != KTIME_MAX)
539 tick_program_event(cpu_base->expires_next, 1); 545 tick_program_event(cpu_base->expires_next, 1);
540} 546}
@@ -617,7 +623,7 @@ static void retrigger_next_event(void *arg)
617 base->clock_base[CLOCK_REALTIME].offset = 623 base->clock_base[CLOCK_REALTIME].offset =
618 timespec_to_ktime(realtime_offset); 624 timespec_to_ktime(realtime_offset);
619 625
620 hrtimer_force_reprogram(base); 626 hrtimer_force_reprogram(base, 0);
621 spin_unlock(&base->lock); 627 spin_unlock(&base->lock);
622} 628}
623 629
@@ -720,8 +726,6 @@ static int hrtimer_switch_to_hres(void)
720 /* "Retrigger" the interrupt to get things going */ 726 /* "Retrigger" the interrupt to get things going */
721 retrigger_next_event(NULL); 727 retrigger_next_event(NULL);
722 local_irq_restore(flags); 728 local_irq_restore(flags);
723 printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
724 smp_processor_id());
725 return 1; 729 return 1;
726} 730}
727 731
@@ -730,7 +734,8 @@ static int hrtimer_switch_to_hres(void)
730static inline int hrtimer_hres_active(void) { return 0; } 734static inline int hrtimer_hres_active(void) { return 0; }
731static inline int hrtimer_is_hres_enabled(void) { return 0; } 735static inline int hrtimer_is_hres_enabled(void) { return 0; }
732static inline int hrtimer_switch_to_hres(void) { return 0; } 736static inline int hrtimer_switch_to_hres(void) { return 0; }
733static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } 737static inline void
738hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
734static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, 739static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
735 struct hrtimer_clock_base *base, 740 struct hrtimer_clock_base *base,
736 int wakeup) 741 int wakeup)
@@ -873,19 +878,29 @@ static void __remove_hrtimer(struct hrtimer *timer,
873 struct hrtimer_clock_base *base, 878 struct hrtimer_clock_base *base,
874 unsigned long newstate, int reprogram) 879 unsigned long newstate, int reprogram)
875{ 880{
876 if (timer->state & HRTIMER_STATE_ENQUEUED) { 881 if (!(timer->state & HRTIMER_STATE_ENQUEUED))
877 /* 882 goto out;
878 * Remove the timer from the rbtree and replace the 883
879 * first entry pointer if necessary. 884 /*
880 */ 885 * Remove the timer from the rbtree and replace the first
881 if (base->first == &timer->node) { 886 * entry pointer if necessary.
882 base->first = rb_next(&timer->node); 887 */
883 /* Reprogram the clock event device. if enabled */ 888 if (base->first == &timer->node) {
884 if (reprogram && hrtimer_hres_active()) 889 base->first = rb_next(&timer->node);
885 hrtimer_force_reprogram(base->cpu_base); 890#ifdef CONFIG_HIGH_RES_TIMERS
891 /* Reprogram the clock event device. if enabled */
892 if (reprogram && hrtimer_hres_active()) {
893 ktime_t expires;
894
895 expires = ktime_sub(hrtimer_get_expires(timer),
896 base->offset);
897 if (base->cpu_base->expires_next.tv64 == expires.tv64)
898 hrtimer_force_reprogram(base->cpu_base, 1);
886 } 899 }
887 rb_erase(&timer->node, &base->active); 900#endif
888 } 901 }
902 rb_erase(&timer->node, &base->active);
903out:
889 timer->state = newstate; 904 timer->state = newstate;
890} 905}
891 906
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a81cf80554db..17c71bb565c6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/sched.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/random.h> 17#include <linux/random.h>
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 114e704760fe..bd7273e6282e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -121,7 +121,9 @@ static void poll_all_shared_irqs(void)
121 if (!(status & IRQ_SPURIOUS_DISABLED)) 121 if (!(status & IRQ_SPURIOUS_DISABLED))
122 continue; 122 continue;
123 123
124 local_irq_disable();
124 try_one_irq(i, desc); 125 try_one_irq(i, desc);
126 local_irq_enable();
125 } 127 }
126} 128}
127 129
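
Editor's note: the spurious.c hunk runs try_one_irq() with local interrupts disabled, the same environment a real handler invocation would get. Loosely, the user-space counterpart of that discipline is blocking signal delivery around a critical section; a tiny sketch, offered only as an analogy and not as part of the patch:

    #include <signal.h>
    #include <stdio.h>

    static void critical_section(void)
    {
        /* Work that must not be interrupted by the signal handler. */
        printf("polling handler with delivery blocked\n");
    }

    int main(void)
    {
        sigset_t block, old;

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);

        sigprocmask(SIG_BLOCK, &block, &old);   /* "local_irq_disable()" */
        critical_section();
        sigprocmask(SIG_SETMASK, &old, NULL);   /* "local_irq_enable()" */
        return 0;
    }
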
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index cfadc1291d0b..5240d75f4c60 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1333,7 +1333,7 @@ static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
1333 return seq_open(filp, &kprobes_seq_ops); 1333 return seq_open(filp, &kprobes_seq_ops);
1334} 1334}
1335 1335
1336static struct file_operations debugfs_kprobes_operations = { 1336static const struct file_operations debugfs_kprobes_operations = {
1337 .open = kprobes_open, 1337 .open = kprobes_open,
1338 .read = seq_read, 1338 .read = seq_read,
1339 .llseek = seq_lseek, 1339 .llseek = seq_lseek,
@@ -1515,7 +1515,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
1515 return count; 1515 return count;
1516} 1516}
1517 1517
1518static struct file_operations fops_kp = { 1518static const struct file_operations fops_kp = {
1519 .read = read_enabled_file_bool, 1519 .read = read_enabled_file_bool,
1520 .write = write_enabled_file_bool, 1520 .write = write_enabled_file_bool,
1521}; 1521};
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5fe709982caa..ab7ae57773e1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
150EXPORT_SYMBOL(kthread_create); 150EXPORT_SYMBOL(kthread_create);
151 151
152/** 152/**
153 * kthread_bind - bind a just-created kthread to a cpu.
154 * @k: thread created by kthread_create().
155 * @cpu: cpu (might not be online, must be possible) for @k to run on.
156 *
157 * Description: This function is equivalent to set_cpus_allowed(),
158 * except that @cpu doesn't need to be online, and the thread must be
159 * stopped (i.e., just returned from kthread_create()).
160 */
161void kthread_bind(struct task_struct *k, unsigned int cpu)
162{
163 /* Must have done schedule() in kthread() before we set_task_cpu */
164 if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
165 WARN_ON(1);
166 return;
167 }
168 set_task_cpu(k, cpu);
169 k->cpus_allowed = cpumask_of_cpu(cpu);
170 k->rt.nr_cpus_allowed = 1;
171 k->flags |= PF_THREAD_BOUND;
172}
173EXPORT_SYMBOL(kthread_bind);
174
175/**
176 * kthread_stop - stop a thread created by kthread_create(). 153 * kthread_stop - stop a thread created by kthread_create().
177 * @k: thread created by kthread_create(). 154 * @k: thread created by kthread_create().
178 * 155 *
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b2..9af56723c096 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
142#ifdef CONFIG_LOCK_STAT 142#ifdef CONFIG_LOCK_STAT
143static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); 143static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
144 144
145static inline u64 lockstat_clock(void)
146{
147 return cpu_clock(smp_processor_id());
148}
149
145static int lock_point(unsigned long points[], unsigned long ip) 150static int lock_point(unsigned long points[], unsigned long ip)
146{ 151{
147 int i; 152 int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
158 return i; 163 return i;
159} 164}
160 165
161static void lock_time_inc(struct lock_time *lt, s64 time) 166static void lock_time_inc(struct lock_time *lt, u64 time)
162{ 167{
163 if (time > lt->max) 168 if (time > lt->max)
164 lt->max = time; 169 lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
234static void lock_release_holdtime(struct held_lock *hlock) 239static void lock_release_holdtime(struct held_lock *hlock)
235{ 240{
236 struct lock_class_stats *stats; 241 struct lock_class_stats *stats;
237 s64 holdtime; 242 u64 holdtime;
238 243
239 if (!lock_stat) 244 if (!lock_stat)
240 return; 245 return;
241 246
242 holdtime = sched_clock() - hlock->holdtime_stamp; 247 holdtime = lockstat_clock() - hlock->holdtime_stamp;
243 248
244 stats = get_lock_stats(hlock_class(hlock)); 249 stats = get_lock_stats(hlock_class(hlock));
245 if (hlock->read) 250 if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2792 hlock->references = references; 2797 hlock->references = references;
2793#ifdef CONFIG_LOCK_STAT 2798#ifdef CONFIG_LOCK_STAT
2794 hlock->waittime_stamp = 0; 2799 hlock->waittime_stamp = 0;
2795 hlock->holdtime_stamp = sched_clock(); 2800 hlock->holdtime_stamp = lockstat_clock();
2796#endif 2801#endif
2797 2802
2798 if (check == 2 && !mark_irqflags(curr, hlock)) 2803 if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
3322 if (hlock->instance != lock) 3327 if (hlock->instance != lock)
3323 return; 3328 return;
3324 3329
3325 hlock->waittime_stamp = sched_clock(); 3330 hlock->waittime_stamp = lockstat_clock();
3326 3331
3327 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 3332 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3328 contending_point = lock_point(hlock_class(hlock)->contending_point, 3333 contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3345 struct held_lock *hlock, *prev_hlock; 3350 struct held_lock *hlock, *prev_hlock;
3346 struct lock_class_stats *stats; 3351 struct lock_class_stats *stats;
3347 unsigned int depth; 3352 unsigned int depth;
3348 u64 now; 3353 u64 now, waittime = 0;
3349 s64 waittime = 0;
3350 int i, cpu; 3354 int i, cpu;
3351 3355
3352 depth = curr->lockdep_depth; 3356 depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
3374 3378
3375 cpu = smp_processor_id(); 3379 cpu = smp_processor_id();
3376 if (hlock->waittime_stamp) { 3380 if (hlock->waittime_stamp) {
3377 now = sched_clock(); 3381 now = lockstat_clock();
3378 waittime = now - hlock->waittime_stamp; 3382 waittime = now - hlock->waittime_stamp;
3379 hlock->holdtime_stamp = now; 3383 hlock->holdtime_stamp = now;
3380 } 3384 }
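
Editor's note: the lockdep hunks route every lock-statistics timestamp through the new lockstat_clock() helper and switch the hold/wait times from s64 to u64, since a monotonic per-CPU clock never yields a negative interval. A user-space sketch of the same shape, with clock_gettime(CLOCK_MONOTONIC) standing in for cpu_clock() (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t stat_clock(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
        uint64_t start = stat_clock();
        uint64_t hold;

        /* ... the "lock" is held here ... */

        hold = stat_clock() - start;    /* monotonic source: never negative */
        printf("held for %llu ns\n", (unsigned long long)hold);
        return 0;
    }
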
diff --git a/kernel/module.c b/kernel/module.c
index 5a29397ca4b6..8b7d8805819d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1992,12 +1992,14 @@ static inline unsigned long layout_symtab(struct module *mod,
1992 Elf_Shdr *sechdrs, 1992 Elf_Shdr *sechdrs,
1993 unsigned int symindex, 1993 unsigned int symindex,
1994 unsigned int strindex, 1994 unsigned int strindex,
1995 const Elf_Hdr *hdr, 1995 const Elf_Ehdr *hdr,
1996 const char *secstrings, 1996 const char *secstrings,
1997 unsigned long *pstroffs, 1997 unsigned long *pstroffs,
1998 unsigned long *strmap) 1998 unsigned long *strmap)
1999{ 1999{
2000 return 0;
2000} 2001}
2002
2001static inline void add_kallsyms(struct module *mod, 2003static inline void add_kallsyms(struct module *mod,
2002 Elf_Shdr *sechdrs, 2004 Elf_Shdr *sechdrs,
2003 unsigned int shnum, 2005 unsigned int shnum,
@@ -2081,9 +2083,8 @@ static noinline struct module *load_module(void __user *umod,
2081 struct module *mod; 2083 struct module *mod;
2082 long err = 0; 2084 long err = 0;
2083 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 2085 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
2084#ifdef CONFIG_KALLSYMS
2085 unsigned long symoffs, stroffs, *strmap; 2086 unsigned long symoffs, stroffs, *strmap;
2086#endif 2087
2087 mm_segment_t old_fs; 2088 mm_segment_t old_fs;
2088 2089
2089 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", 2090 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
@@ -3091,7 +3092,6 @@ void module_layout(struct module *mod,
3091 struct modversion_info *ver, 3092 struct modversion_info *ver,
3092 struct kernel_param *kp, 3093 struct kernel_param *kp,
3093 struct kernel_symbol *ks, 3094 struct kernel_symbol *ks,
3094 struct marker *marker,
3095 struct tracepoint *tp) 3095 struct tracepoint *tp)
3096{ 3096{
3097} 3097}
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 50d022e5a560..ec815a960b5d 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -16,6 +16,7 @@
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/poison.h> 18#include <linux/poison.h>
19#include <linux/sched.h>
19#include <linux/spinlock.h> 20#include <linux/spinlock.h>
20#include <linux/kallsyms.h> 21#include <linux/kallsyms.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
diff --git a/kernel/panic.c b/kernel/panic.c
index bcdef26e3332..96b45d0b4ba5 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -90,6 +90,8 @@ NORET_TYPE void panic(const char * fmt, ...)
90 90
91 atomic_notifier_call_chain(&panic_notifier_list, 0, buf); 91 atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
92 92
93 bust_spinlocks(0);
94
93 if (!panic_blink) 95 if (!panic_blink)
94 panic_blink = no_blink; 96 panic_blink = no_blink;
95 97
@@ -136,7 +138,6 @@ NORET_TYPE void panic(const char * fmt, ...)
136 mdelay(1); 138 mdelay(1);
137 i++; 139 i++;
138 } 140 }
139 bust_spinlocks(0);
140} 141}
141 142
142EXPORT_SYMBOL(panic); 143EXPORT_SYMBOL(panic);
diff --git a/kernel/params.c b/kernel/params.c
index 9da58eabdcb2..d656c276508d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -218,15 +218,11 @@ int param_set_charp(const char *val, struct kernel_param *kp)
218 return -ENOSPC; 218 return -ENOSPC;
219 } 219 }
220 220
221 if (kp->flags & KPARAM_KMALLOCED)
222 kfree(*(char **)kp->arg);
223
224 /* This is a hack. We can't need to strdup in early boot, and we 221 /* This is a hack. We can't need to strdup in early boot, and we
225 * don't need to; this mangled commandline is preserved. */ 222 * don't need to; this mangled commandline is preserved. */
226 if (slab_is_available()) { 223 if (slab_is_available()) {
227 kp->flags |= KPARAM_KMALLOCED;
228 *(char **)kp->arg = kstrdup(val, GFP_KERNEL); 224 *(char **)kp->arg = kstrdup(val, GFP_KERNEL);
229 if (!kp->arg) 225 if (!*(char **)kp->arg)
230 return -ENOMEM; 226 return -ENOMEM;
231 } else 227 } else
232 *(const char **)kp->arg = val; 228 *(const char **)kp->arg = val;
@@ -304,6 +300,7 @@ static int param_array(const char *name,
304 unsigned int min, unsigned int max, 300 unsigned int min, unsigned int max,
305 void *elem, int elemsize, 301 void *elem, int elemsize,
306 int (*set)(const char *, struct kernel_param *kp), 302 int (*set)(const char *, struct kernel_param *kp),
303 u16 flags,
307 unsigned int *num) 304 unsigned int *num)
308{ 305{
309 int ret; 306 int ret;
@@ -313,6 +310,7 @@ static int param_array(const char *name,
313 /* Get the name right for errors. */ 310 /* Get the name right for errors. */
314 kp.name = name; 311 kp.name = name;
315 kp.arg = elem; 312 kp.arg = elem;
313 kp.flags = flags;
316 314
317 /* No equals sign? */ 315 /* No equals sign? */
318 if (!val) { 316 if (!val) {
@@ -358,7 +356,8 @@ int param_array_set(const char *val, struct kernel_param *kp)
358 unsigned int temp_num; 356 unsigned int temp_num;
359 357
360 return param_array(kp->name, val, 1, arr->max, arr->elem, 358 return param_array(kp->name, val, 1, arr->max, arr->elem,
361 arr->elemsize, arr->set, arr->num ?: &temp_num); 359 arr->elemsize, arr->set, kp->flags,
360 arr->num ?: &temp_num);
362} 361}
363 362
364int param_array_get(char *buffer, struct kernel_param *kp) 363int param_array_get(char *buffer, struct kernel_param *kp)
@@ -605,11 +604,7 @@ void module_param_sysfs_remove(struct module *mod)
605 604
606void destroy_params(const struct kernel_param *params, unsigned num) 605void destroy_params(const struct kernel_param *params, unsigned num)
607{ 606{
608 unsigned int i; 607 /* FIXME: This should free kmalloced charp parameters. It doesn't. */
609
610 for (i = 0; i < num; i++)
611 if (params[i].flags & KPARAM_KMALLOCED)
612 kfree(*(char **)params[i].arg);
613} 608}
614 609
615static void __init kernel_add_sysfs_param(const char *name, 610static void __init kernel_add_sysfs_param(const char *name,
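
Editor's note: besides dropping the KPARAM_KMALLOCED bookkeeping, the param_set_charp() hunk fixes the allocation check to test *(char **)kp->arg, i.e. the string kstrdup() actually produced, rather than the argument slot itself. A minimal illustration of checking the right level of indirection after a string duplication (not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int set_charp(const char *val, void *arg)
    {
        *(char **)arg = strdup(val);
        if (!*(char **)arg)         /* check the duplicated string, not the slot */
            return -1;
        return 0;
    }

    int main(void)
    {
        char *param = NULL;

        if (set_charp("hello", &param) == 0)
            printf("param=%s\n", param);
        free(param);
        return 0;
    }
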
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 76ac4db405e9..7f29643c8985 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -20,6 +20,7 @@
20#include <linux/percpu.h> 20#include <linux/percpu.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/vmstat.h> 22#include <linux/vmstat.h>
23#include <linux/vmalloc.h>
23#include <linux/hardirq.h> 24#include <linux/hardirq.h>
24#include <linux/rculist.h> 25#include <linux/rculist.h>
25#include <linux/uaccess.h> 26#include <linux/uaccess.h>
@@ -1030,14 +1031,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
1030 update_context_time(ctx); 1031 update_context_time(ctx);
1031 1032
1032 perf_disable(); 1033 perf_disable();
1033 if (ctx->nr_active) { 1034 if (ctx->nr_active)
1034 list_for_each_entry(event, &ctx->group_list, group_entry) { 1035 list_for_each_entry(event, &ctx->group_list, group_entry)
1035 if (event != event->group_leader) 1036 group_sched_out(event, cpuctx, ctx);
1036 event_sched_out(event, cpuctx, ctx); 1037
1037 else
1038 group_sched_out(event, cpuctx, ctx);
1039 }
1040 }
1041 perf_enable(); 1038 perf_enable();
1042 out: 1039 out:
1043 spin_unlock(&ctx->lock); 1040 spin_unlock(&ctx->lock);
@@ -1258,12 +1255,8 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1258 if (event->cpu != -1 && event->cpu != cpu) 1255 if (event->cpu != -1 && event->cpu != cpu)
1259 continue; 1256 continue;
1260 1257
1261 if (event != event->group_leader) 1258 if (group_can_go_on(event, cpuctx, 1))
1262 event_sched_in(event, cpuctx, ctx, cpu); 1259 group_sched_in(event, cpuctx, ctx, cpu);
1263 else {
1264 if (group_can_go_on(event, cpuctx, 1))
1265 group_sched_in(event, cpuctx, ctx, cpu);
1266 }
1267 1260
1268 /* 1261 /*
1269 * If this pinned group hasn't been scheduled, 1262 * If this pinned group hasn't been scheduled,
@@ -1291,15 +1284,9 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1291 if (event->cpu != -1 && event->cpu != cpu) 1284 if (event->cpu != -1 && event->cpu != cpu)
1292 continue; 1285 continue;
1293 1286
1294 if (event != event->group_leader) { 1287 if (group_can_go_on(event, cpuctx, can_add_hw))
1295 if (event_sched_in(event, cpuctx, ctx, cpu)) 1288 if (group_sched_in(event, cpuctx, ctx, cpu))
1296 can_add_hw = 0; 1289 can_add_hw = 0;
1297 } else {
1298 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1299 if (group_sched_in(event, cpuctx, ctx, cpu))
1300 can_add_hw = 0;
1301 }
1302 }
1303 } 1290 }
1304 perf_enable(); 1291 perf_enable();
1305 out: 1292 out:
@@ -1368,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1368 u64 interrupts, freq; 1355 u64 interrupts, freq;
1369 1356
1370 spin_lock(&ctx->lock); 1357 spin_lock(&ctx->lock);
1371 list_for_each_entry(event, &ctx->group_list, group_entry) { 1358 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1372 if (event->state != PERF_EVENT_STATE_ACTIVE) 1359 if (event->state != PERF_EVENT_STATE_ACTIVE)
1373 continue; 1360 continue;
1374 1361
@@ -2105,49 +2092,31 @@ unlock:
2105 rcu_read_unlock(); 2092 rcu_read_unlock();
2106} 2093}
2107 2094
2108static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2095static unsigned long perf_data_size(struct perf_mmap_data *data)
2109{ 2096{
2110 struct perf_event *event = vma->vm_file->private_data; 2097 return data->nr_pages << (PAGE_SHIFT + data->data_order);
2111 struct perf_mmap_data *data; 2098}
2112 int ret = VM_FAULT_SIGBUS;
2113
2114 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2115 if (vmf->pgoff == 0)
2116 ret = 0;
2117 return ret;
2118 }
2119
2120 rcu_read_lock();
2121 data = rcu_dereference(event->data);
2122 if (!data)
2123 goto unlock;
2124
2125 if (vmf->pgoff == 0) {
2126 vmf->page = virt_to_page(data->user_page);
2127 } else {
2128 int nr = vmf->pgoff - 1;
2129
2130 if ((unsigned)nr > data->nr_pages)
2131 goto unlock;
2132 2099
2133 if (vmf->flags & FAULT_FLAG_WRITE) 2100#ifndef CONFIG_PERF_USE_VMALLOC
2134 goto unlock;
2135 2101
2136 vmf->page = virt_to_page(data->data_pages[nr]); 2102/*
2137 } 2103 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2104 */
2138 2105
2139 get_page(vmf->page); 2106static struct page *
2140 vmf->page->mapping = vma->vm_file->f_mapping; 2107perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2141 vmf->page->index = vmf->pgoff; 2108{
2109 if (pgoff > data->nr_pages)
2110 return NULL;
2142 2111
2143 ret = 0; 2112 if (pgoff == 0)
2144unlock: 2113 return virt_to_page(data->user_page);
2145 rcu_read_unlock();
2146 2114
2147 return ret; 2115 return virt_to_page(data->data_pages[pgoff - 1]);
2148} 2116}
2149 2117
2150static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages) 2118static struct perf_mmap_data *
2119perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2151{ 2120{
2152 struct perf_mmap_data *data; 2121 struct perf_mmap_data *data;
2153 unsigned long size; 2122 unsigned long size;
@@ -2172,19 +2141,10 @@ static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2172 goto fail_data_pages; 2141 goto fail_data_pages;
2173 } 2142 }
2174 2143
2144 data->data_order = 0;
2175 data->nr_pages = nr_pages; 2145 data->nr_pages = nr_pages;
2176 atomic_set(&data->lock, -1);
2177
2178 if (event->attr.watermark) {
2179 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2180 event->attr.wakeup_watermark);
2181 }
2182 if (!data->watermark)
2183 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2184
2185 rcu_assign_pointer(event->data, data);
2186 2146
2187 return 0; 2147 return data;
2188 2148
2189fail_data_pages: 2149fail_data_pages:
2190 for (i--; i >= 0; i--) 2150 for (i--; i >= 0; i--)
@@ -2196,7 +2156,7 @@ fail_user_page:
2196 kfree(data); 2156 kfree(data);
2197 2157
2198fail: 2158fail:
2199 return -ENOMEM; 2159 return NULL;
2200} 2160}
2201 2161
2202static void perf_mmap_free_page(unsigned long addr) 2162static void perf_mmap_free_page(unsigned long addr)
@@ -2207,28 +2167,169 @@ static void perf_mmap_free_page(unsigned long addr)
2207 __free_page(page); 2167 __free_page(page);
2208} 2168}
2209 2169
2210static void __perf_mmap_data_free(struct rcu_head *rcu_head) 2170static void perf_mmap_data_free(struct perf_mmap_data *data)
2211{ 2171{
2212 struct perf_mmap_data *data;
2213 int i; 2172 int i;
2214 2173
2215 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2216
2217 perf_mmap_free_page((unsigned long)data->user_page); 2174 perf_mmap_free_page((unsigned long)data->user_page);
2218 for (i = 0; i < data->nr_pages; i++) 2175 for (i = 0; i < data->nr_pages; i++)
2219 perf_mmap_free_page((unsigned long)data->data_pages[i]); 2176 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2177}
2178
2179#else
2180
2181/*
2182 * Back perf_mmap() with vmalloc memory.
2183 *
2184 * Required for architectures that have d-cache aliasing issues.
2185 */
2186
2187static struct page *
2188perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2189{
2190 if (pgoff > (1UL << data->data_order))
2191 return NULL;
2192
2193 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2194}
2195
2196static void perf_mmap_unmark_page(void *addr)
2197{
2198 struct page *page = vmalloc_to_page(addr);
2199
2200 page->mapping = NULL;
2201}
2202
2203static void perf_mmap_data_free_work(struct work_struct *work)
2204{
2205 struct perf_mmap_data *data;
2206 void *base;
2207 int i, nr;
2208
2209 data = container_of(work, struct perf_mmap_data, work);
2210 nr = 1 << data->data_order;
2220 2211
2212 base = data->user_page;
2213 for (i = 0; i < nr + 1; i++)
2214 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2215
2216 vfree(base);
2217}
2218
2219static void perf_mmap_data_free(struct perf_mmap_data *data)
2220{
2221 schedule_work(&data->work);
2222}
2223
2224static struct perf_mmap_data *
2225perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2226{
2227 struct perf_mmap_data *data;
2228 unsigned long size;
2229 void *all_buf;
2230
2231 WARN_ON(atomic_read(&event->mmap_count));
2232
2233 size = sizeof(struct perf_mmap_data);
2234 size += sizeof(void *);
2235
2236 data = kzalloc(size, GFP_KERNEL);
2237 if (!data)
2238 goto fail;
2239
2240 INIT_WORK(&data->work, perf_mmap_data_free_work);
2241
2242 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2243 if (!all_buf)
2244 goto fail_all_buf;
2245
2246 data->user_page = all_buf;
2247 data->data_pages[0] = all_buf + PAGE_SIZE;
2248 data->data_order = ilog2(nr_pages);
2249 data->nr_pages = 1;
2250
2251 return data;
2252
2253fail_all_buf:
2221 kfree(data); 2254 kfree(data);
2255
2256fail:
2257 return NULL;
2222} 2258}
2223 2259
2224static void perf_mmap_data_free(struct perf_event *event) 2260#endif
2261
2262static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2263{
2264 struct perf_event *event = vma->vm_file->private_data;
2265 struct perf_mmap_data *data;
2266 int ret = VM_FAULT_SIGBUS;
2267
2268 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2269 if (vmf->pgoff == 0)
2270 ret = 0;
2271 return ret;
2272 }
2273
2274 rcu_read_lock();
2275 data = rcu_dereference(event->data);
2276 if (!data)
2277 goto unlock;
2278
2279 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2280 goto unlock;
2281
2282 vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2283 if (!vmf->page)
2284 goto unlock;
2285
2286 get_page(vmf->page);
2287 vmf->page->mapping = vma->vm_file->f_mapping;
2288 vmf->page->index = vmf->pgoff;
2289
2290 ret = 0;
2291unlock:
2292 rcu_read_unlock();
2293
2294 return ret;
2295}
2296
2297static void
2298perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2299{
2300 long max_size = perf_data_size(data);
2301
2302 atomic_set(&data->lock, -1);
2303
2304 if (event->attr.watermark) {
2305 data->watermark = min_t(long, max_size,
2306 event->attr.wakeup_watermark);
2307 }
2308
2309 if (!data->watermark)
2310 data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
2311
2312
2313 rcu_assign_pointer(event->data, data);
2314}
2315
2316static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2317{
2318 struct perf_mmap_data *data;
2319
2320 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2321 perf_mmap_data_free(data);
2322 kfree(data);
2323}
2324
2325static void perf_mmap_data_release(struct perf_event *event)
2225{ 2326{
2226 struct perf_mmap_data *data = event->data; 2327 struct perf_mmap_data *data = event->data;
2227 2328
2228 WARN_ON(atomic_read(&event->mmap_count)); 2329 WARN_ON(atomic_read(&event->mmap_count));
2229 2330
2230 rcu_assign_pointer(event->data, NULL); 2331 rcu_assign_pointer(event->data, NULL);
2231 call_rcu(&data->rcu_head, __perf_mmap_data_free); 2332 call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
2232} 2333}
2233 2334
2234static void perf_mmap_open(struct vm_area_struct *vma) 2335static void perf_mmap_open(struct vm_area_struct *vma)
@@ -2244,16 +2345,17 @@ static void perf_mmap_close(struct vm_area_struct *vma)
2244 2345
2245 WARN_ON_ONCE(event->ctx->parent_ctx); 2346 WARN_ON_ONCE(event->ctx->parent_ctx);
2246 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { 2347 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2348 unsigned long size = perf_data_size(event->data);
2247 struct user_struct *user = current_user(); 2349 struct user_struct *user = current_user();
2248 2350
2249 atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm); 2351 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2250 vma->vm_mm->locked_vm -= event->data->nr_locked; 2352 vma->vm_mm->locked_vm -= event->data->nr_locked;
2251 perf_mmap_data_free(event); 2353 perf_mmap_data_release(event);
2252 mutex_unlock(&event->mmap_mutex); 2354 mutex_unlock(&event->mmap_mutex);
2253 } 2355 }
2254} 2356}
2255 2357
2256static struct vm_operations_struct perf_mmap_vmops = { 2358static const struct vm_operations_struct perf_mmap_vmops = {
2257 .open = perf_mmap_open, 2359 .open = perf_mmap_open,
2258 .close = perf_mmap_close, 2360 .close = perf_mmap_close,
2259 .fault = perf_mmap_fault, 2361 .fault = perf_mmap_fault,
@@ -2266,6 +2368,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2266 unsigned long user_locked, user_lock_limit; 2368 unsigned long user_locked, user_lock_limit;
2267 struct user_struct *user = current_user(); 2369 struct user_struct *user = current_user();
2268 unsigned long locked, lock_limit; 2370 unsigned long locked, lock_limit;
2371 struct perf_mmap_data *data;
2269 unsigned long vma_size; 2372 unsigned long vma_size;
2270 unsigned long nr_pages; 2373 unsigned long nr_pages;
2271 long user_extra, extra; 2374 long user_extra, extra;
@@ -2328,10 +2431,15 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2328 } 2431 }
2329 2432
2330 WARN_ON(event->data); 2433 WARN_ON(event->data);
2331 ret = perf_mmap_data_alloc(event, nr_pages); 2434
2332 if (ret) 2435 data = perf_mmap_data_alloc(event, nr_pages);
2436 ret = -ENOMEM;
2437 if (!data)
2333 goto unlock; 2438 goto unlock;
2334 2439
2440 ret = 0;
2441 perf_mmap_data_init(event, data);
2442
2335 atomic_set(&event->mmap_count, 1); 2443 atomic_set(&event->mmap_count, 1);
2336 atomic_long_add(user_extra, &user->locked_vm); 2444 atomic_long_add(user_extra, &user->locked_vm);
2337 vma->vm_mm->locked_vm += extra; 2445 vma->vm_mm->locked_vm += extra;
@@ -2519,7 +2627,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2519 if (!data->writable) 2627 if (!data->writable)
2520 return true; 2628 return true;
2521 2629
2522 mask = (data->nr_pages << PAGE_SHIFT) - 1; 2630 mask = perf_data_size(data) - 1;
2523 2631
2524 offset = (offset - tail) & mask; 2632 offset = (offset - tail) & mask;
2525 head = (head - tail) & mask; 2633 head = (head - tail) & mask;
@@ -2624,7 +2732,7 @@ void perf_output_copy(struct perf_output_handle *handle,
2624 const void *buf, unsigned int len) 2732 const void *buf, unsigned int len)
2625{ 2733{
2626 unsigned int pages_mask; 2734 unsigned int pages_mask;
2627 unsigned int offset; 2735 unsigned long offset;
2628 unsigned int size; 2736 unsigned int size;
2629 void **pages; 2737 void **pages;
2630 2738
@@ -2633,12 +2741,14 @@ void perf_output_copy(struct perf_output_handle *handle,
2633 pages = handle->data->data_pages; 2741 pages = handle->data->data_pages;
2634 2742
2635 do { 2743 do {
2636 unsigned int page_offset; 2744 unsigned long page_offset;
2745 unsigned long page_size;
2637 int nr; 2746 int nr;
2638 2747
2639 nr = (offset >> PAGE_SHIFT) & pages_mask; 2748 nr = (offset >> PAGE_SHIFT) & pages_mask;
2640 page_offset = offset & (PAGE_SIZE - 1); 2749 page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
2641 size = min_t(unsigned int, PAGE_SIZE - page_offset, len); 2750 page_offset = offset & (page_size - 1);
2751 size = min_t(unsigned int, page_size - page_offset, len);
2642 2752
2643 memcpy(pages[nr] + page_offset, buf, size); 2753 memcpy(pages[nr] + page_offset, buf, size);
2644 2754
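The copy loop now derives its chunk size from the allocation order of the data pages rather than assuming PAGE_SIZE chunks. The standalone sketch below reproduces the wrap-around copy for the order-0 case (ring_copy, NR_PAGES and the fixed PAGE_SHIFT are illustrative, not kernel code): each pass copies up to the end of the current page, then advances the offset and wraps through the power-of-two page mask.

#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define NR_PAGES   4                    /* must be a power of two */

static unsigned char pages[NR_PAGES][PAGE_SIZE];

/* Copy len bytes into the ring at byte offset 'offset', wrapping page by
 * page the way the loop above does when data_order == 0. */
static void ring_copy(unsigned long offset, const void *buf, unsigned int len)
{
        unsigned int pages_mask = NR_PAGES - 1;

        do {
                unsigned long page_offset = offset & (PAGE_SIZE - 1);
                unsigned int nr = (offset >> PAGE_SHIFT) & pages_mask;
                unsigned int size = PAGE_SIZE - page_offset;

                if (size > len)
                        size = len;
                memcpy(&pages[nr][page_offset], buf, size);

                len    -= size;
                buf     = (const unsigned char *)buf + size;
                offset += size;
        } while (len);
}

int main(void)
{
        char msg[6000];

        memset(msg, 'x', sizeof(msg));
        ring_copy(PAGE_SIZE - 100, msg, sizeof(msg));   /* spans three pages */
        printf("%c %c\n", pages[0][PAGE_SIZE - 1], pages[2][0]);
        return 0;
}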
@@ -3849,8 +3959,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3849 regs = task_pt_regs(current); 3959 regs = task_pt_regs(current);
3850 3960
3851 if (regs) { 3961 if (regs) {
3852 if (perf_event_overflow(event, 0, &data, regs)) 3962 if (!(event->attr.exclude_idle && current->pid == 0))
3853 ret = HRTIMER_NORESTART; 3963 if (perf_event_overflow(event, 0, &data, regs))
3964 ret = HRTIMER_NORESTART;
3854 } 3965 }
3855 3966
3856 period = max_t(u64, 10000, event->hw.sample_period); 3967 period = max_t(u64, 10000, event->hw.sample_period);
@@ -3859,6 +3970,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3859 return ret; 3970 return ret;
3860} 3971}
3861 3972
3973static void perf_swevent_start_hrtimer(struct perf_event *event)
3974{
3975 struct hw_perf_event *hwc = &event->hw;
3976
3977 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3978 hwc->hrtimer.function = perf_swevent_hrtimer;
3979 if (hwc->sample_period) {
3980 u64 period;
3981
3982 if (hwc->remaining) {
3983 if (hwc->remaining < 0)
3984 period = 10000;
3985 else
3986 period = hwc->remaining;
3987 hwc->remaining = 0;
3988 } else {
3989 period = max_t(u64, 10000, hwc->sample_period);
3990 }
3991 __hrtimer_start_range_ns(&hwc->hrtimer,
3992 ns_to_ktime(period), 0,
3993 HRTIMER_MODE_REL, 0);
3994 }
3995}
3996
3997static void perf_swevent_cancel_hrtimer(struct perf_event *event)
3998{
3999 struct hw_perf_event *hwc = &event->hw;
4000
4001 if (hwc->sample_period) {
4002 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4003 hwc->remaining = ktime_to_ns(remaining);
4004
4005 hrtimer_cancel(&hwc->hrtimer);
4006 }
4007}
4008
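perf_swevent_start_hrtimer() and perf_swevent_cancel_hrtimer() factor out the arm/disarm pattern used by the clock events below, and additionally remember how much of the sampling period was left when the event was disabled, so that a re-enable resumes the partial period instead of restarting it. A userspace model of just that bookkeeping (the hrtimer itself is left out; struct swevent_model and the function names are illustrative):

#include <stdio.h>

struct swevent_model {
        long long sample_period;        /* ns; 0 would mean "no timer" */
        long long remaining;            /* ns left when last cancelled */
};

static long long start_period(struct swevent_model *hwc)
{
        long long period;

        if (hwc->remaining) {
                /* Resume an interrupted period; clamp bogus values to 10 us. */
                period = hwc->remaining < 0 ? 10000 : hwc->remaining;
                hwc->remaining = 0;
        } else {
                /* Fresh start: never arm the timer shorter than 10 us. */
                period = hwc->sample_period > 10000 ? hwc->sample_period : 10000;
        }
        return period;
}

static void cancel_period(struct swevent_model *hwc, long long left)
{
        hwc->remaining = left;          /* ktime_to_ns(hrtimer_get_remaining()) */
}

int main(void)
{
        struct swevent_model hwc = { .sample_period = 1000000, .remaining = 0 };

        printf("%lld\n", start_period(&hwc));   /* 1000000: full period */
        cancel_period(&hwc, 250000);            /* event disabled mid-period */
        printf("%lld\n", start_period(&hwc));   /* 250000: resume remainder */
        return 0;
}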
3862/* 4009/*
3863 * Software event: cpu wall time clock 4010 * Software event: cpu wall time clock
3864 */ 4011 */
@@ -3881,22 +4028,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
3881 int cpu = raw_smp_processor_id(); 4028 int cpu = raw_smp_processor_id();
3882 4029
3883 atomic64_set(&hwc->prev_count, cpu_clock(cpu)); 4030 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3884 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4031 perf_swevent_start_hrtimer(event);
3885 hwc->hrtimer.function = perf_swevent_hrtimer;
3886 if (hwc->sample_period) {
3887 u64 period = max_t(u64, 10000, hwc->sample_period);
3888 __hrtimer_start_range_ns(&hwc->hrtimer,
3889 ns_to_ktime(period), 0,
3890 HRTIMER_MODE_REL, 0);
3891 }
3892 4032
3893 return 0; 4033 return 0;
3894} 4034}
3895 4035
3896static void cpu_clock_perf_event_disable(struct perf_event *event) 4036static void cpu_clock_perf_event_disable(struct perf_event *event)
3897{ 4037{
3898 if (event->hw.sample_period) 4038 perf_swevent_cancel_hrtimer(event);
3899 hrtimer_cancel(&event->hw.hrtimer);
3900 cpu_clock_perf_event_update(event); 4039 cpu_clock_perf_event_update(event);
3901} 4040}
3902 4041
@@ -3933,22 +4072,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
3933 now = event->ctx->time; 4072 now = event->ctx->time;
3934 4073
3935 atomic64_set(&hwc->prev_count, now); 4074 atomic64_set(&hwc->prev_count, now);
3936 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4075
3937 hwc->hrtimer.function = perf_swevent_hrtimer; 4076 perf_swevent_start_hrtimer(event);
3938 if (hwc->sample_period) {
3939 u64 period = max_t(u64, 10000, hwc->sample_period);
3940 __hrtimer_start_range_ns(&hwc->hrtimer,
3941 ns_to_ktime(period), 0,
3942 HRTIMER_MODE_REL, 0);
3943 }
3944 4077
3945 return 0; 4078 return 0;
3946} 4079}
3947 4080
3948static void task_clock_perf_event_disable(struct perf_event *event) 4081static void task_clock_perf_event_disable(struct perf_event *event)
3949{ 4082{
3950 if (event->hw.sample_period) 4083 perf_swevent_cancel_hrtimer(event);
3951 hrtimer_cancel(&event->hw.hrtimer);
3952 task_clock_perf_event_update(event, event->ctx->time); 4084 task_clock_perf_event_update(event, event->ctx->time);
3953 4085
3954} 4086}
@@ -4781,9 +4913,7 @@ int perf_event_init_task(struct task_struct *child)
4781 * We don't have to disable NMIs - we are only looking at 4913 * We don't have to disable NMIs - we are only looking at
4782 * the list, not manipulating it: 4914 * the list, not manipulating it:
4783 */ 4915 */
4784 list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) { 4916 list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
4785 if (event != event->group_leader)
4786 continue;
4787 4917
4788 if (!event->attr.inherit) { 4918 if (!event->attr.inherit) {
4789 inherited_all = 0; 4919 inherited_all = 0;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 04b3a83d686f..04a9e90d248f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -693,21 +693,22 @@ static int software_resume(void)
693 /* The snapshot device should not be opened while we're running */ 693 /* The snapshot device should not be opened while we're running */
694 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { 694 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
695 error = -EBUSY; 695 error = -EBUSY;
696 swsusp_close(FMODE_READ);
696 goto Unlock; 697 goto Unlock;
697 } 698 }
698 699
699 pm_prepare_console(); 700 pm_prepare_console();
700 error = pm_notifier_call_chain(PM_RESTORE_PREPARE); 701 error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
701 if (error) 702 if (error)
702 goto Finish; 703 goto close_finish;
703 704
704 error = usermodehelper_disable(); 705 error = usermodehelper_disable();
705 if (error) 706 if (error)
706 goto Finish; 707 goto close_finish;
707 708
708 error = create_basic_memory_bitmaps(); 709 error = create_basic_memory_bitmaps();
709 if (error) 710 if (error)
710 goto Finish; 711 goto close_finish;
711 712
712 pr_debug("PM: Preparing processes for restore.\n"); 713 pr_debug("PM: Preparing processes for restore.\n");
713 error = prepare_processes(); 714 error = prepare_processes();
@@ -719,6 +720,7 @@ static int software_resume(void)
719 pr_debug("PM: Reading hibernation image.\n"); 720 pr_debug("PM: Reading hibernation image.\n");
720 721
721 error = swsusp_read(&flags); 722 error = swsusp_read(&flags);
723 swsusp_close(FMODE_READ);
722 if (!error) 724 if (!error)
723 hibernation_restore(flags & SF_PLATFORM_MODE); 725 hibernation_restore(flags & SF_PLATFORM_MODE);
724 726
@@ -737,6 +739,9 @@ static int software_resume(void)
737 mutex_unlock(&pm_mutex); 739 mutex_unlock(&pm_mutex);
738 pr_debug("PM: Resume from disk failed.\n"); 740 pr_debug("PM: Resume from disk failed.\n");
739 return error; 741 return error;
742close_finish:
743 swsusp_close(FMODE_READ);
744 goto Finish;
740} 745}
741 746
742late_initcall(software_resume); 747late_initcall(software_resume);
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 17d8bb1acf9c..25596e450ac7 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -19,7 +19,7 @@
19 * The time it takes is system-specific though, so when we test this 19 * The time it takes is system-specific though, so when we test this
20 * during system bootup we allow a LOT of time. 20 * during system bootup we allow a LOT of time.
21 */ 21 */
22#define TEST_SUSPEND_SECONDS 5 22#define TEST_SUSPEND_SECONDS 10
23 23
24static unsigned long suspend_test_start_time; 24static unsigned long suspend_test_start_time;
25 25
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label)
49 * has some performance issues. The stack dump of a WARN_ON 49 * has some performance issues. The stack dump of a WARN_ON
50 * is more likely to get the right attention than a printk... 50 * is more likely to get the right attention than a printk...
51 */ 51 */
52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label); 52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
53 "Component: %s, time: %u\n", label, msec);
53} 54}
54 55
55/* 56/*
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b101cdc4df3f..890f6b11b1d3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -314,7 +314,6 @@ static int save_image(struct swap_map_handle *handle,
314{ 314{
315 unsigned int m; 315 unsigned int m;
316 int ret; 316 int ret;
317 int error = 0;
318 int nr_pages; 317 int nr_pages;
319 int err2; 318 int err2;
320 struct bio *bio; 319 struct bio *bio;
@@ -329,26 +328,27 @@ static int save_image(struct swap_map_handle *handle,
329 nr_pages = 0; 328 nr_pages = 0;
330 bio = NULL; 329 bio = NULL;
331 do_gettimeofday(&start); 330 do_gettimeofday(&start);
332 do { 331 while (1) {
333 ret = snapshot_read_next(snapshot, PAGE_SIZE); 332 ret = snapshot_read_next(snapshot, PAGE_SIZE);
334 if (ret > 0) { 333 if (ret <= 0)
335 error = swap_write_page(handle, data_of(*snapshot), 334 break;
336 &bio); 335 ret = swap_write_page(handle, data_of(*snapshot), &bio);
337 if (error) 336 if (ret)
338 break; 337 break;
339 if (!(nr_pages % m)) 338 if (!(nr_pages % m))
340 printk("\b\b\b\b%3d%%", nr_pages / m); 339 printk("\b\b\b\b%3d%%", nr_pages / m);
341 nr_pages++; 340 nr_pages++;
342 } 341 }
343 } while (ret > 0);
344 err2 = wait_on_bio_chain(&bio); 342 err2 = wait_on_bio_chain(&bio);
345 do_gettimeofday(&stop); 343 do_gettimeofday(&stop);
346 if (!error) 344 if (!ret)
347 error = err2; 345 ret = err2;
348 if (!error) 346 if (!ret)
349 printk("\b\b\b\bdone\n"); 347 printk("\b\b\b\bdone\n");
348 else
349 printk("\n");
350 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); 350 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
351 return error; 351 return ret;
352} 352}
353 353
354/** 354/**
@@ -536,7 +536,8 @@ static int load_image(struct swap_map_handle *handle,
536 snapshot_write_finalize(snapshot); 536 snapshot_write_finalize(snapshot);
537 if (!snapshot_image_loaded(snapshot)) 537 if (!snapshot_image_loaded(snapshot))
538 error = -ENODATA; 538 error = -ENODATA;
539 } 539 } else
540 printk("\n");
540 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); 541 swsusp_show_speed(&start, &stop, nr_to_read, "Read");
541 return error; 542 return error;
542} 543}
@@ -572,8 +573,6 @@ int swsusp_read(unsigned int *flags_p)
572 error = load_image(&handle, &snapshot, header->pages - 1); 573 error = load_image(&handle, &snapshot, header->pages - 1);
573 release_swap_reader(&handle); 574 release_swap_reader(&handle);
574 575
575 blkdev_put(resume_bdev, FMODE_READ);
576
577 if (!error) 576 if (!error)
578 pr_debug("PM: Image successfully loaded\n"); 577 pr_debug("PM: Image successfully loaded\n");
579 else 578 else
@@ -596,7 +595,7 @@ int swsusp_check(void)
596 error = bio_read_page(swsusp_resume_block, 595 error = bio_read_page(swsusp_resume_block,
597 swsusp_header, NULL); 596 swsusp_header, NULL);
598 if (error) 597 if (error)
599 return error; 598 goto put;
600 599
601 if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { 600 if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) {
602 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); 601 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
@@ -604,8 +603,10 @@ int swsusp_check(void)
604 error = bio_write_page(swsusp_resume_block, 603 error = bio_write_page(swsusp_resume_block,
605 swsusp_header, NULL); 604 swsusp_header, NULL);
606 } else { 605 } else {
607 return -EINVAL; 606 error = -EINVAL;
608 } 607 }
608
609put:
609 if (error) 610 if (error)
610 blkdev_put(resume_bdev, FMODE_READ); 611 blkdev_put(resume_bdev, FMODE_READ);
611 else 612 else
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 37ac45483082..400183346ad2 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -46,22 +46,15 @@
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h> 47#include <linux/kernel_stat.h>
48 48
49enum rcu_barrier { 49#ifdef CONFIG_DEBUG_LOCK_ALLOC
50 RCU_BARRIER_STD, 50static struct lock_class_key rcu_lock_key;
51 RCU_BARRIER_BH, 51struct lockdep_map rcu_lock_map =
52 RCU_BARRIER_SCHED, 52 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
53}; 53EXPORT_SYMBOL_GPL(rcu_lock_map);
54#endif
54 55
55static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
56static atomic_t rcu_barrier_cpu_count;
57static DEFINE_MUTEX(rcu_barrier_mutex);
58static struct completion rcu_barrier_completion;
59int rcu_scheduler_active __read_mostly; 56int rcu_scheduler_active __read_mostly;
60 57
61static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
62static struct rcu_head rcu_migrate_head[3];
63static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
64
65/* 58/*
66 * Awaken the corresponding synchronize_rcu() instance now that a 59 * Awaken the corresponding synchronize_rcu() instance now that a
67 * grace period has elapsed. 60 * grace period has elapsed.
@@ -164,129 +157,10 @@ void synchronize_rcu_bh(void)
164} 157}
165EXPORT_SYMBOL_GPL(synchronize_rcu_bh); 158EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
166 159
167static void rcu_barrier_callback(struct rcu_head *notused)
168{
169 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
170 complete(&rcu_barrier_completion);
171}
172
173/*
174 * Called with preemption disabled, and from cross-cpu IRQ context.
175 */
176static void rcu_barrier_func(void *type)
177{
178 int cpu = smp_processor_id();
179 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
180
181 atomic_inc(&rcu_barrier_cpu_count);
182 switch ((enum rcu_barrier)type) {
183 case RCU_BARRIER_STD:
184 call_rcu(head, rcu_barrier_callback);
185 break;
186 case RCU_BARRIER_BH:
187 call_rcu_bh(head, rcu_barrier_callback);
188 break;
189 case RCU_BARRIER_SCHED:
190 call_rcu_sched(head, rcu_barrier_callback);
191 break;
192 }
193}
194
195static inline void wait_migrated_callbacks(void)
196{
197 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
198 smp_mb(); /* In case we didn't sleep. */
199}
200
201/*
202 * Orchestrate the specified type of RCU barrier, waiting for all
203 * RCU callbacks of the specified type to complete.
204 */
205static void _rcu_barrier(enum rcu_barrier type)
206{
207 BUG_ON(in_interrupt());
208 /* Take cpucontrol mutex to protect against CPU hotplug */
209 mutex_lock(&rcu_barrier_mutex);
210 init_completion(&rcu_barrier_completion);
211 /*
212 * Initialize rcu_barrier_cpu_count to 1, then invoke
213 * rcu_barrier_func() on each CPU, so that each CPU also has
214 * incremented rcu_barrier_cpu_count. Only then is it safe to
215 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
216 * might complete its grace period before all of the other CPUs
217 * did their increment, causing this function to return too
218 * early.
219 */
220 atomic_set(&rcu_barrier_cpu_count, 1);
221 on_each_cpu(rcu_barrier_func, (void *)type, 1);
222 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
223 complete(&rcu_barrier_completion);
224 wait_for_completion(&rcu_barrier_completion);
225 mutex_unlock(&rcu_barrier_mutex);
226 wait_migrated_callbacks();
227}
228
229/**
230 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
231 */
232void rcu_barrier(void)
233{
234 _rcu_barrier(RCU_BARRIER_STD);
235}
236EXPORT_SYMBOL_GPL(rcu_barrier);
237
238/**
239 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
240 */
241void rcu_barrier_bh(void)
242{
243 _rcu_barrier(RCU_BARRIER_BH);
244}
245EXPORT_SYMBOL_GPL(rcu_barrier_bh);
246
247/**
248 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
249 */
250void rcu_barrier_sched(void)
251{
252 _rcu_barrier(RCU_BARRIER_SCHED);
253}
254EXPORT_SYMBOL_GPL(rcu_barrier_sched);
255
256static void rcu_migrate_callback(struct rcu_head *notused)
257{
258 if (atomic_dec_and_test(&rcu_migrate_type_count))
259 wake_up(&rcu_migrate_wq);
260}
261
262extern int rcu_cpu_notify(struct notifier_block *self,
263 unsigned long action, void *hcpu);
264
265static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, 160static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
266 unsigned long action, void *hcpu) 161 unsigned long action, void *hcpu)
267{ 162{
268 rcu_cpu_notify(self, action, hcpu); 163 return rcu_cpu_notify(self, action, hcpu);
269 if (action == CPU_DYING) {
270 /*
271 * preempt_disable() in on_each_cpu() prevents stop_machine(),
272 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
273 * returns, all online cpus have queued rcu_barrier_func(),
274 * and the dead cpu (if it exists) queues rcu_migrate_callback()s.
275 *
276 * These callbacks ensure _rcu_barrier() waits for all
277 * RCU callbacks of the specified type to complete.
278 */
279 atomic_set(&rcu_migrate_type_count, 3);
280 call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
281 call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
282 call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
283 } else if (action == CPU_DOWN_PREPARE) {
284 /* Don't need to wait until next removal operation. */
285 /* rcu_migrate_head is protected by cpu_add_remove_lock */
286 wait_migrated_callbacks();
287 }
288
289 return NOTIFY_OK;
290} 164}
291 165
292void __init rcu_init(void) 166void __init rcu_init(void)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 233768f21f97..697c0a0229d4 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -606,8 +606,6 @@ static struct rcu_torture_ops sched_ops_sync = {
606 .name = "sched_sync" 606 .name = "sched_sync"
607}; 607};
608 608
609extern int rcu_expedited_torture_stats(char *page);
610
611static struct rcu_torture_ops sched_expedited_ops = { 609static struct rcu_torture_ops sched_expedited_ops = {
612 .init = rcu_sync_torture_init, 610 .init = rcu_sync_torture_init,
613 .cleanup = NULL, 611 .cleanup = NULL,
@@ -650,7 +648,7 @@ rcu_torture_writer(void *arg)
650 old_rp = rcu_torture_current; 648 old_rp = rcu_torture_current;
651 rp->rtort_mbtest = 1; 649 rp->rtort_mbtest = 1;
652 rcu_assign_pointer(rcu_torture_current, rp); 650 rcu_assign_pointer(rcu_torture_current, rp);
653 smp_wmb(); 651 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
654 if (old_rp) { 652 if (old_rp) {
655 i = old_rp->rtort_pipe_count; 653 i = old_rp->rtort_pipe_count;
656 if (i > RCU_TORTURE_PIPE_LEN) 654 if (i > RCU_TORTURE_PIPE_LEN)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 52b06f6e158c..f3077c0ab181 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -49,13 +49,6 @@
49 49
50#include "rcutree.h" 50#include "rcutree.h"
51 51
52#ifdef CONFIG_DEBUG_LOCK_ALLOC
53static struct lock_class_key rcu_lock_key;
54struct lockdep_map rcu_lock_map =
55 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
56EXPORT_SYMBOL_GPL(rcu_lock_map);
57#endif
58
59/* Data structures. */ 52/* Data structures. */
60 53
61#define RCU_STATE_INITIALIZER(name) { \ 54#define RCU_STATE_INITIALIZER(name) { \
@@ -66,10 +59,13 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
66 NUM_RCU_LVL_2, \ 59 NUM_RCU_LVL_2, \
67 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ 60 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
68 }, \ 61 }, \
69 .signaled = RCU_SIGNAL_INIT, \ 62 .signaled = RCU_GP_IDLE, \
70 .gpnum = -300, \ 63 .gpnum = -300, \
71 .completed = -300, \ 64 .completed = -300, \
72 .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ 65 .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
66 .orphan_cbs_list = NULL, \
67 .orphan_cbs_tail = &name.orphan_cbs_list, \
68 .orphan_qlen = 0, \
73 .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ 69 .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
74 .n_force_qs = 0, \ 70 .n_force_qs = 0, \
75 .n_force_qs_ngp = 0, \ 71 .n_force_qs_ngp = 0, \
@@ -81,24 +77,16 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 77struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 78DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
83 79
84extern long rcu_batches_completed_sched(void);
85static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
86static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
87 struct rcu_node *rnp, unsigned long flags);
88static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
89#ifdef CONFIG_HOTPLUG_CPU
90static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
91#endif /* #ifdef CONFIG_HOTPLUG_CPU */
92static void __rcu_process_callbacks(struct rcu_state *rsp,
93 struct rcu_data *rdp);
94static void __call_rcu(struct rcu_head *head,
95 void (*func)(struct rcu_head *rcu),
96 struct rcu_state *rsp);
97static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
98static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
99 int preemptable);
100 80
101#include "rcutree_plugin.h" 81/*
82 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
83 * permit this function to be invoked without holding the root rcu_node
84 * structure's ->lock, but of course results can be subject to change.
85 */
86static int rcu_gp_in_progress(struct rcu_state *rsp)
87{
88 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
89}
102 90
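Concretely, ->gpnum is incremented when a grace period starts and ->completed is set equal to ->gpnum when it ends, so the two differ exactly while a grace period is in flight. With the initializer values used here, gpnum == completed == -300 means idle; after rcu_start_gp() the state is gpnum == -299, completed == -300 until cpu_quiet_msk_finish() copies gpnum back into completed.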
103/* 91/*
104 * Note a quiescent state. Because we do not need to know 92 * Note a quiescent state. Because we do not need to know
@@ -137,6 +125,10 @@ static int blimit = 10; /* Maximum callbacks per softirq. */
137static int qhimark = 10000; /* If this many pending, ignore blimit. */ 125static int qhimark = 10000; /* If this many pending, ignore blimit. */
138static int qlowmark = 100; /* Once only this many pending, use blimit. */ 126static int qlowmark = 100; /* Once only this many pending, use blimit. */
139 127
128module_param(blimit, int, 0);
129module_param(qhimark, int, 0);
130module_param(qlowmark, int, 0);
131
140static void force_quiescent_state(struct rcu_state *rsp, int relaxed); 132static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
141static int rcu_pending(int cpu); 133static int rcu_pending(int cpu);
142 134
@@ -173,9 +165,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
173static int 165static int
174cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) 166cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
175{ 167{
176 /* ACCESS_ONCE() because we are accessing outside of lock. */ 168 return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
177 return *rdp->nxttail[RCU_DONE_TAIL] &&
178 ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
179} 169}
180 170
181/* 171/*
@@ -369,7 +359,7 @@ static long dyntick_recall_completed(struct rcu_state *rsp)
369/* 359/*
370 * Snapshot the specified CPU's dynticks counter so that we can later 360 * Snapshot the specified CPU's dynticks counter so that we can later
371 * credit them with an implicit quiescent state. Return 1 if this CPU 361 * credit them with an implicit quiescent state. Return 1 if this CPU
372 * is already in a quiescent state courtesy of dynticks idle mode. 362 * is in dynticks idle mode, which is an extended quiescent state.
373 */ 363 */
374static int dyntick_save_progress_counter(struct rcu_data *rdp) 364static int dyntick_save_progress_counter(struct rcu_data *rdp)
375{ 365{
@@ -475,30 +465,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
475 long delta; 465 long delta;
476 unsigned long flags; 466 unsigned long flags;
477 struct rcu_node *rnp = rcu_get_root(rsp); 467 struct rcu_node *rnp = rcu_get_root(rsp);
478 struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
479 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
480 468
481 /* Only let one CPU complain about others per time interval. */ 469 /* Only let one CPU complain about others per time interval. */
482 470
483 spin_lock_irqsave(&rnp->lock, flags); 471 spin_lock_irqsave(&rnp->lock, flags);
484 delta = jiffies - rsp->jiffies_stall; 472 delta = jiffies - rsp->jiffies_stall;
485 if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) { 473 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
486 spin_unlock_irqrestore(&rnp->lock, flags); 474 spin_unlock_irqrestore(&rnp->lock, flags);
487 return; 475 return;
488 } 476 }
489 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 477 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
478
479 /*
480 * Now rat on any tasks that got kicked up to the root rcu_node
481 * due to CPU offlining.
482 */
483 rcu_print_task_stall(rnp);
490 spin_unlock_irqrestore(&rnp->lock, flags); 484 spin_unlock_irqrestore(&rnp->lock, flags);
491 485
492 /* OK, time to rat on our buddy... */ 486 /* OK, time to rat on our buddy... */
493 487
494 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 488 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
495 for (; rnp_cur < rnp_end; rnp_cur++) { 489 rcu_for_each_leaf_node(rsp, rnp) {
496 rcu_print_task_stall(rnp); 490 rcu_print_task_stall(rnp);
497 if (rnp_cur->qsmask == 0) 491 if (rnp->qsmask == 0)
498 continue; 492 continue;
499 for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) 493 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
500 if (rnp_cur->qsmask & (1UL << cpu)) 494 if (rnp->qsmask & (1UL << cpu))
501 printk(" %d", rnp_cur->grplo + cpu); 495 printk(" %d", rnp->grplo + cpu);
502 } 496 }
503 printk(" (detected by %d, t=%ld jiffies)\n", 497 printk(" (detected by %d, t=%ld jiffies)\n",
504 smp_processor_id(), (long)(jiffies - rsp->gp_start)); 498 smp_processor_id(), (long)(jiffies - rsp->gp_start));
@@ -537,8 +531,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
537 /* We haven't checked in, so go dump stack. */ 531 /* We haven't checked in, so go dump stack. */
538 print_cpu_stall(rsp); 532 print_cpu_stall(rsp);
539 533
540 } else if (rsp->gpnum != rsp->completed && 534 } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
541 delta >= RCU_STALL_RAT_DELAY) {
542 535
543 /* They had two time units to dump stack, so complain. */ 536 /* They had two time units to dump stack, so complain. */
544 print_other_cpu_stall(rsp); 537 print_other_cpu_stall(rsp);
@@ -617,9 +610,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
617 note_new_gpnum(rsp, rdp); 610 note_new_gpnum(rsp, rdp);
618 611
619 /* 612 /*
620 * Because we are first, we know that all our callbacks will 613 * Because this CPU just now started the new grace period, we know
621 * be covered by this upcoming grace period, even the ones 614 * that all of its callbacks will be covered by this upcoming grace
622 * that were registered arbitrarily recently. 615 * period, even the ones that were registered arbitrarily recently.
616 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
617 *
618 * Other CPUs cannot be sure exactly when the grace period started.
619 * Therefore, their recently registered callbacks must pass through
620 * an additional RCU_NEXT_READY stage, so that they will be handled
621 * by the next RCU grace period.
623 */ 622 */
624 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 623 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
625 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 624 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
@@ -657,15 +656,18 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
657 * one corresponding to this CPU, due to the fact that we have 656 * one corresponding to this CPU, due to the fact that we have
658 * irqs disabled. 657 * irqs disabled.
659 */ 658 */
660 for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) { 659 rcu_for_each_node_breadth_first(rsp, rnp) {
661 spin_lock(&rnp->lock); /* irqs already disabled. */ 660 spin_lock(&rnp->lock); /* irqs already disabled. */
662 rcu_preempt_check_blocked_tasks(rnp); 661 rcu_preempt_check_blocked_tasks(rnp);
663 rnp->qsmask = rnp->qsmaskinit; 662 rnp->qsmask = rnp->qsmaskinit;
664 rnp->gpnum = rsp->gpnum; 663 rnp->gpnum = rsp->gpnum;
665 spin_unlock(&rnp->lock); /* irqs already disabled. */ 664 spin_unlock(&rnp->lock); /* irqs remain disabled. */
666 } 665 }
667 666
667 rnp = rcu_get_root(rsp);
668 spin_lock(&rnp->lock); /* irqs already disabled. */
668 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ 669 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
670 spin_unlock(&rnp->lock); /* irqs remain disabled. */
669 spin_unlock_irqrestore(&rsp->onofflock, flags); 671 spin_unlock_irqrestore(&rsp->onofflock, flags);
670} 672}
671 673
@@ -703,10 +705,11 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
703 * hold rnp->lock, as required by rcu_start_gp(), which will release it. 705 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
704 */ 706 */
705static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) 707static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
706 __releases(rnp->lock) 708 __releases(rcu_get_root(rsp)->lock)
707{ 709{
708 WARN_ON_ONCE(rsp->completed == rsp->gpnum); 710 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
709 rsp->completed = rsp->gpnum; 711 rsp->completed = rsp->gpnum;
712 rsp->signaled = RCU_GP_IDLE;
710 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); 713 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
711 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ 714 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
712} 715}
@@ -842,17 +845,63 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
842#ifdef CONFIG_HOTPLUG_CPU 845#ifdef CONFIG_HOTPLUG_CPU
843 846
844/* 847/*
848 * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
849 * specified flavor of RCU. The callbacks will be adopted by the next
850 * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
851 * comes first. Because this is invoked from the CPU_DYING notifier,
852 * irqs are already disabled.
853 */
854static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
855{
856 int i;
857 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
858
859 if (rdp->nxtlist == NULL)
860 return; /* irqs disabled, so comparison is stable. */
861 spin_lock(&rsp->onofflock); /* irqs already disabled. */
862 *rsp->orphan_cbs_tail = rdp->nxtlist;
863 rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
864 rdp->nxtlist = NULL;
865 for (i = 0; i < RCU_NEXT_SIZE; i++)
866 rdp->nxttail[i] = &rdp->nxtlist;
867 rsp->orphan_qlen += rdp->qlen;
868 rdp->qlen = 0;
869 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
870}
871
872/*
873 * Adopt previously orphaned RCU callbacks.
874 */
875static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
876{
877 unsigned long flags;
878 struct rcu_data *rdp;
879
880 spin_lock_irqsave(&rsp->onofflock, flags);
881 rdp = rsp->rda[smp_processor_id()];
882 if (rsp->orphan_cbs_list == NULL) {
883 spin_unlock_irqrestore(&rsp->onofflock, flags);
884 return;
885 }
886 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
887 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
888 rdp->qlen += rsp->orphan_qlen;
889 rsp->orphan_cbs_list = NULL;
890 rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
891 rsp->orphan_qlen = 0;
892 spin_unlock_irqrestore(&rsp->onofflock, flags);
893}
894
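Both helpers are O(1) splices of a singly linked list that carries an explicit tail pointer: the dying CPU's entire ->nxtlist is appended to the state's orphan list by pointing the orphan tail at the donor's head and taking over the donor's tail, and adoption is the same operation in the other direction. A self-contained sketch of the splice (struct cblist and the helper names are illustrative, not kernel types):

#include <stdio.h>

struct cb {
        struct cb *next;
        int id;
};

/* A list with an explicit tail pointer, as in ->nxtlist / ->orphan_cbs_list. */
struct cblist {
        struct cb *head;
        struct cb **tail;
        long qlen;
};

static void cblist_init(struct cblist *l)
{
        l->head = NULL;
        l->tail = &l->head;
        l->qlen = 0;
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
        c->next = NULL;
        *l->tail = c;
        l->tail = &c->next;
        l->qlen++;
}

/* Move everything from 'src' to the end of 'dst' in O(1), preserving order,
 * the same splice the orphanage helpers perform. */
static void cblist_splice(struct cblist *dst, struct cblist *src)
{
        if (!src->head)
                return;
        *dst->tail = src->head;
        dst->tail = src->tail;
        dst->qlen += src->qlen;
        cblist_init(src);
}

int main(void)
{
        struct cblist cpu, orphanage;
        struct cb a = { .id = 1 }, b = { .id = 2 };

        cblist_init(&cpu);
        cblist_init(&orphanage);
        cblist_enqueue(&cpu, &a);
        cblist_enqueue(&cpu, &b);

        cblist_splice(&orphanage, &cpu);        /* rcu_send_cbs_to_orphanage() */
        printf("orphans: %ld, cpu: %ld\n", orphanage.qlen, cpu.qlen);
        return 0;
}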
895/*
845 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy 896 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
846 * and move all callbacks from the outgoing CPU to the current one. 897 * and move all callbacks from the outgoing CPU to the current one.
847 */ 898 */
848static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) 899static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
849{ 900{
850 int i;
851 unsigned long flags; 901 unsigned long flags;
852 long lastcomp; 902 long lastcomp;
853 unsigned long mask; 903 unsigned long mask;
854 struct rcu_data *rdp = rsp->rda[cpu]; 904 struct rcu_data *rdp = rsp->rda[cpu];
855 struct rcu_data *rdp_me;
856 struct rcu_node *rnp; 905 struct rcu_node *rnp;
857 906
858 /* Exclude any attempts to start a new grace period. */ 907 /* Exclude any attempts to start a new grace period. */
@@ -868,39 +917,29 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
868 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 917 spin_unlock(&rnp->lock); /* irqs remain disabled. */
869 break; 918 break;
870 } 919 }
871 rcu_preempt_offline_tasks(rsp, rnp, rdp); 920
921 /*
922 * If there was a task blocking the current grace period,
923 * and if all CPUs have checked in, we need to propagate
924 * the quiescent state up the rcu_node hierarchy. But that
925 * is inconvenient at the moment due to deadlock issues if
926 * this should end the current grace period. So set the
927 * offlined CPU's bit in ->qsmask in order to force the
928 * next force_quiescent_state() invocation to clean up this
929 * mess in a deadlock-free manner.
930 */
931 if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
932 rnp->qsmask |= mask;
933
872 mask = rnp->grpmask; 934 mask = rnp->grpmask;
873 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 935 spin_unlock(&rnp->lock); /* irqs remain disabled. */
874 rnp = rnp->parent; 936 rnp = rnp->parent;
875 } while (rnp != NULL); 937 } while (rnp != NULL);
876 lastcomp = rsp->completed; 938 lastcomp = rsp->completed;
877 939
878 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ 940 spin_unlock_irqrestore(&rsp->onofflock, flags);
879 941
880 /* 942 rcu_adopt_orphan_cbs(rsp);
881 * Move callbacks from the outgoing CPU to the running CPU.
882 * Note that the outgoing CPU is now quiescent, so it is now
883 * (uncharacteristically) safe to access its rcu_data structure.
884 * Note also that we must carefully retain the order of the
885 * outgoing CPU's callbacks in order for rcu_barrier() to work
886 * correctly. Finally, note that we start all the callbacks
887 * afresh, even those that have passed through a grace period
888 * and are therefore ready to invoke. The theory is that hotplug
889 * events are rare, and that if they are frequent enough to
890 * indefinitely delay callbacks, you have far worse things to
891 * be worrying about.
892 */
893 rdp_me = rsp->rda[smp_processor_id()];
894 if (rdp->nxtlist != NULL) {
895 *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
896 rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
897 rdp->nxtlist = NULL;
898 for (i = 0; i < RCU_NEXT_SIZE; i++)
899 rdp->nxttail[i] = &rdp->nxtlist;
900 rdp_me->qlen += rdp->qlen;
901 rdp->qlen = 0;
902 }
903 local_irq_restore(flags);
904} 943}
905 944
906/* 945/*
@@ -918,6 +957,14 @@ static void rcu_offline_cpu(int cpu)
918 957
919#else /* #ifdef CONFIG_HOTPLUG_CPU */ 958#else /* #ifdef CONFIG_HOTPLUG_CPU */
920 959
960static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
961{
962}
963
964static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
965{
966}
967
921static void rcu_offline_cpu(int cpu) 968static void rcu_offline_cpu(int cpu)
922{ 969{
923} 970}
@@ -928,7 +975,7 @@ static void rcu_offline_cpu(int cpu)
928 * Invoke any RCU callbacks that have made it to the end of their grace 975 * Invoke any RCU callbacks that have made it to the end of their grace
929 * period. Throttle as specified by rdp->blimit. 976 * period. Throttle as specified by rdp->blimit.
930 */ 977 */
931static void rcu_do_batch(struct rcu_data *rdp) 978static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
932{ 979{
933 unsigned long flags; 980 unsigned long flags;
934 struct rcu_head *next, *list, **tail; 981 struct rcu_head *next, *list, **tail;
@@ -981,6 +1028,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
981 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) 1028 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
982 rdp->blimit = blimit; 1029 rdp->blimit = blimit;
983 1030
1031 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
1032 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
1033 rdp->qlen_last_fqs_check = 0;
1034 rdp->n_force_qs_snap = rsp->n_force_qs;
1035 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
1036 rdp->qlen_last_fqs_check = rdp->qlen;
1037
984 local_irq_restore(flags); 1038 local_irq_restore(flags);
985 1039
986 /* Re-raise the RCU softirq if there are callbacks remaining. */ 1040 /* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1050,33 +1104,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1050 int cpu; 1104 int cpu;
1051 unsigned long flags; 1105 unsigned long flags;
1052 unsigned long mask; 1106 unsigned long mask;
1053 struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; 1107 struct rcu_node *rnp;
1054 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
1055 1108
1056 for (; rnp_cur < rnp_end; rnp_cur++) { 1109 rcu_for_each_leaf_node(rsp, rnp) {
1057 mask = 0; 1110 mask = 0;
1058 spin_lock_irqsave(&rnp_cur->lock, flags); 1111 spin_lock_irqsave(&rnp->lock, flags);
1059 if (rsp->completed != lastcomp) { 1112 if (rsp->completed != lastcomp) {
1060 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1113 spin_unlock_irqrestore(&rnp->lock, flags);
1061 return 1; 1114 return 1;
1062 } 1115 }
1063 if (rnp_cur->qsmask == 0) { 1116 if (rnp->qsmask == 0) {
1064 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1117 spin_unlock_irqrestore(&rnp->lock, flags);
1065 continue; 1118 continue;
1066 } 1119 }
1067 cpu = rnp_cur->grplo; 1120 cpu = rnp->grplo;
1068 bit = 1; 1121 bit = 1;
1069 for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) { 1122 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
1070 if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu])) 1123 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
1071 mask |= bit; 1124 mask |= bit;
1072 } 1125 }
1073 if (mask != 0 && rsp->completed == lastcomp) { 1126 if (mask != 0 && rsp->completed == lastcomp) {
1074 1127
1075 /* cpu_quiet_msk() releases rnp_cur->lock. */ 1128 /* cpu_quiet_msk() releases rnp->lock. */
1076 cpu_quiet_msk(mask, rsp, rnp_cur, flags); 1129 cpu_quiet_msk(mask, rsp, rnp, flags);
1077 continue; 1130 continue;
1078 } 1131 }
1079 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1132 spin_unlock_irqrestore(&rnp->lock, flags);
1080 } 1133 }
1081 return 0; 1134 return 0;
1082} 1135}
@@ -1092,7 +1145,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1092 struct rcu_node *rnp = rcu_get_root(rsp); 1145 struct rcu_node *rnp = rcu_get_root(rsp);
1093 u8 signaled; 1146 u8 signaled;
1094 1147
1095 if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) 1148 if (!rcu_gp_in_progress(rsp))
1096 return; /* No grace period in progress, nothing to force. */ 1149 return; /* No grace period in progress, nothing to force. */
1097 if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { 1150 if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
1098 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ 1151 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
@@ -1113,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1113 } 1166 }
1114 spin_unlock(&rnp->lock); 1167 spin_unlock(&rnp->lock);
1115 switch (signaled) { 1168 switch (signaled) {
1169 case RCU_GP_IDLE:
1116 case RCU_GP_INIT: 1170 case RCU_GP_INIT:
1117 1171
1118 break; /* grace period still initializing, ignore. */ 1172 break; /* grace period idle or initializing, ignore. */
1119 1173
1120 case RCU_SAVE_DYNTICK: 1174 case RCU_SAVE_DYNTICK:
1121 1175
@@ -1129,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1129 1183
1130 /* Update state, record completion counter. */ 1184 /* Update state, record completion counter. */
1131 spin_lock(&rnp->lock); 1185 spin_lock(&rnp->lock);
1132 if (lastcomp == rsp->completed) { 1186 if (lastcomp == rsp->completed &&
1187 rsp->signaled == RCU_SAVE_DYNTICK) {
1133 rsp->signaled = RCU_FORCE_QS; 1188 rsp->signaled = RCU_FORCE_QS;
1134 dyntick_record_completed(rsp, lastcomp); 1189 dyntick_record_completed(rsp, lastcomp);
1135 } 1190 }
@@ -1195,7 +1250,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1195 } 1250 }
1196 1251
1197 /* If there are callbacks ready, invoke them. */ 1252 /* If there are callbacks ready, invoke them. */
1198 rcu_do_batch(rdp); 1253 rcu_do_batch(rsp, rdp);
1199} 1254}
1200 1255
1201/* 1256/*
@@ -1251,7 +1306,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1251 rdp->nxttail[RCU_NEXT_TAIL] = &head->next; 1306 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1252 1307
1253 /* Start a new grace period if one not already started. */ 1308 /* Start a new grace period if one not already started. */
1254 if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) { 1309 if (!rcu_gp_in_progress(rsp)) {
1255 unsigned long nestflag; 1310 unsigned long nestflag;
1256 struct rcu_node *rnp_root = rcu_get_root(rsp); 1311 struct rcu_node *rnp_root = rcu_get_root(rsp);
1257 1312
@@ -1259,10 +1314,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1259 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ 1314 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
1260 } 1315 }
1261 1316
1262 /* Force the grace period if too many callbacks or too long waiting. */ 1317 /*
1263 if (unlikely(++rdp->qlen > qhimark)) { 1318 * Force the grace period if too many callbacks or too long waiting.
1319 * Enforce hysteresis, and don't invoke force_quiescent_state()
1320 * if some other CPU has recently done so. Also, don't bother
1321 * invoking force_quiescent_state() if the newly enqueued callback
1322 * is the only one waiting for a grace period to complete.
1323 */
1324 if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
1264 rdp->blimit = LONG_MAX; 1325 rdp->blimit = LONG_MAX;
1265 force_quiescent_state(rsp, 0); 1326 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
1327 *rdp->nxttail[RCU_DONE_TAIL] != head)
1328 force_quiescent_state(rsp, 0);
1329 rdp->n_force_qs_snap = rsp->n_force_qs;
1330 rdp->qlen_last_fqs_check = rdp->qlen;
1266 } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) 1331 } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
1267 force_quiescent_state(rsp, 1); 1332 force_quiescent_state(rsp, 1);
1268 local_irq_restore(flags); 1333 local_irq_restore(flags);
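The hysteresis added here means force_quiescent_state() is no longer attempted on every enqueue once qhimark is exceeded, only when the queue has grown by another qhimark since the last check and no other CPU has forced a quiescent state since this CPU's snapshot. A simplified userspace model of the test (it leaves out the "newly enqueued callback is the only one waiting" exception; struct cpu_model and should_force are illustrative names):

#include <stdbool.h>
#include <stdio.h>

#define QHIMARK 10000

/* Per-CPU bookkeeping mirrored from the enqueue path above. */
struct cpu_model {
        long qlen;                      /* callbacks queued on this CPU */
        long qlen_last_fqs_check;       /* qlen when forcing was last considered */
        unsigned long n_force_qs_snap;  /* global forcing count at that time */
};

/* Returns true when this enqueue should trigger force_quiescent_state();
 * 'n_force_qs' stands in for the global rsp->n_force_qs counter. */
static bool should_force(struct cpu_model *c, unsigned long n_force_qs)
{
        bool force = false;

        c->qlen++;
        if (c->qlen > c->qlen_last_fqs_check + QHIMARK) {
                /* Only force if nobody else forced since our snapshot. */
                if (n_force_qs == c->n_force_qs_snap)
                        force = true;
                c->n_force_qs_snap = n_force_qs;
                c->qlen_last_fqs_check = c->qlen;
        }
        return force;
}

int main(void)
{
        struct cpu_model c = { 0, 0, 0 };
        long i, forced = 0;

        for (i = 0; i < 50000; i++)
                forced += should_force(&c, 0);
        printf("forced %ld times for 50000 callbacks\n", forced);      /* 4 */
        return 0;
}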
@@ -1331,7 +1396,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1331 } 1396 }
1332 1397
1333 /* Has an RCU GP gone long enough to send resched IPIs &c? */ 1398 /* Has an RCU GP gone long enough to send resched IPIs &c? */
1334 if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && 1399 if (rcu_gp_in_progress(rsp) &&
1335 ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { 1400 ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
1336 rdp->n_rp_need_fqs++; 1401 rdp->n_rp_need_fqs++;
1337 return 1; 1402 return 1;
@@ -1368,6 +1433,82 @@ int rcu_needs_cpu(int cpu)
1368 rcu_preempt_needs_cpu(cpu); 1433 rcu_preempt_needs_cpu(cpu);
1369} 1434}
1370 1435
1436static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
1437static atomic_t rcu_barrier_cpu_count;
1438static DEFINE_MUTEX(rcu_barrier_mutex);
1439static struct completion rcu_barrier_completion;
1440
1441static void rcu_barrier_callback(struct rcu_head *notused)
1442{
1443 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1444 complete(&rcu_barrier_completion);
1445}
1446
1447/*
1448 * Called with preemption disabled, and from cross-cpu IRQ context.
1449 */
1450static void rcu_barrier_func(void *type)
1451{
1452 int cpu = smp_processor_id();
1453 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
1454 void (*call_rcu_func)(struct rcu_head *head,
1455 void (*func)(struct rcu_head *head));
1456
1457 atomic_inc(&rcu_barrier_cpu_count);
1458 call_rcu_func = type;
1459 call_rcu_func(head, rcu_barrier_callback);
1460}
1461
1462/*
1463 * Orchestrate the specified type of RCU barrier, waiting for all
1464 * RCU callbacks of the specified type to complete.
1465 */
1466static void _rcu_barrier(struct rcu_state *rsp,
1467 void (*call_rcu_func)(struct rcu_head *head,
1468 void (*func)(struct rcu_head *head)))
1469{
1470 BUG_ON(in_interrupt());
1471 /* Take mutex to serialize concurrent rcu_barrier() requests. */
1472 mutex_lock(&rcu_barrier_mutex);
1473 init_completion(&rcu_barrier_completion);
1474 /*
1475 * Initialize rcu_barrier_cpu_count to 1, then invoke
1476 * rcu_barrier_func() on each CPU, so that each CPU also has
1477 * incremented rcu_barrier_cpu_count. Only then is it safe to
1478 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
1479 * might complete its grace period before all of the other CPUs
1480 * did their increment, causing this function to return too
1481 * early.
1482 */
1483 atomic_set(&rcu_barrier_cpu_count, 1);
1484 preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
1485 rcu_adopt_orphan_cbs(rsp);
1486 on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
1487 preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
1488 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1489 complete(&rcu_barrier_completion);
1490 wait_for_completion(&rcu_barrier_completion);
1491 mutex_unlock(&rcu_barrier_mutex);
1492}
1493
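The counting scheme in _rcu_barrier() is the usual "start the count at one" completion pattern: the extra self-reference guarantees the count cannot reach zero until every CPU has registered its callback, and only the final decrement signals completion. A userspace sketch of the same pattern with threads standing in for CPUs (a hypothetical model, not the kernel implementation; the synchronous registration loop plays the role of on_each_cpu()):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int count;

/* The "RCU callback": runs at some later time and drops one reference. */
static void *barrier_callback(void *arg)
{
        (void)arg;
        usleep(1000);
        pthread_mutex_lock(&lock);
        if (--count == 0)
                pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t cb[NCPUS];
        int i;

        count = 1;      /* the initial self-reference */

        /* Synchronous pass over the "CPUs": each takes a reference and
         * queues its callback before the initial reference is dropped. */
        for (i = 0; i < NCPUS; i++) {
                pthread_mutex_lock(&lock);
                count++;
                pthread_mutex_unlock(&lock);
                pthread_create(&cb[i], NULL, barrier_callback, NULL);
        }

        pthread_mutex_lock(&lock);
        if (--count == 0)               /* drop the initial reference */
                pthread_cond_signal(&done);
        while (count)
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < NCPUS; i++)
                pthread_join(cb[i], NULL);
        printf("all %d callbacks completed\n", NCPUS);
        return 0;
}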
1494/**
1495 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
1496 */
1497void rcu_barrier_bh(void)
1498{
1499 _rcu_barrier(&rcu_bh_state, call_rcu_bh);
1500}
1501EXPORT_SYMBOL_GPL(rcu_barrier_bh);
1502
1503/**
1504 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
1505 */
1506void rcu_barrier_sched(void)
1507{
1508 _rcu_barrier(&rcu_sched_state, call_rcu_sched);
1509}
1510EXPORT_SYMBOL_GPL(rcu_barrier_sched);
1511
1371/* 1512/*
1372 * Do boot-time initialization of a CPU's per-CPU RCU data. 1513 * Do boot-time initialization of a CPU's per-CPU RCU data.
1373 */ 1514 */
@@ -1418,6 +1559,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1418 rdp->beenonline = 1; /* We have now been online. */ 1559 rdp->beenonline = 1; /* We have now been online. */
1419 rdp->preemptable = preemptable; 1560 rdp->preemptable = preemptable;
1420 rdp->passed_quiesc_completed = lastcomp - 1; 1561 rdp->passed_quiesc_completed = lastcomp - 1;
1562 rdp->qlen_last_fqs_check = 0;
1563 rdp->n_force_qs_snap = rsp->n_force_qs;
1421 rdp->blimit = blimit; 1564 rdp->blimit = blimit;
1422 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1565 spin_unlock(&rnp->lock); /* irqs remain disabled. */
1423 1566
@@ -1464,6 +1607,22 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1464 case CPU_UP_PREPARE_FROZEN: 1607 case CPU_UP_PREPARE_FROZEN:
1465 rcu_online_cpu(cpu); 1608 rcu_online_cpu(cpu);
1466 break; 1609 break;
1610 case CPU_DYING:
1611 case CPU_DYING_FROZEN:
1612 /*
1613 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
1614 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
1615 * returns, all online cpus have queued rcu_barrier_func().
1616 * The dying CPU clears its cpu_online_mask bit and
1617 * moves all of its RCU callbacks to ->orphan_cbs_list
1618 * in the context of stop_machine(), so subsequent calls
1619 * to _rcu_barrier() will adopt these callbacks and only
1620 * then queue rcu_barrier_func() on all remaining CPUs.
1621 */
1622 rcu_send_cbs_to_orphanage(&rcu_bh_state);
1623 rcu_send_cbs_to_orphanage(&rcu_sched_state);
1624 rcu_preempt_send_cbs_to_orphanage();
1625 break;
1467 case CPU_DEAD: 1626 case CPU_DEAD:
1468 case CPU_DEAD_FROZEN: 1627 case CPU_DEAD_FROZEN:
1469 case CPU_UP_CANCELED: 1628 case CPU_UP_CANCELED:
@@ -1526,7 +1685,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1526 cpustride *= rsp->levelspread[i]; 1685 cpustride *= rsp->levelspread[i];
1527 rnp = rsp->level[i]; 1686 rnp = rsp->level[i];
1528 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 1687 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1529 spin_lock_init(&rnp->lock); 1688 if (rnp != rcu_get_root(rsp))
1689 spin_lock_init(&rnp->lock);
1530 rnp->gpnum = 0; 1690 rnp->gpnum = 0;
1531 rnp->qsmask = 0; 1691 rnp->qsmask = 0;
1532 rnp->qsmaskinit = 0; 1692 rnp->qsmaskinit = 0;
@@ -1549,6 +1709,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1549 INIT_LIST_HEAD(&rnp->blocked_tasks[1]); 1709 INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
1550 } 1710 }
1551 } 1711 }
1712 spin_lock_init(&rcu_get_root(rsp)->lock);
1552} 1713}
1553 1714
1554/* 1715/*
@@ -1558,6 +1719,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1558 */ 1719 */
1559#define RCU_INIT_FLAVOR(rsp, rcu_data) \ 1720#define RCU_INIT_FLAVOR(rsp, rcu_data) \
1560do { \ 1721do { \
1722 int i; \
1723 int j; \
1724 struct rcu_node *rnp; \
1725 \
1561 rcu_init_one(rsp); \ 1726 rcu_init_one(rsp); \
1562 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ 1727 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
1563 j = 0; \ 1728 j = 0; \
@@ -1570,31 +1735,8 @@ do { \
1570 } \ 1735 } \
1571} while (0) 1736} while (0)
1572 1737
1573#ifdef CONFIG_TREE_PREEMPT_RCU
1574
1575void __init __rcu_init_preempt(void)
1576{
1577 int i; /* All used by RCU_INIT_FLAVOR(). */
1578 int j;
1579 struct rcu_node *rnp;
1580
1581 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
1582}
1583
1584#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1585
1586void __init __rcu_init_preempt(void)
1587{
1588}
1589
1590#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1591
1592void __init __rcu_init(void) 1738void __init __rcu_init(void)
1593{ 1739{
1594 int i; /* All used by RCU_INIT_FLAVOR(). */
1595 int j;
1596 struct rcu_node *rnp;
1597
1598 rcu_bootup_announce(); 1740 rcu_bootup_announce();
1599#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 1741#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
1600 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); 1742 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
@@ -1605,6 +1747,4 @@ void __init __rcu_init(void)
1605 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1747 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1606} 1748}
1607 1749
1608module_param(blimit, int, 0); 1750#include "rcutree_plugin.h"
1609module_param(qhimark, int, 0);
1610module_param(qlowmark, int, 0);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 8e8287a983c2..1899023b0962 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -48,14 +48,14 @@
48#elif NR_CPUS <= RCU_FANOUT_SQ 48#elif NR_CPUS <= RCU_FANOUT_SQ
49# define NUM_RCU_LVLS 2 49# define NUM_RCU_LVLS 2
50# define NUM_RCU_LVL_0 1 50# define NUM_RCU_LVL_0 1
51# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) 51# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
52# define NUM_RCU_LVL_2 (NR_CPUS) 52# define NUM_RCU_LVL_2 (NR_CPUS)
53# define NUM_RCU_LVL_3 0 53# define NUM_RCU_LVL_3 0
54#elif NR_CPUS <= RCU_FANOUT_CUBE 54#elif NR_CPUS <= RCU_FANOUT_CUBE
55# define NUM_RCU_LVLS 3 55# define NUM_RCU_LVLS 3
56# define NUM_RCU_LVL_0 1 56# define NUM_RCU_LVL_0 1
57# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) 57# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
58# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) 58# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
59# define NUM_RCU_LVL_3 NR_CPUS 59# define NUM_RCU_LVL_3 NR_CPUS
60#else 60#else
61# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" 61# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
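As a worked example of the new DIV_ROUND_UP sizing (values chosen for illustration): with CONFIG_RCU_FANOUT=64, RCU_FANOUT_SQ is 4096, so NR_CPUS=128 selects the two-level case and yields NUM_RCU_LVL_0 = 1 root node, NUM_RCU_LVL_1 = DIV_ROUND_UP(128, 64) = 2 second-level nodes, and NUM_RCU_LVL_2 = 128; the previous hand-rolled (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) computes the same 2, so the change is purely a readability cleanup.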
@@ -79,15 +79,21 @@ struct rcu_dynticks {
79 * Definition for node within the RCU grace-period-detection hierarchy. 79 * Definition for node within the RCU grace-period-detection hierarchy.
80 */ 80 */
81struct rcu_node { 81struct rcu_node {
82 spinlock_t lock; 82 spinlock_t lock; /* Root rcu_node's lock protects some */
83 /* rcu_state fields as well as following. */
83 long gpnum; /* Current grace period for this node. */ 84 long gpnum; /* Current grace period for this node. */
84 /* This will either be equal to or one */ 85 /* This will either be equal to or one */
85 /* behind the root rcu_node's gpnum. */ 86 /* behind the root rcu_node's gpnum. */
86 unsigned long qsmask; /* CPUs or groups that need to switch in */ 87 unsigned long qsmask; /* CPUs or groups that need to switch in */
87 /* order for current grace period to proceed.*/ 88 /* order for current grace period to proceed.*/
89 /* In leaf rcu_node, each bit corresponds to */
90 /* an rcu_data structure, otherwise, each */
91 /* bit corresponds to a child rcu_node */
92 /* structure. */
88 unsigned long qsmaskinit; 93 unsigned long qsmaskinit;
89 /* Per-GP initialization for qsmask. */ 94 /* Per-GP initialization for qsmask. */
90 unsigned long grpmask; /* Mask to apply to parent qsmask. */ 95 unsigned long grpmask; /* Mask to apply to parent qsmask. */
96 /* Only one bit will be set in this mask. */
91 int grplo; /* lowest-numbered CPU or group here. */ 97 int grplo; /* lowest-numbered CPU or group here. */
92 int grphi; /* highest-numbered CPU or group here. */ 98 int grphi; /* highest-numbered CPU or group here. */
93 u8 grpnum; /* CPU/group number for next level up. */ 99 u8 grpnum; /* CPU/group number for next level up. */
@@ -95,8 +101,23 @@ struct rcu_node {
95 struct rcu_node *parent; 101 struct rcu_node *parent;
96 struct list_head blocked_tasks[2]; 102 struct list_head blocked_tasks[2];
97 /* Tasks blocked in RCU read-side critsect. */ 103 /* Tasks blocked in RCU read-side critsect. */
104 /* Grace period number (->gpnum) x blocked */
105 /* by tasks on the (x & 0x1) element of the */
106 /* blocked_tasks[] array. */
98} ____cacheline_internodealigned_in_smp; 107} ____cacheline_internodealigned_in_smp;
99 108
109/*
110 * Do a full breadth-first scan of the rcu_node structures for the
111 * specified rcu_state structure.
112 */
113#define rcu_for_each_node_breadth_first(rsp, rnp) \
114 for ((rnp) = &(rsp)->node[0]; \
115 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
116
117#define rcu_for_each_leaf_node(rsp, rnp) \
118 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
119 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
120
100/* Index values for nxttail array in struct rcu_data. */ 121/* Index values for nxttail array in struct rcu_data. */
101#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ 122#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
102#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ 123#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
@@ -126,23 +147,30 @@ struct rcu_data {
126 * Any of the partitions might be empty, in which case the 147 * Any of the partitions might be empty, in which case the
127 * pointer to that partition will be equal to the pointer for 148 * pointer to that partition will be equal to the pointer for
128 * the following partition. When the list is empty, all of 149 * the following partition. When the list is empty, all of
129 * the nxttail elements point to nxtlist, which is NULL. 150 * the nxttail elements point to the ->nxtlist pointer itself,
151 * which in that case is NULL.
130 * 152 *
131 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
132 * Entries that might have arrived after current GP ended
133 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
134 * Entries known to have arrived before current GP ended
135 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
136 * Entries that batch # <= ->completed - 1: waiting for current GP
137 * [nxtlist, *nxttail[RCU_DONE_TAIL]): 153 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
138 * Entries that batch # <= ->completed 154 * Entries that batch # <= ->completed
139 * The grace period for these entries has completed, and 155 * The grace period for these entries has completed, and
140 * the other grace-period-completed entries may be moved 156 * the other grace-period-completed entries may be moved
141 * here temporarily in rcu_process_callbacks(). 157 * here temporarily in rcu_process_callbacks().
158 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
159 * Entries that batch # <= ->completed - 1: waiting for current GP
160 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
161 * Entries known to have arrived before current GP ended
162 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
163 * Entries that might have arrived after current GP ended
164 * Note that the value of *nxttail[RCU_NEXT_TAIL] will
165 * always be NULL, as this is the end of the list.
142 */ 166 */
143 struct rcu_head *nxtlist; 167 struct rcu_head *nxtlist;
144 struct rcu_head **nxttail[RCU_NEXT_SIZE]; 168 struct rcu_head **nxttail[RCU_NEXT_SIZE];
145 long qlen; /* # of queued callbacks */ 169 long qlen; /* # of queued callbacks */
170 long qlen_last_fqs_check;
171 /* qlen at last check for QS forcing */
172 unsigned long n_force_qs_snap;
173 /* did other CPU force QS recently? */
146 long blimit; /* Upper limit on a processed batch */ 174 long blimit; /* Upper limit on a processed batch */
147 175
148#ifdef CONFIG_NO_HZ 176#ifdef CONFIG_NO_HZ
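The reworked list comment above describes ->nxtlist as a single singly linked list that the four ->nxttail[] pointers-to-pointers cut into DONE/WAIT/NEXT_READY/NEXT segments, with *nxttail[RCU_NEXT_TAIL] always NULL and every tail pointer collapsing onto the ->nxtlist pointer itself when the list is empty. A small user-space sketch of that invariant and of tail enqueueing (cblist_init() and cblist_enqueue() are illustrative helpers, not kernel functions):

#include <stdio.h>

#define RCU_DONE_TAIL       0
#define RCU_WAIT_TAIL       1
#define RCU_NEXT_READY_TAIL 2
#define RCU_NEXT_TAIL       3
#define RCU_NEXT_SIZE       4

struct rcu_head { struct rcu_head *next; };

struct rcu_data {
        struct rcu_head *nxtlist;
        struct rcu_head **nxttail[RCU_NEXT_SIZE];
};

/* Empty list: every tail pointer references ->nxtlist itself. */
static void cblist_init(struct rcu_data *rdp)
{
        int i;

        rdp->nxtlist = NULL;
        for (i = 0; i < RCU_NEXT_SIZE; i++)
                rdp->nxttail[i] = &rdp->nxtlist;
}

/* New callbacks always land at the very end, in the RCU_NEXT segment. */
static void cblist_enqueue(struct rcu_data *rdp, struct rcu_head *rhp)
{
        rhp->next = NULL;
        *rdp->nxttail[RCU_NEXT_TAIL] = rhp;
        rdp->nxttail[RCU_NEXT_TAIL] = &rhp->next;
}

int main(void)
{
        struct rcu_data rdp;
        struct rcu_head a, b;

        cblist_init(&rdp);
        cblist_enqueue(&rdp, &a);
        cblist_enqueue(&rdp, &b);
        printf("head is a: %d, *NEXT_TAIL is NULL: %d\n",
               rdp.nxtlist == &a, *rdp.nxttail[RCU_NEXT_TAIL] == NULL);
        return 0;
}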
@@ -173,9 +201,10 @@ struct rcu_data {
173}; 201};
174 202
175/* Values for signaled field in struct rcu_state. */ 203/* Values for signaled field in struct rcu_state. */
176#define RCU_GP_INIT 0 /* Grace period being initialized. */ 204#define RCU_GP_IDLE 0 /* No grace period in progress. */
177#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ 205#define RCU_GP_INIT 1 /* Grace period being initialized. */
178#define RCU_FORCE_QS 2 /* Need to force quiescent state. */ 206#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
207#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
179#ifdef CONFIG_NO_HZ 208#ifdef CONFIG_NO_HZ
180#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK 209#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
181#else /* #ifdef CONFIG_NO_HZ */ 210#else /* #ifdef CONFIG_NO_HZ */
@@ -216,8 +245,19 @@ struct rcu_state {
216 /* Force QS state. */ 245 /* Force QS state. */
217 long gpnum; /* Current gp number. */ 246 long gpnum; /* Current gp number. */
218 long completed; /* # of last completed gp. */ 247 long completed; /* # of last completed gp. */
248
249 /* End of fields guarded by root rcu_node's lock. */
250
219 spinlock_t onofflock; /* exclude on/offline and */ 251 spinlock_t onofflock; /* exclude on/offline and */
220 /* starting new GP. */ 252 /* starting new GP. Also */
253 /* protects the following */
254 /* orphan_cbs fields. */
255 struct rcu_head *orphan_cbs_list; /* list of rcu_head structs */
256 /* orphaned by all CPUs in */
257 /* a given leaf rcu_node */
258 /* going offline. */
259 struct rcu_head **orphan_cbs_tail; /* And tail pointer. */
260 long orphan_qlen; /* Number of orphaned cbs. */
221 spinlock_t fqslock; /* Only one task forcing */ 261 spinlock_t fqslock; /* Only one task forcing */
222 /* quiescent states. */ 262 /* quiescent states. */
223 unsigned long jiffies_force_qs; /* Time at which to invoke */ 263 unsigned long jiffies_force_qs; /* Time at which to invoke */
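The ->orphan_cbs_list/->orphan_cbs_tail pair added above is another tail-pointer list: under ->onofflock, the callbacks of CPUs in a leaf rcu_node that has gone completely offline are spliced onto it and ->orphan_qlen is bumped, so that a surviving CPU can adopt them later. A hedged sketch of the splice step, reusing the toy rcu_data and cblist_init() from the previous sketch (the struct and function names here are illustrative, not the kernel's):

/* Assumes struct rcu_head / struct rcu_data from the sketch above. */
struct rcu_state_sketch {
        struct rcu_head *orphan_cbs_list;
        struct rcu_head **orphan_cbs_tail;   /* starts out as &orphan_cbs_list */
        long orphan_qlen;
};

/* Move every callback queued on rdp onto rsp's orphan list. */
static void send_cbs_to_orphanage(struct rcu_state_sketch *rsp,
                                  struct rcu_data *rdp, long qlen)
{
        if (rdp->nxtlist == NULL)
                return;                              /* nothing queued */

        *rsp->orphan_cbs_tail = rdp->nxtlist;        /* splice whole list */
        rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
        rsp->orphan_qlen += qlen;
        cblist_init(rdp);                            /* CPU's own list is now empty */
}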
@@ -255,5 +295,30 @@ extern struct rcu_state rcu_preempt_state;
255DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); 295DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
256#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 296#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
257 297
258#endif /* #ifdef RCU_TREE_NONCORE */ 298#else /* #ifdef RCU_TREE_NONCORE */
299
300/* Forward declarations for rcutree_plugin.h */
301static inline void rcu_bootup_announce(void);
302long rcu_batches_completed(void);
303static void rcu_preempt_note_context_switch(int cpu);
304static int rcu_preempted_readers(struct rcu_node *rnp);
305#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
306static void rcu_print_task_stall(struct rcu_node *rnp);
307#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
308static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
309#ifdef CONFIG_HOTPLUG_CPU
310static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
311 struct rcu_node *rnp,
312 struct rcu_data *rdp);
313static void rcu_preempt_offline_cpu(int cpu);
314#endif /* #ifdef CONFIG_HOTPLUG_CPU */
315static void rcu_preempt_check_callbacks(int cpu);
316static void rcu_preempt_process_callbacks(void);
317void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
318static int rcu_preempt_pending(int cpu);
319static int rcu_preempt_needs_cpu(int cpu);
320static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
321static void rcu_preempt_send_cbs_to_orphanage(void);
322static void __init __rcu_init_preempt(void);
259 323
324#endif /* #else #ifdef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 1cee04f627eb..ef2a58c2b9d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -150,6 +150,16 @@ void __rcu_read_lock(void)
150} 150}
151EXPORT_SYMBOL_GPL(__rcu_read_lock); 151EXPORT_SYMBOL_GPL(__rcu_read_lock);
152 152
153/*
154 * Check for preempted RCU readers blocking the current grace period
155 * for the specified rcu_node structure. If the caller needs a reliable
156 * answer, it must hold the rcu_node's ->lock.
157 */
158static int rcu_preempted_readers(struct rcu_node *rnp)
159{
160 return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
161}
162
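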
153static void rcu_read_unlock_special(struct task_struct *t) 163static void rcu_read_unlock_special(struct task_struct *t)
154{ 164{
155 int empty; 165 int empty;
@@ -196,7 +206,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
196 break; 206 break;
197 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 207 spin_unlock(&rnp->lock); /* irqs remain disabled. */
198 } 208 }
199 empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); 209 empty = !rcu_preempted_readers(rnp);
200 list_del_init(&t->rcu_node_entry); 210 list_del_init(&t->rcu_node_entry);
201 t->rcu_blocked_node = NULL; 211 t->rcu_blocked_node = NULL;
202 212
@@ -207,7 +217,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
207 * drop rnp->lock and restore irq. 217 * drop rnp->lock and restore irq.
208 */ 218 */
209 if (!empty && rnp->qsmask == 0 && 219 if (!empty && rnp->qsmask == 0 &&
210 list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) { 220 !rcu_preempted_readers(rnp)) {
211 struct rcu_node *rnp_p; 221 struct rcu_node *rnp_p;
212 222
213 if (rnp->parent == NULL) { 223 if (rnp->parent == NULL) {
@@ -257,12 +267,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
257{ 267{
258 unsigned long flags; 268 unsigned long flags;
259 struct list_head *lp; 269 struct list_head *lp;
260 int phase = rnp->gpnum & 0x1; 270 int phase;
261 struct task_struct *t; 271 struct task_struct *t;
262 272
263 if (!list_empty(&rnp->blocked_tasks[phase])) { 273 if (rcu_preempted_readers(rnp)) {
264 spin_lock_irqsave(&rnp->lock, flags); 274 spin_lock_irqsave(&rnp->lock, flags);
265 phase = rnp->gpnum & 0x1; /* re-read under lock. */ 275 phase = rnp->gpnum & 0x1;
266 lp = &rnp->blocked_tasks[phase]; 276 lp = &rnp->blocked_tasks[phase];
267 list_for_each_entry(t, lp, rcu_node_entry) 277 list_for_each_entry(t, lp, rcu_node_entry)
268 printk(" P%d", t->pid); 278 printk(" P%d", t->pid);
@@ -281,20 +291,10 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
281 */ 291 */
282static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 292static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
283{ 293{
284 WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])); 294 WARN_ON_ONCE(rcu_preempted_readers(rnp));
285 WARN_ON_ONCE(rnp->qsmask); 295 WARN_ON_ONCE(rnp->qsmask);
286} 296}
287 297
288/*
289 * Check for preempted RCU readers for the specified rcu_node structure.
290 * If the caller needs a reliable answer, it must hold the rcu_node's
291 * >lock.
292 */
293static int rcu_preempted_readers(struct rcu_node *rnp)
294{
295 return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
296}
297
298#ifdef CONFIG_HOTPLUG_CPU 298#ifdef CONFIG_HOTPLUG_CPU
299 299
300/* 300/*
@@ -304,21 +304,25 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
304 * parent is to remove the need for rcu_read_unlock_special() to 304 * parent is to remove the need for rcu_read_unlock_special() to
305 * make more than two attempts to acquire the target rcu_node's lock. 305 * make more than two attempts to acquire the target rcu_node's lock.
306 * 306 *
307 * Returns 1 if there was previously a task blocking the current grace
308 * period on the specified rcu_node structure.
309 *
307 * The caller must hold rnp->lock with irqs disabled. 310 * The caller must hold rnp->lock with irqs disabled.
308 */ 311 */
309static void rcu_preempt_offline_tasks(struct rcu_state *rsp, 312static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
310 struct rcu_node *rnp, 313 struct rcu_node *rnp,
311 struct rcu_data *rdp) 314 struct rcu_data *rdp)
312{ 315{
313 int i; 316 int i;
314 struct list_head *lp; 317 struct list_head *lp;
315 struct list_head *lp_root; 318 struct list_head *lp_root;
319 int retval = rcu_preempted_readers(rnp);
316 struct rcu_node *rnp_root = rcu_get_root(rsp); 320 struct rcu_node *rnp_root = rcu_get_root(rsp);
317 struct task_struct *tp; 321 struct task_struct *tp;
318 322
319 if (rnp == rnp_root) { 323 if (rnp == rnp_root) {
320 WARN_ONCE(1, "Last CPU thought to be offlined?"); 324 WARN_ONCE(1, "Last CPU thought to be offlined?");
321 return; /* Shouldn't happen: at least one CPU online. */ 325 return 0; /* Shouldn't happen: at least one CPU online. */
322 } 326 }
323 WARN_ON_ONCE(rnp != rdp->mynode && 327 WARN_ON_ONCE(rnp != rdp->mynode &&
324 (!list_empty(&rnp->blocked_tasks[0]) || 328 (!list_empty(&rnp->blocked_tasks[0]) ||
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
342 spin_unlock(&rnp_root->lock); /* irqs remain disabled */ 346 spin_unlock(&rnp_root->lock); /* irqs remain disabled */
343 } 347 }
344 } 348 }
349
350 return retval;
345} 351}
346 352
347/* 353/*
@@ -393,6 +399,17 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
393EXPORT_SYMBOL_GPL(call_rcu); 399EXPORT_SYMBOL_GPL(call_rcu);
394 400
395/* 401/*
402 * Wait for an rcu-preempt grace period. We are supposed to expedite the
403 * grace period, but this is the crude slow compatability hack, so just
404 * invoke synchronize_rcu().
405 */
406void synchronize_rcu_expedited(void)
407{
408 synchronize_rcu();
409}
410EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
411
412/*
396 * Check to see if there is any immediate preemptable-RCU-related work 413 * Check to see if there is any immediate preemptable-RCU-related work
397 * to be done. 414 * to be done.
398 */ 415 */
@@ -410,6 +427,15 @@ static int rcu_preempt_needs_cpu(int cpu)
410 return !!per_cpu(rcu_preempt_data, cpu).nxtlist; 427 return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
411} 428}
412 429
430/**
431 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
432 */
433void rcu_barrier(void)
434{
435 _rcu_barrier(&rcu_preempt_state, call_rcu);
436}
437EXPORT_SYMBOL_GPL(rcu_barrier);
438
413/* 439/*
414 * Initialize preemptable RCU's per-CPU data. 440 * Initialize preemptable RCU's per-CPU data.
415 */ 441 */
@@ -419,6 +445,22 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
419} 445}
420 446
421/* 447/*
448 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
449 */
450static void rcu_preempt_send_cbs_to_orphanage(void)
451{
452 rcu_send_cbs_to_orphanage(&rcu_preempt_state);
453}
454
455/*
456 * Initialize preemptable RCU's state structures.
457 */
458static void __init __rcu_init_preempt(void)
459{
460 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
461}
462
463/*
422 * Check for a task exiting while in a preemptable-RCU read-side 464 * Check for a task exiting while in a preemptable-RCU read-side
423 * critical section, clean up if so. No need to issue warnings, 465 * critical section, clean up if so. No need to issue warnings,
424 * as debug_check_no_locks_held() already does this if lockdep 466 * as debug_check_no_locks_held() already does this if lockdep
@@ -461,6 +503,15 @@ static void rcu_preempt_note_context_switch(int cpu)
461{ 503{
462} 504}
463 505
506/*
507 * Because preemptable RCU does not exist, there are never any preempted
508 * RCU readers.
509 */
510static int rcu_preempted_readers(struct rcu_node *rnp)
511{
512 return 0;
513}
514
464#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 515#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
465 516
466/* 517/*
@@ -483,25 +534,19 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
483 WARN_ON_ONCE(rnp->qsmask); 534 WARN_ON_ONCE(rnp->qsmask);
484} 535}
485 536
486/*
487 * Because preemptable RCU does not exist, there are never any preempted
488 * RCU readers.
489 */
490static int rcu_preempted_readers(struct rcu_node *rnp)
491{
492 return 0;
493}
494
495#ifdef CONFIG_HOTPLUG_CPU 537#ifdef CONFIG_HOTPLUG_CPU
496 538
497/* 539/*
498 * Because preemptable RCU does not exist, it never needs to migrate 540 * Because preemptable RCU does not exist, it never needs to migrate
499 * tasks that were blocked within RCU read-side critical sections. 541 * tasks that were blocked within RCU read-side critical sections, and
542 * such non-existent tasks cannot possibly have been blocking the current
543 * grace period.
500 */ 544 */
501static void rcu_preempt_offline_tasks(struct rcu_state *rsp, 545static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
502 struct rcu_node *rnp, 546 struct rcu_node *rnp,
503 struct rcu_data *rdp) 547 struct rcu_data *rdp)
504{ 548{
549 return 0;
505} 550}
506 551
507/* 552/*
@@ -518,7 +563,7 @@ static void rcu_preempt_offline_cpu(int cpu)
518 * Because preemptable RCU does not exist, it never has any callbacks 563 * Because preemptable RCU does not exist, it never has any callbacks
519 * to check. 564 * to check.
520 */ 565 */
521void rcu_preempt_check_callbacks(int cpu) 566static void rcu_preempt_check_callbacks(int cpu)
522{ 567{
523} 568}
524 569
@@ -526,7 +571,7 @@ void rcu_preempt_check_callbacks(int cpu)
526 * Because preemptable RCU does not exist, it never has any callbacks 571 * Because preemptable RCU does not exist, it never has any callbacks
527 * to process. 572 * to process.
528 */ 573 */
529void rcu_preempt_process_callbacks(void) 574static void rcu_preempt_process_callbacks(void)
530{ 575{
531} 576}
532 577
@@ -540,6 +585,16 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
540EXPORT_SYMBOL_GPL(call_rcu); 585EXPORT_SYMBOL_GPL(call_rcu);
541 586
542/* 587/*
588 * Wait for an rcu-preempt grace period, but make it happen quickly.
589 * But because preemptable RCU does not exist, map to rcu-sched.
590 */
591void synchronize_rcu_expedited(void)
592{
593 synchronize_sched_expedited();
594}
595EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
596
597/*
543 * Because preemptable RCU does not exist, it never has any work to do. 598 * Because preemptable RCU does not exist, it never has any work to do.
544 */ 599 */
545static int rcu_preempt_pending(int cpu) 600static int rcu_preempt_pending(int cpu)
@@ -556,6 +611,16 @@ static int rcu_preempt_needs_cpu(int cpu)
556} 611}
557 612
558/* 613/*
614 * Because preemptable RCU does not exist, rcu_barrier() is just
615 * another name for rcu_barrier_sched().
616 */
617void rcu_barrier(void)
618{
619 rcu_barrier_sched();
620}
621EXPORT_SYMBOL_GPL(rcu_barrier);
622
623/*
559 * Because preemptable RCU does not exist, there is no per-CPU 624 * Because preemptable RCU does not exist, there is no per-CPU
560 * data to initialize. 625 * data to initialize.
561 */ 626 */
@@ -563,4 +628,18 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
563{ 628{
564} 629}
565 630
631/*
632 * Because there is no preemptable RCU, there are no callbacks to move.
633 */
634static void rcu_preempt_send_cbs_to_orphanage(void)
635{
636}
637
638/*
639 * Because preemptable RCU does not exist, it need not be initialized.
640 */
641static void __init __rcu_init_preempt(void)
642{
643}
644
566#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 645#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index c89f5e9fd173..4b31c779e62e 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -93,7 +93,7 @@ static int rcudata_open(struct inode *inode, struct file *file)
93 return single_open(file, show_rcudata, NULL); 93 return single_open(file, show_rcudata, NULL);
94} 94}
95 95
96static struct file_operations rcudata_fops = { 96static const struct file_operations rcudata_fops = {
97 .owner = THIS_MODULE, 97 .owner = THIS_MODULE,
98 .open = rcudata_open, 98 .open = rcudata_open,
99 .read = seq_read, 99 .read = seq_read,
@@ -145,7 +145,7 @@ static int rcudata_csv_open(struct inode *inode, struct file *file)
145 return single_open(file, show_rcudata_csv, NULL); 145 return single_open(file, show_rcudata_csv, NULL);
146} 146}
147 147
148static struct file_operations rcudata_csv_fops = { 148static const struct file_operations rcudata_csv_fops = {
149 .owner = THIS_MODULE, 149 .owner = THIS_MODULE,
150 .open = rcudata_csv_open, 150 .open = rcudata_csv_open,
151 .read = seq_read, 151 .read = seq_read,
@@ -159,13 +159,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
159 struct rcu_node *rnp; 159 struct rcu_node *rnp;
160 160
161 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " 161 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
162 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", 162 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n",
163 rsp->completed, rsp->gpnum, rsp->signaled, 163 rsp->completed, rsp->gpnum, rsp->signaled,
164 (long)(rsp->jiffies_force_qs - jiffies), 164 (long)(rsp->jiffies_force_qs - jiffies),
165 (int)(jiffies & 0xffff), 165 (int)(jiffies & 0xffff),
166 rsp->n_force_qs, rsp->n_force_qs_ngp, 166 rsp->n_force_qs, rsp->n_force_qs_ngp,
167 rsp->n_force_qs - rsp->n_force_qs_ngp, 167 rsp->n_force_qs - rsp->n_force_qs_ngp,
168 rsp->n_force_qs_lh); 168 rsp->n_force_qs_lh, rsp->orphan_qlen);
169 for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { 169 for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
170 if (rnp->level != level) { 170 if (rnp->level != level) {
171 seq_puts(m, "\n"); 171 seq_puts(m, "\n");
@@ -196,7 +196,7 @@ static int rcuhier_open(struct inode *inode, struct file *file)
196 return single_open(file, show_rcuhier, NULL); 196 return single_open(file, show_rcuhier, NULL);
197} 197}
198 198
199static struct file_operations rcuhier_fops = { 199static const struct file_operations rcuhier_fops = {
200 .owner = THIS_MODULE, 200 .owner = THIS_MODULE,
201 .open = rcuhier_open, 201 .open = rcuhier_open,
202 .read = seq_read, 202 .read = seq_read,
@@ -222,7 +222,7 @@ static int rcugp_open(struct inode *inode, struct file *file)
222 return single_open(file, show_rcugp, NULL); 222 return single_open(file, show_rcugp, NULL);
223} 223}
224 224
225static struct file_operations rcugp_fops = { 225static const struct file_operations rcugp_fops = {
226 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
227 .open = rcugp_open, 227 .open = rcugp_open,
228 .read = seq_read, 228 .read = seq_read,
@@ -276,7 +276,7 @@ static int rcu_pending_open(struct inode *inode, struct file *file)
276 return single_open(file, show_rcu_pending, NULL); 276 return single_open(file, show_rcu_pending, NULL);
277} 277}
278 278
279static struct file_operations rcu_pending_fops = { 279static const struct file_operations rcu_pending_fops = {
280 .owner = THIS_MODULE, 280 .owner = THIS_MODULE,
281 .open = rcu_pending_open, 281 .open = rcu_pending_open,
282 .read = seq_read, 282 .read = seq_read,
diff --git a/kernel/relay.c b/kernel/relay.c
index bc188549788f..760c26209a3c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -60,7 +60,7 @@ static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
60/* 60/*
61 * vm_ops for relay file mappings. 61 * vm_ops for relay file mappings.
62 */ 62 */
63static struct vm_operations_struct relay_file_mmap_ops = { 63static const struct vm_operations_struct relay_file_mmap_ops = {
64 .fault = relay_buf_fault, 64 .fault = relay_buf_fault,
65 .close = relay_file_mmap_close, 65 .close = relay_file_mmap_close,
66}; 66};
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 88faec23e833..bcdabf37c40b 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -37,27 +37,17 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
37} 37}
38 38
39int res_counter_charge(struct res_counter *counter, unsigned long val, 39int res_counter_charge(struct res_counter *counter, unsigned long val,
40 struct res_counter **limit_fail_at, 40 struct res_counter **limit_fail_at)
41 struct res_counter **soft_limit_fail_at)
42{ 41{
43 int ret; 42 int ret;
44 unsigned long flags; 43 unsigned long flags;
45 struct res_counter *c, *u; 44 struct res_counter *c, *u;
46 45
47 *limit_fail_at = NULL; 46 *limit_fail_at = NULL;
48 if (soft_limit_fail_at)
49 *soft_limit_fail_at = NULL;
50 local_irq_save(flags); 47 local_irq_save(flags);
51 for (c = counter; c != NULL; c = c->parent) { 48 for (c = counter; c != NULL; c = c->parent) {
52 spin_lock(&c->lock); 49 spin_lock(&c->lock);
53 ret = res_counter_charge_locked(c, val); 50 ret = res_counter_charge_locked(c, val);
54 /*
55 * With soft limits, we return the highest ancestor
56 * that exceeds its soft limit
57 */
58 if (soft_limit_fail_at &&
59 !res_counter_soft_limit_check_locked(c))
60 *soft_limit_fail_at = c;
61 spin_unlock(&c->lock); 51 spin_unlock(&c->lock);
62 if (ret < 0) { 52 if (ret < 0) {
63 *limit_fail_at = c; 53 *limit_fail_at = c;
@@ -85,8 +75,7 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
85 counter->usage -= val; 75 counter->usage -= val;
86} 76}
87 77
88void res_counter_uncharge(struct res_counter *counter, unsigned long val, 78void res_counter_uncharge(struct res_counter *counter, unsigned long val)
89 bool *was_soft_limit_excess)
90{ 79{
91 unsigned long flags; 80 unsigned long flags;
92 struct res_counter *c; 81 struct res_counter *c;
@@ -94,9 +83,6 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val,
94 local_irq_save(flags); 83 local_irq_save(flags);
95 for (c = counter; c != NULL; c = c->parent) { 84 for (c = counter; c != NULL; c = c->parent) {
96 spin_lock(&c->lock); 85 spin_lock(&c->lock);
97 if (was_soft_limit_excess)
98 *was_soft_limit_excess =
99 !res_counter_soft_limit_check_locked(c);
100 res_counter_uncharge_locked(c, val); 86 res_counter_uncharge_locked(c, val);
101 spin_unlock(&c->lock); 87 spin_unlock(&c->lock);
102 } 88 }
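With the soft-limit arguments gone, res_counter_charge() is back to a plain walk up the counter hierarchy: charge each level under its lock, and on the first hard-limit failure report the offending counter through *limit_fail_at and unwind the levels already charged. A user-space sketch of that charge-then-unwind pattern (no IRQ or locking details, and a bare usage/limit pair stands in for struct res_counter):

#include <stdio.h>

struct counter {
        unsigned long usage, limit;
        struct counter *parent;
};

static int charge_one(struct counter *c, unsigned long val)
{
        if (c->usage + val > c->limit)
                return -1;                      /* over the hard limit */
        c->usage += val;
        return 0;
}

/* Charge val against c and all of its ancestors, or against none of them. */
static int charge(struct counter *c, unsigned long val,
                  struct counter **limit_fail_at)
{
        struct counter *pos, *undo;

        *limit_fail_at = NULL;
        for (pos = c; pos != NULL; pos = pos->parent) {
                if (charge_one(pos, val) < 0) {
                        *limit_fail_at = pos;
                        /* undo the levels charged below the one that failed */
                        for (undo = c; undo != pos; undo = undo->parent)
                                undo->usage -= val;
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct counter root  = { 0, 100,  NULL };
        struct counter child = { 0, 1000, &root };
        struct counter *failed;

        printf("first:  %d\n", charge(&child, 80, &failed));    /* succeeds  */
        printf("second: %d (failed at root: %d)\n",
               charge(&child, 80, &failed), failed == &root);   /* root full */
        printf("child usage after rollback: %lu\n", child.usage); /* 80 */
        return 0;
}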
diff --git a/kernel/sched.c b/kernel/sched.c
index ee61f454a98b..3c11ae0a948d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
309 */ 309 */
310static DEFINE_SPINLOCK(task_group_lock); 310static DEFINE_SPINLOCK(task_group_lock);
311 311
312#ifdef CONFIG_FAIR_GROUP_SCHED
313
312#ifdef CONFIG_SMP 314#ifdef CONFIG_SMP
313static int root_task_group_empty(void) 315static int root_task_group_empty(void)
314{ 316{
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
316} 318}
317#endif 319#endif
318 320
319#ifdef CONFIG_FAIR_GROUP_SCHED
320#ifdef CONFIG_USER_SCHED 321#ifdef CONFIG_USER_SCHED
321# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) 322# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
322#else /* !CONFIG_USER_SCHED */ 323#else /* !CONFIG_USER_SCHED */
@@ -676,6 +677,7 @@ inline void update_rq_clock(struct rq *rq)
676 677
677/** 678/**
678 * runqueue_is_locked 679 * runqueue_is_locked
680 * @cpu: the processor in question.
679 * 681 *
680 * Returns true if the current cpu runqueue is locked. 682 * Returns true if the current cpu runqueue is locked.
681 * This interface allows printk to be called with the runqueue lock 683 * This interface allows printk to be called with the runqueue lock
@@ -780,7 +782,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
780 return single_open(filp, sched_feat_show, NULL); 782 return single_open(filp, sched_feat_show, NULL);
781} 783}
782 784
783static struct file_operations sched_feat_fops = { 785static const struct file_operations sched_feat_fops = {
784 .open = sched_feat_open, 786 .open = sched_feat_open,
785 .write = sched_feat_write, 787 .write = sched_feat_write,
786 .read = seq_read, 788 .read = seq_read,
@@ -1563,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
1563 1565
1564#ifdef CONFIG_FAIR_GROUP_SCHED 1566#ifdef CONFIG_FAIR_GROUP_SCHED
1565 1567
1566struct update_shares_data { 1568static __read_mostly unsigned long *update_shares_data;
1567 unsigned long rq_weight[NR_CPUS];
1568};
1569
1570static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
1571 1569
1572static void __set_se_shares(struct sched_entity *se, unsigned long shares); 1570static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1573 1571
@@ -1577,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1577static void update_group_shares_cpu(struct task_group *tg, int cpu, 1575static void update_group_shares_cpu(struct task_group *tg, int cpu,
1578 unsigned long sd_shares, 1576 unsigned long sd_shares,
1579 unsigned long sd_rq_weight, 1577 unsigned long sd_rq_weight,
1580 struct update_shares_data *usd) 1578 unsigned long *usd_rq_weight)
1581{ 1579{
1582 unsigned long shares, rq_weight; 1580 unsigned long shares, rq_weight;
1583 int boost = 0; 1581 int boost = 0;
1584 1582
1585 rq_weight = usd->rq_weight[cpu]; 1583 rq_weight = usd_rq_weight[cpu];
1586 if (!rq_weight) { 1584 if (!rq_weight) {
1587 boost = 1; 1585 boost = 1;
1588 rq_weight = NICE_0_LOAD; 1586 rq_weight = NICE_0_LOAD;
@@ -1617,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1617static int tg_shares_up(struct task_group *tg, void *data) 1615static int tg_shares_up(struct task_group *tg, void *data)
1618{ 1616{
1619 unsigned long weight, rq_weight = 0, shares = 0; 1617 unsigned long weight, rq_weight = 0, shares = 0;
1620 struct update_shares_data *usd; 1618 unsigned long *usd_rq_weight;
1621 struct sched_domain *sd = data; 1619 struct sched_domain *sd = data;
1622 unsigned long flags; 1620 unsigned long flags;
1623 int i; 1621 int i;
@@ -1626,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
1626 return 0; 1624 return 0;
1627 1625
1628 local_irq_save(flags); 1626 local_irq_save(flags);
1629 usd = &__get_cpu_var(update_shares_data); 1627 usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
1630 1628
1631 for_each_cpu(i, sched_domain_span(sd)) { 1629 for_each_cpu(i, sched_domain_span(sd)) {
1632 weight = tg->cfs_rq[i]->load.weight; 1630 weight = tg->cfs_rq[i]->load.weight;
1633 usd->rq_weight[i] = weight; 1631 usd_rq_weight[i] = weight;
1634 1632
1635 /* 1633 /*
1636 * If there are currently no tasks on the cpu pretend there 1634 * If there are currently no tasks on the cpu pretend there
@@ -1651,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
1651 shares = tg->shares; 1649 shares = tg->shares;
1652 1650
1653 for_each_cpu(i, sched_domain_span(sd)) 1651 for_each_cpu(i, sched_domain_span(sd))
1654 update_group_shares_cpu(tg, i, shares, rq_weight, usd); 1652 update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
1655 1653
1656 local_irq_restore(flags); 1654 local_irq_restore(flags);
1657 1655
@@ -1995,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1995 p->sched_class->prio_changed(rq, p, oldprio, running); 1993 p->sched_class->prio_changed(rq, p, oldprio, running);
1996} 1994}
1997 1995
1996/**
1997 * kthread_bind - bind a just-created kthread to a cpu.
1998 * @p: thread created by kthread_create().
1999 * @cpu: cpu (might not be online, must be possible) for @k to run on.
2000 *
2001 * Description: This function is equivalent to set_cpus_allowed(),
2002 * except that @cpu doesn't need to be online, and the thread must be
2003 * stopped (i.e., just returned from kthread_create()).
2004 *
2005 * Function lives here instead of kthread.c because it messes with
2006 * scheduler internals which require locking.
2007 */
2008void kthread_bind(struct task_struct *p, unsigned int cpu)
2009{
2010 struct rq *rq = cpu_rq(cpu);
2011 unsigned long flags;
2012
2013 /* Must have done schedule() in kthread() before we set_task_cpu */
2014 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
2015 WARN_ON(1);
2016 return;
2017 }
2018
2019 spin_lock_irqsave(&rq->lock, flags);
2020 set_task_cpu(p, cpu);
2021 p->cpus_allowed = cpumask_of_cpu(cpu);
2022 p->rt.nr_cpus_allowed = 1;
2023 p->flags |= PF_THREAD_BOUND;
2024 spin_unlock_irqrestore(&rq->lock, flags);
2025}
2026EXPORT_SYMBOL(kthread_bind);
2027
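kthread_bind() as added above is only safe on a thread that has not run yet, so the usual caller creates the kthread, binds it, and only then wakes it up. A hedged kernel-side sketch of that sequence (my_worker() and start_bound_worker() are hypothetical names, and error handling is trimmed to the essentials):

/* Assumed context: kernel code on an SMP build, cpu already validated. */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int my_worker(void *data)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

static struct task_struct *start_bound_worker(unsigned int cpu)
{
        struct task_struct *p;

        p = kthread_create(my_worker, NULL, "my_worker/%u", cpu);
        if (IS_ERR(p))
                return p;

        /* The thread has not been woken yet, so binding is race-free. */
        kthread_bind(p, cpu);
        wake_up_process(p);
        return p;
}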
1998#ifdef CONFIG_SMP 2028#ifdef CONFIG_SMP
1999/* 2029/*
2000 * Is this task likely cache-hot: 2030 * Is this task likely cache-hot:
@@ -2007,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2007 /* 2037 /*
2008 * Buddy candidates are cache hot: 2038 * Buddy candidates are cache hot:
2009 */ 2039 */
2010 if (sched_feat(CACHE_HOT_BUDDY) && 2040 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
2011 (&p->se == cfs_rq_of(&p->se)->next || 2041 (&p->se == cfs_rq_of(&p->se)->next ||
2012 &p->se == cfs_rq_of(&p->se)->last)) 2042 &p->se == cfs_rq_of(&p->se)->last))
2013 return 1; 2043 return 1;
@@ -2311,7 +2341,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2311{ 2341{
2312 int cpu, orig_cpu, this_cpu, success = 0; 2342 int cpu, orig_cpu, this_cpu, success = 0;
2313 unsigned long flags; 2343 unsigned long flags;
2314 struct rq *rq; 2344 struct rq *rq, *orig_rq;
2315 2345
2316 if (!sched_feat(SYNC_WAKEUPS)) 2346 if (!sched_feat(SYNC_WAKEUPS))
2317 wake_flags &= ~WF_SYNC; 2347 wake_flags &= ~WF_SYNC;
@@ -2319,7 +2349,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2319 this_cpu = get_cpu(); 2349 this_cpu = get_cpu();
2320 2350
2321 smp_wmb(); 2351 smp_wmb();
2322 rq = task_rq_lock(p, &flags); 2352 rq = orig_rq = task_rq_lock(p, &flags);
2323 update_rq_clock(rq); 2353 update_rq_clock(rq);
2324 if (!(p->state & state)) 2354 if (!(p->state & state))
2325 goto out; 2355 goto out;
@@ -2350,6 +2380,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2350 set_task_cpu(p, cpu); 2380 set_task_cpu(p, cpu);
2351 2381
2352 rq = task_rq_lock(p, &flags); 2382 rq = task_rq_lock(p, &flags);
2383
2384 if (rq != orig_rq)
2385 update_rq_clock(rq);
2386
2353 WARN_ON(p->state != TASK_WAKING); 2387 WARN_ON(p->state != TASK_WAKING);
2354 cpu = task_cpu(p); 2388 cpu = task_cpu(p);
2355 2389
@@ -2515,22 +2549,17 @@ void sched_fork(struct task_struct *p, int clone_flags)
2515 __sched_fork(p); 2549 __sched_fork(p);
2516 2550
2517 /* 2551 /*
2518 * Make sure we do not leak PI boosting priority to the child.
2519 */
2520 p->prio = current->normal_prio;
2521
2522 /*
2523 * Revert to default priority/policy on fork if requested. 2552 * Revert to default priority/policy on fork if requested.
2524 */ 2553 */
2525 if (unlikely(p->sched_reset_on_fork)) { 2554 if (unlikely(p->sched_reset_on_fork)) {
2526 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) 2555 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
2527 p->policy = SCHED_NORMAL; 2556 p->policy = SCHED_NORMAL;
2528 2557 p->normal_prio = p->static_prio;
2529 if (p->normal_prio < DEFAULT_PRIO) 2558 }
2530 p->prio = DEFAULT_PRIO;
2531 2559
2532 if (PRIO_TO_NICE(p->static_prio) < 0) { 2560 if (PRIO_TO_NICE(p->static_prio) < 0) {
2533 p->static_prio = NICE_TO_PRIO(0); 2561 p->static_prio = NICE_TO_PRIO(0);
2562 p->normal_prio = p->static_prio;
2534 set_load_weight(p); 2563 set_load_weight(p);
2535 } 2564 }
2536 2565
@@ -2541,6 +2570,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
2541 p->sched_reset_on_fork = 0; 2570 p->sched_reset_on_fork = 0;
2542 } 2571 }
2543 2572
2573 /*
2574 * Make sure we do not leak PI boosting priority to the child.
2575 */
2576 p->prio = current->normal_prio;
2577
2544 if (!rt_prio(p->prio)) 2578 if (!rt_prio(p->prio))
2545 p->sched_class = &fair_sched_class; 2579 p->sched_class = &fair_sched_class;
2546 2580
@@ -2581,8 +2615,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2581 BUG_ON(p->state != TASK_RUNNING); 2615 BUG_ON(p->state != TASK_RUNNING);
2582 update_rq_clock(rq); 2616 update_rq_clock(rq);
2583 2617
2584 p->prio = effective_prio(p);
2585
2586 if (!p->sched_class->task_new || !current->se.on_rq) { 2618 if (!p->sched_class->task_new || !current->se.on_rq) {
2587 activate_task(rq, p, 0); 2619 activate_task(rq, p, 0);
2588 } else { 2620 } else {
@@ -3658,6 +3690,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
3658 3690
3659/** 3691/**
3660 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 3692 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3693 * @sd: The sched_domain whose statistics are to be updated.
3661 * @group: sched_group whose statistics are to be updated. 3694 * @group: sched_group whose statistics are to be updated.
3662 * @this_cpu: Cpu for which load balance is currently performed. 3695 * @this_cpu: Cpu for which load balance is currently performed.
3663 * @idle: Idle status of this_cpu 3696 * @idle: Idle status of this_cpu
@@ -6720,9 +6753,6 @@ EXPORT_SYMBOL(yield);
6720/* 6753/*
6721 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 6754 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
6722 * that process accounting knows that this is a task in IO wait state. 6755 * that process accounting knows that this is a task in IO wait state.
6723 *
6724 * But don't do that if it is a deliberate, throttling IO wait (this task
6725 * has set its backing_dev_info: the queue against which it should throttle)
6726 */ 6756 */
6727void __sched io_schedule(void) 6757void __sched io_schedule(void)
6728{ 6758{
@@ -9406,6 +9436,10 @@ void __init sched_init(void)
9406#endif /* CONFIG_USER_SCHED */ 9436#endif /* CONFIG_USER_SCHED */
9407#endif /* CONFIG_GROUP_SCHED */ 9437#endif /* CONFIG_GROUP_SCHED */
9408 9438
9439#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
9440 update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
9441 __alignof__(unsigned long));
9442#endif
9409 for_each_possible_cpu(i) { 9443 for_each_possible_cpu(i) {
9410 struct rq *rq; 9444 struct rq *rq;
9411 9445
@@ -9531,13 +9565,13 @@ void __init sched_init(void)
9531 current->sched_class = &fair_sched_class; 9565 current->sched_class = &fair_sched_class;
9532 9566
9533 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ 9567 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
9534 alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); 9568 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
9535#ifdef CONFIG_SMP 9569#ifdef CONFIG_SMP
9536#ifdef CONFIG_NO_HZ 9570#ifdef CONFIG_NO_HZ
9537 alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); 9571 zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
9538 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); 9572 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
9539#endif 9573#endif
9540 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 9574 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
9541#endif /* SMP */ 9575#endif /* SMP */
9542 9576
9543 perf_event_init(); 9577 perf_event_init();
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index ac2e1dc708bd..479ce5682d7c 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -127,7 +127,7 @@ again:
127 clock = wrap_max(clock, min_clock); 127 clock = wrap_max(clock, min_clock);
128 clock = wrap_min(clock, max_clock); 128 clock = wrap_min(clock, max_clock);
129 129
130 if (cmpxchg(&scd->clock, old_clock, clock) != old_clock) 130 if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
131 goto again; 131 goto again;
132 132
133 return clock; 133 return clock;
@@ -163,7 +163,7 @@ again:
163 val = remote_clock; 163 val = remote_clock;
164 } 164 }
165 165
166 if (cmpxchg(ptr, old_val, val) != old_val) 166 if (cmpxchg64(ptr, old_val, val) != old_val)
167 goto again; 167 goto again;
168 168
169 return val; 169 return val;
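The switch from cmpxchg() to cmpxchg64() above matters on 32-bit targets, where scd->clock is a 64-bit value and a word-sized compare-and-swap would cover only half of it; the surrounding code is the usual optimistic read/recompute/compare-and-swap retry loop. A user-space C11 sketch of that pattern (the clamping is reduced to a simple monotonic maximum rather than the kernel's wrap_min()/wrap_max() bounds):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t clock_ns;

/* Publish a new timestamp only if it moves the clock forward. */
static uint64_t clock_update(uint64_t sample)
{
        uint64_t old = atomic_load(&clock_ns);

        do {
                if (sample <= old)
                        return old;      /* another CPU already moved it past us */
                /* 64-bit compare-and-swap, like cmpxchg64(); retries on races. */
        } while (!atomic_compare_exchange_weak(&clock_ns, &old, sample));

        return sample;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)clock_update(42));
        printf("%llu\n", (unsigned long long)clock_update(10));  /* stays 42 */
        return 0;
}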
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4e777b47eeda..37087a7fac22 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
822 * re-elected due to buddy favours. 822 * re-elected due to buddy favours.
823 */ 823 */
824 clear_buddies(cfs_rq, curr); 824 clear_buddies(cfs_rq, curr);
825 return;
826 }
827
828 /*
829 * Ensure that a task that missed wakeup preemption by a
830 * narrow margin doesn't have to wait for a full slice.
831 * This also mitigates buddy induced latencies under load.
832 */
833 if (!sched_feat(WAKEUP_PREEMPT))
834 return;
835
836 if (delta_exec < sysctl_sched_min_granularity)
837 return;
838
839 if (cfs_rq->nr_running > 1) {
840 struct sched_entity *se = __pick_next_entity(cfs_rq);
841 s64 delta = curr->vruntime - se->vruntime;
842
843 if (delta > ideal_runtime)
844 resched_task(rq_of(cfs_rq)->curr);
825 } 845 }
826} 846}
827 847
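The block added to check_preempt_tick() above introduces a second preemption trigger: even before the running entity has burned its whole ideal_runtime, it is rescheduled once it has run at least the minimum granularity and its vruntime is more than ideal_runtime ahead of the leftmost waiter. A tiny arithmetic sketch of that decision (nanosecond values; the parameter names are local stand-ins for the scheduler's fields):

#include <stdbool.h>
#include <stdio.h>

/* Should the currently running entity be preempted on this tick? */
static bool tick_preempt(unsigned long delta_exec,     /* ran this slice   */
                         unsigned long ideal_runtime,  /* slice length     */
                         unsigned long min_granularity,
                         long long curr_vruntime,
                         long long leftmost_vruntime)
{
        if (delta_exec > ideal_runtime)
                return true;                    /* slice fully consumed */
        if (delta_exec < min_granularity)
                return false;                   /* too early to bother  */
        /* ran long enough, and fell too far ahead of the next entity */
        return curr_vruntime - leftmost_vruntime > (long long)ideal_runtime;
}

int main(void)
{
        /* 3 ms into a 4 ms slice, but 5 ms of vruntime ahead: preempt. */
        printf("%d\n", tick_preempt(3000000, 4000000, 750000,
                                    20000000, 15000000));
        return 0;
}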
@@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
861static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) 881static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
862{ 882{
863 struct sched_entity *se = __pick_next_entity(cfs_rq); 883 struct sched_entity *se = __pick_next_entity(cfs_rq);
884 struct sched_entity *left = se;
864 885
865 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1) 886 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
866 return cfs_rq->next; 887 se = cfs_rq->next;
867 888
868 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1) 889 /*
869 return cfs_rq->last; 890 * Prefer last buddy, try to return the CPU to a preempted task.
891 */
892 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
893 se = cfs_rq->last;
894
895 clear_buddies(cfs_rq, se);
870 896
871 return se; 897 return se;
872} 898}
@@ -1568,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1568 struct sched_entity *se = &curr->se, *pse = &p->se; 1594 struct sched_entity *se = &curr->se, *pse = &p->se;
1569 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 1595 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1570 int sync = wake_flags & WF_SYNC; 1596 int sync = wake_flags & WF_SYNC;
1597 int scale = cfs_rq->nr_running >= sched_nr_latency;
1571 1598
1572 update_curr(cfs_rq); 1599 update_curr(cfs_rq);
1573 1600
@@ -1582,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1582 if (unlikely(se == pse)) 1609 if (unlikely(se == pse))
1583 return; 1610 return;
1584 1611
1585 /* 1612 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
1586 * Only set the backward buddy when the current task is still on the
1587 * rq. This can happen when a wakeup gets interleaved with schedule on
1588 * the ->pre_schedule() or idle_balance() point, either of which can
1589 * drop the rq lock.
1590 *
1591 * Also, during early boot the idle thread is in the fair class, for
1592 * obvious reasons its a bad idea to schedule back to the idle thread.
1593 */
1594 if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
1595 set_last_buddy(se);
1596 if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
1597 set_next_buddy(pse); 1613 set_next_buddy(pse);
1598 1614
1599 /* 1615 /*
@@ -1639,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1639 1655
1640 BUG_ON(!pse); 1656 BUG_ON(!pse);
1641 1657
1642 if (wakeup_preempt_entity(se, pse) == 1) 1658 if (wakeup_preempt_entity(se, pse) == 1) {
1643 resched_task(curr); 1659 resched_task(curr);
1660 /*
1661 * Only set the backward buddy when the current task is still
1662 * on the rq. This can happen when a wakeup gets interleaved
1663 * with schedule on the ->pre_schedule() or idle_balance()
1664 * point, either of which can drop the rq lock.
1665 *
1666 * Also, during early boot the idle thread is in the fair class,
1667 * for obvious reasons it's a bad idea to schedule back to it.
1668 */
1669 if (unlikely(!se->on_rq || curr == rq->idle))
1670 return;
1671 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1672 set_last_buddy(se);
1673 }
1644} 1674}
1645 1675
1646static struct task_struct *pick_next_task_fair(struct rq *rq) 1676static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1654,16 +1684,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
1654 1684
1655 do { 1685 do {
1656 se = pick_next_entity(cfs_rq); 1686 se = pick_next_entity(cfs_rq);
1657 /*
1658 * If se was a buddy, clear it so that it will have to earn
1659 * the favour again.
1660 *
1661 * If se was not a buddy, clear the buddies because neither
1662 * was elegible to run, let them earn it again.
1663 *
1664 * IOW. unconditionally clear buddies.
1665 */
1666 __clear_buddies(cfs_rq, NULL);
1667 set_next_entity(cfs_rq, se); 1687 set_next_entity(cfs_rq, se);
1668 cfs_rq = group_cfs_rq(se); 1688 cfs_rq = group_cfs_rq(se);
1669 } while (cfs_rq); 1689 } while (cfs_rq);
diff --git a/kernel/sys.c b/kernel/sys.c
index 255475d163e0..ce17760d9c51 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid)
1110 err = session; 1110 err = session;
1111out: 1111out:
1112 write_unlock_irq(&tasklist_lock); 1112 write_unlock_irq(&tasklist_lock);
1113 if (err > 0)
1114 proc_sid_connector(group_leader);
1113 return err; 1115 return err;
1114} 1116}
1115 1117
@@ -1546,24 +1548,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1546 if (arg4 | arg5) 1548 if (arg4 | arg5)
1547 return -EINVAL; 1549 return -EINVAL;
1548 switch (arg2) { 1550 switch (arg2) {
1549 case 0: 1551 case PR_MCE_KILL_CLEAR:
1550 if (arg3 != 0) 1552 if (arg3 != 0)
1551 return -EINVAL; 1553 return -EINVAL;
1552 current->flags &= ~PF_MCE_PROCESS; 1554 current->flags &= ~PF_MCE_PROCESS;
1553 break; 1555 break;
1554 case 1: 1556 case PR_MCE_KILL_SET:
1555 current->flags |= PF_MCE_PROCESS; 1557 current->flags |= PF_MCE_PROCESS;
1556 if (arg3 != 0) 1558 if (arg3 == PR_MCE_KILL_EARLY)
1557 current->flags |= PF_MCE_EARLY; 1559 current->flags |= PF_MCE_EARLY;
1558 else 1560 else if (arg3 == PR_MCE_KILL_LATE)
1559 current->flags &= ~PF_MCE_EARLY; 1561 current->flags &= ~PF_MCE_EARLY;
1562 else if (arg3 == PR_MCE_KILL_DEFAULT)
1563 current->flags &=
1564 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
1565 else
1566 return -EINVAL;
1560 break; 1567 break;
1561 default: 1568 default:
1562 return -EINVAL; 1569 return -EINVAL;
1563 } 1570 }
1564 error = 0; 1571 error = 0;
1565 break; 1572 break;
1566 1573 case PR_MCE_KILL_GET:
1574 if (arg2 | arg3 | arg4 | arg5)
1575 return -EINVAL;
1576 if (current->flags & PF_MCE_PROCESS)
1577 error = (current->flags & PF_MCE_EARLY) ?
1578 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
1579 else
1580 error = PR_MCE_KILL_DEFAULT;
1581 break;
1567 default: 1582 default:
1568 error = -EINVAL; 1583 error = -EINVAL;
1569 break; 1584 break;
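With the named PR_MCE_KILL_* constants, the prctl() interface is much easier to read from user space: PR_MCE_KILL sets the early/late/default machine-check kill policy for the calling process, and PR_MCE_KILL_GET reports the current one. A minimal user-space example (assumes kernel headers new enough to define these constants):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        /* Ask for early kill: signal as soon as corrupted memory is found. */
        if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0) < 0) {
                perror("PR_MCE_KILL");
                return 1;
        }

        /* Read the policy back; returns EARLY, LATE or DEFAULT. */
        int policy = prctl(PR_MCE_KILL_GET, 0, 0, 0, 0);
        printf("mce kill policy: %d (early=%d)\n", policy, PR_MCE_KILL_EARLY);
        return 0;
}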
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b38423ca711a..b6e7aaea4604 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1521 if (!table->ctl_name && table->strategy) 1521 if (!table->ctl_name && table->strategy)
1522 set_fail(&fail, table, "Strategy without ctl_name"); 1522 set_fail(&fail, table, "Strategy without ctl_name");
1523#endif 1523#endif
1524#ifdef CONFIG_PROC_FS 1524#ifdef CONFIG_PROC_SYSCTL
1525 if (table->procname && !table->proc_handler) 1525 if (table->procname && !table->proc_handler)
1526 set_fail(&fail, table, "No proc_handler"); 1526 set_fail(&fail, table, "No proc_handler");
1527#endif 1527#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 09113347d328..5e18c6ab2c6a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -394,15 +394,11 @@ void clocksource_resume(void)
394{ 394{
395 struct clocksource *cs; 395 struct clocksource *cs;
396 396
397 mutex_lock(&clocksource_mutex);
398
399 list_for_each_entry(cs, &clocksource_list, list) 397 list_for_each_entry(cs, &clocksource_list, list)
400 if (cs->resume) 398 if (cs->resume)
401 cs->resume(); 399 cs->resume();
402 400
403 clocksource_resume_watchdog(); 401 clocksource_resume_watchdog();
404
405 mutex_unlock(&clocksource_mutex);
406} 402}
407 403
408/** 404/**
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e0f59a21c061..89aed5933ed4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -231,6 +231,13 @@ void tick_nohz_stop_sched_tick(int inidle)
231 if (!inidle && !ts->inidle) 231 if (!inidle && !ts->inidle)
232 goto end; 232 goto end;
233 233
234 /*
235 * Set ts->inidle unconditionally. Even if the system did not
236 * switch to NOHZ mode, the cpu frequency governors rely on the
237 * update of the idle time accounting in tick_nohz_start_idle().
238 */
239 ts->inidle = 1;
240
234 now = tick_nohz_start_idle(ts); 241 now = tick_nohz_start_idle(ts);
235 242
236 /* 243 /*
@@ -248,8 +255,6 @@ void tick_nohz_stop_sched_tick(int inidle)
248 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 255 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
249 goto end; 256 goto end;
250 257
251 ts->inidle = 1;
252
253 if (need_resched()) 258 if (need_resched())
254 goto end; 259 goto end;
255 260
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fb0f46fa1ecd..c3a4e2907eaa 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -13,6 +13,7 @@
13#include <linux/percpu.h> 13#include <linux/percpu.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/sched.h>
16#include <linux/sysdev.h> 17#include <linux/sysdev.h>
17#include <linux/clocksource.h> 18#include <linux/clocksource.h>
18#include <linux/jiffies.h> 19#include <linux/jiffies.h>
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index fddd69d16e03..1b5b7aa2fdfd 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -275,7 +275,7 @@ static int timer_list_open(struct inode *inode, struct file *filp)
275 return single_open(filp, timer_list_show, NULL); 275 return single_open(filp, timer_list_show, NULL);
276} 276}
277 277
278static struct file_operations timer_list_fops = { 278static const struct file_operations timer_list_fops = {
279 .open = timer_list_open, 279 .open = timer_list_open,
280 .read = seq_read, 280 .read = seq_read,
281 .llseek = seq_lseek, 281 .llseek = seq_lseek,
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 4cde8b9c716f..ee5681f8d7ec 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -395,7 +395,7 @@ static int tstats_open(struct inode *inode, struct file *filp)
395 return single_open(filp, tstats_show, NULL); 395 return single_open(filp, tstats_show, NULL);
396} 396}
397 397
398static struct file_operations tstats_fops = { 398static const struct file_operations tstats_fops = {
399 .open = tstats_open, 399 .open = tstats_open,
400 .read = seq_read, 400 .read = seq_read,
401 .write = tstats_write, 401 .write = tstats_write,
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3eb159c277c8..d9d6206e0b14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
856} 856}
857 857
858/** 858/**
859 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
860 * @q: queue the io is for
861 * @rq: the source request
862 * @dev: target device
863 * @from: source sector
864 *
865 * Description:
866 * Device mapper remaps requests to other devices.
867 * Add a trace for that action.
868 *
869 **/
870static void blk_add_trace_rq_remap(struct request_queue *q,
871 struct request *rq, dev_t dev,
872 sector_t from)
873{
874 struct blk_trace *bt = q->blk_trace;
875 struct blk_io_trace_remap r;
876
877 if (likely(!bt))
878 return;
879
880 r.device_from = cpu_to_be32(dev);
881 r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
882 r.sector_from = cpu_to_be64(from);
883
884 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
885 rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
886 sizeof(r), &r);
887}
888
889/**
859 * blk_add_driver_data - Add binary message with driver-specific data 890 * blk_add_driver_data - Add binary message with driver-specific data
860 * @q: queue the io is for 891 * @q: queue the io is for
861 * @rq: io request 892 * @rq: io request
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
922 WARN_ON(ret); 953 WARN_ON(ret);
923 ret = register_trace_block_remap(blk_add_trace_remap); 954 ret = register_trace_block_remap(blk_add_trace_remap);
924 WARN_ON(ret); 955 WARN_ON(ret);
956 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
957 WARN_ON(ret);
925} 958}
926 959
927static void blk_unregister_tracepoints(void) 960static void blk_unregister_tracepoints(void)
928{ 961{
962 unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
929 unregister_trace_block_remap(blk_add_trace_remap); 963 unregister_trace_block_remap(blk_add_trace_remap);
930 unregister_trace_block_split(blk_add_trace_split); 964 unregister_trace_block_split(blk_add_trace_split);
931 unregister_trace_block_unplug_io(blk_add_trace_unplug_io); 965 unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
1657 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); 1691 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1658} 1692}
1659 1693
1694void blk_trace_remove_sysfs(struct device *dev)
1695{
1696 sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1697}
1698
1660#endif /* CONFIG_BLK_DEV_IO_TRACE */ 1699#endif /* CONFIG_BLK_DEV_IO_TRACE */
1661 1700
1662#ifdef CONFIG_EVENT_TRACING 1701#ifdef CONFIG_EVENT_TRACING
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a142579765bf..6dc4e5ef7a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
225 if (ftrace_trace_function == ftrace_stub) 225 if (ftrace_trace_function == ftrace_stub)
226 return; 226 return;
227 227
228#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
228 func = ftrace_trace_function; 229 func = ftrace_trace_function;
230#else
231 func = __ftrace_trace_function;
232#endif
229 233
230 if (ftrace_pid_trace) { 234 if (ftrace_pid_trace) {
231 set_ftrace_pid_function(func); 235 set_ftrace_pid_function(func);
@@ -736,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
736 out: 740 out:
737 mutex_unlock(&ftrace_profile_lock); 741 mutex_unlock(&ftrace_profile_lock);
738 742
739 filp->f_pos += cnt; 743 *ppos += cnt;
740 744
741 return cnt; 745 return cnt;
742} 746}
@@ -1074,14 +1078,9 @@ static void ftrace_replace_code(int enable)
1074 failed = __ftrace_replace_code(rec, enable); 1078 failed = __ftrace_replace_code(rec, enable);
1075 if (failed) { 1079 if (failed) {
1076 rec->flags |= FTRACE_FL_FAILED; 1080 rec->flags |= FTRACE_FL_FAILED;
1077 if ((system_state == SYSTEM_BOOTING) || 1081 ftrace_bug(failed, rec->ip);
1078 !core_kernel_text(rec->ip)) { 1082 /* Stop processing */
1079 ftrace_free_rec(rec); 1083 return;
1080 } else {
1081 ftrace_bug(failed, rec->ip);
1082 /* Stop processing */
1083 return;
1084 }
1085 } 1084 }
1086 } while_for_each_ftrace_rec(); 1085 } while_for_each_ftrace_rec();
1087} 1086}
@@ -1621,8 +1620,10 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1621 if (!ret) { 1620 if (!ret) {
1622 struct seq_file *m = file->private_data; 1621 struct seq_file *m = file->private_data;
1623 m->private = iter; 1622 m->private = iter;
1624 } else 1623 } else {
1624 trace_parser_put(&iter->parser);
1625 kfree(iter); 1625 kfree(iter);
1626 }
1626 } else 1627 } else
1627 file->private_data = iter; 1628 file->private_data = iter;
1628 mutex_unlock(&ftrace_regex_lock); 1629 mutex_unlock(&ftrace_regex_lock);
@@ -2202,7 +2203,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2202 struct trace_parser *parser; 2203 struct trace_parser *parser;
2203 ssize_t ret, read; 2204 ssize_t ret, read;
2204 2205
2205 if (!cnt || cnt < 0) 2206 if (!cnt)
2206 return 0; 2207 return 0;
2207 2208
2208 mutex_lock(&ftrace_regex_lock); 2209 mutex_lock(&ftrace_regex_lock);
@@ -2216,20 +2217,20 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2216 parser = &iter->parser; 2217 parser = &iter->parser;
2217 read = trace_get_user(parser, ubuf, cnt, ppos); 2218 read = trace_get_user(parser, ubuf, cnt, ppos);
2218 2219
2219 if (trace_parser_loaded(parser) && 2220 if (read >= 0 && trace_parser_loaded(parser) &&
2220 !trace_parser_cont(parser)) { 2221 !trace_parser_cont(parser)) {
2221 ret = ftrace_process_regex(parser->buffer, 2222 ret = ftrace_process_regex(parser->buffer,
2222 parser->idx, enable); 2223 parser->idx, enable);
2223 if (ret) 2224 if (ret)
2224 goto out; 2225 goto out_unlock;
2225 2226
2226 trace_parser_clear(parser); 2227 trace_parser_clear(parser);
2227 } 2228 }
2228 2229
2229 ret = read; 2230 ret = read;
2230 2231out_unlock:
2231 mutex_unlock(&ftrace_regex_lock); 2232 mutex_unlock(&ftrace_regex_lock);
2232out: 2233
2233 return ret; 2234 return ret;
2234} 2235}
2235 2236
@@ -2552,8 +2553,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2552 size_t cnt, loff_t *ppos) 2553 size_t cnt, loff_t *ppos)
2553{ 2554{
2554 struct trace_parser parser; 2555 struct trace_parser parser;
2555 size_t read = 0; 2556 ssize_t read, ret;
2556 ssize_t ret;
2557 2557
2558 if (!cnt || cnt < 0) 2558 if (!cnt || cnt < 0)
2559 return 0; 2559 return 0;
@@ -2562,29 +2562,31 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2562 2562
2563 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) { 2563 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2564 ret = -EBUSY; 2564 ret = -EBUSY;
2565 goto out; 2565 goto out_unlock;
2566 } 2566 }
2567 2567
2568 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { 2568 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2569 ret = -ENOMEM; 2569 ret = -ENOMEM;
2570 goto out; 2570 goto out_unlock;
2571 } 2571 }
2572 2572
2573 read = trace_get_user(&parser, ubuf, cnt, ppos); 2573 read = trace_get_user(&parser, ubuf, cnt, ppos);
2574 2574
2575 if (trace_parser_loaded((&parser))) { 2575 if (read >= 0 && trace_parser_loaded((&parser))) {
2576 parser.buffer[parser.idx] = 0; 2576 parser.buffer[parser.idx] = 0;
2577 2577
2578 /* we allow only one expression at a time */ 2578 /* we allow only one expression at a time */
2579 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, 2579 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2580 parser.buffer); 2580 parser.buffer);
2581 if (ret) 2581 if (ret)
2582 goto out; 2582 goto out_free;
2583 } 2583 }
2584 2584
2585 ret = read; 2585 ret = read;
2586 out: 2586
2587out_free:
2587 trace_parser_put(&parser); 2588 trace_parser_put(&parser);
2589out_unlock:
2588 mutex_unlock(&graph_lock); 2590 mutex_unlock(&graph_lock);
2589 2591
2590 return ret; 2592 return ret;
@@ -2655,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod,
2655} 2657}
2656 2658
2657#ifdef CONFIG_MODULES 2659#ifdef CONFIG_MODULES
2658void ftrace_release(void *start, void *end) 2660void ftrace_release_mod(struct module *mod)
2659{ 2661{
2660 struct dyn_ftrace *rec; 2662 struct dyn_ftrace *rec;
2661 struct ftrace_page *pg; 2663 struct ftrace_page *pg;
2662 unsigned long s = (unsigned long)start;
2663 unsigned long e = (unsigned long)end;
2664 2664
2665 if (ftrace_disabled || !start || start == end) 2665 if (ftrace_disabled)
2666 return; 2666 return;
2667 2667
2668 mutex_lock(&ftrace_lock); 2668 mutex_lock(&ftrace_lock);
2669 do_for_each_ftrace_rec(pg, rec) { 2669 do_for_each_ftrace_rec(pg, rec) {
2670 if ((rec->ip >= s) && (rec->ip < e)) { 2670 if (within_module_core(rec->ip, mod)) {
2671 /* 2671 /*
2672 * rec->ip is changed in ftrace_free_rec() 2672 * rec->ip is changed in ftrace_free_rec()
2673 * It should not between s and e if record was freed. 2673 * It should not between s and e if record was freed.
@@ -2699,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self,
2699 mod->num_ftrace_callsites); 2699 mod->num_ftrace_callsites);
2700 break; 2700 break;
2701 case MODULE_STATE_GOING: 2701 case MODULE_STATE_GOING:
2702 ftrace_release(mod->ftrace_callsites, 2702 ftrace_release_mod(mod);
2703 mod->ftrace_callsites +
2704 mod->num_ftrace_callsites);
2705 break; 2703 break;
2706 } 2704 }
2707 2705
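
Note on the ftrace.c hunks above: ftrace_regex_write() and ftrace_graph_write() replace a single catch-all label with per-resource unwind labels (out_free, out_unlock) and only act on the parser when trace_get_user() reported a non-negative byte count; ftrace_release() becomes ftrace_release_mod() and matches records with within_module_core() instead of a hand-rolled address range. The user-space sketch below is illustrative only and every identifier in it (demo_write, demo_process, demo_lock) is invented; it just shows the label-per-resource exit ordering the new ftrace_graph_write() follows.

/* Illustrative only -- not part of the patch. A failure before the parser
 * exists jumps past the free; a failure (or success) after it falls through
 * the free first, mirroring out_free/out_unlock above. */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_process(const char *buf)
{
        return buf[0] ? 0 : -EINVAL;    /* stand-in for ftrace_set_func() */
}

static int demo_write(const char *expr)
{
        char *parser;
        int ret;

        pthread_mutex_lock(&demo_lock);

        parser = strdup(expr);          /* stand-in for trace_parser_get_init() */
        if (!parser) {
                ret = -ENOMEM;
                goto out_unlock;        /* nothing allocated yet: skip the free */
        }

        ret = demo_process(parser);
        if (ret)
                goto out_free;

        ret = 0;                        /* the real code reports bytes consumed here */
out_free:
        free(parser);                   /* stand-in for trace_parser_put() */
out_unlock:
        pthread_mutex_unlock(&demo_lock);
        return ret;
}

int main(void)
{
        return demo_write("func_name") ? 1 : 0;
}
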
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
501 return 1; 501 return 1;
502 } 502 }
503 503
504 if (!register_tracer(&kmem_tracer)) { 504 if (register_tracer(&kmem_tracer) != 0) {
505 pr_warning("Warning: could not register the kmem tracer\n"); 505 pr_warning("Warning: could not register the kmem tracer\n");
506 return 1; 506 return 1;
507 } 507 }
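
Note on the kmemtrace.c hunk: register_tracer() follows the usual kernel convention of returning 0 on success and a negative value on failure, so the old `if (!register_tracer(...))` warned on the success path. A tiny standalone sketch of that convention, with a hypothetical demo_register() standing in for the real call:

/* Illustrative only -- 0 means success, so warn only on a non-zero return. */
#include <errno.h>
#include <stdio.h>

static int demo_register(int ok)
{
        return ok ? 0 : -ENOMEM;        /* 0 == success, negative errno == failure */
}

int main(void)
{
        if (demo_register(1) != 0)      /* warn only when registration really failed */
                fprintf(stderr, "could not register the demo tracer\n");
        return 0;
}
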
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..5dd017fea6f5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -483,7 +483,7 @@ struct ring_buffer_iter {
483/* Up this if you want to test the TIME_EXTENTS and normalization */ 483/* Up this if you want to test the TIME_EXTENTS and normalization */
484#define DEBUG_SHIFT 0 484#define DEBUG_SHIFT 0
485 485
486static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) 486static inline u64 rb_time_stamp(struct ring_buffer *buffer)
487{ 487{
488 /* shift to debug/test normalization and TIME_EXTENTS */ 488 /* shift to debug/test normalization and TIME_EXTENTS */
489 return buffer->clock() << DEBUG_SHIFT; 489 return buffer->clock() << DEBUG_SHIFT;
@@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
494 u64 time; 494 u64 time;
495 495
496 preempt_disable_notrace(); 496 preempt_disable_notrace();
497 time = rb_time_stamp(buffer, cpu); 497 time = rb_time_stamp(buffer);
498 preempt_enable_no_resched_notrace(); 498 preempt_enable_no_resched_notrace();
499 499
500 return time; 500 return time;
@@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list)
599} 599}
600 600
601/* 601/*
602 * rb_is_head_page - test if the give page is the head page 602 * rb_is_head_page - test if the given page is the head page
603 * 603 *
604 * Because the reader may move the head_page pointer, we can 604 * Because the reader may move the head_page pointer, we can
605 * not trust what the head page is (it may be pointing to 605 * not trust what the head page is (it may be pointing to
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1193 atomic_inc(&cpu_buffer->record_disabled); 1193 atomic_inc(&cpu_buffer->record_disabled);
1194 synchronize_sched(); 1194 synchronize_sched();
1195 1195
1196 spin_lock_irq(&cpu_buffer->reader_lock);
1196 rb_head_page_deactivate(cpu_buffer); 1197 rb_head_page_deactivate(cpu_buffer);
1197 1198
1198 for (i = 0; i < nr_pages; i++) { 1199 for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1207 return; 1208 return;
1208 1209
1209 rb_reset_cpu(cpu_buffer); 1210 rb_reset_cpu(cpu_buffer);
1211 spin_unlock_irq(&cpu_buffer->reader_lock);
1210 1212
1211 rb_check_pages(cpu_buffer); 1213 rb_check_pages(cpu_buffer);
1212 1214
@@ -1868,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1868 * Nested commits always have zero deltas, so 1870 * Nested commits always have zero deltas, so
1869 * just reread the time stamp 1871 * just reread the time stamp
1870 */ 1872 */
1871 *ts = rb_time_stamp(buffer, cpu_buffer->cpu); 1873 *ts = rb_time_stamp(buffer);
1872 next_page->page->time_stamp = *ts; 1874 next_page->page->time_stamp = *ts;
1873 } 1875 }
1874 1876
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2111 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 2113 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2112 goto out_fail; 2114 goto out_fail;
2113 2115
2114 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); 2116 ts = rb_time_stamp(cpu_buffer->buffer);
2115 2117
2116 /* 2118 /*
2117 * Only the first commit can update the timestamp. 2119 * Only the first commit can update the timestamp.
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2681EXPORT_SYMBOL_GPL(ring_buffer_entries); 2683EXPORT_SYMBOL_GPL(ring_buffer_entries);
2682 2684
2683/** 2685/**
2684 * ring_buffer_overrun_cpu - get the number of overruns in buffer 2686 * ring_buffer_overruns - get the number of overruns in buffer
2685 * @buffer: The ring buffer 2687 * @buffer: The ring buffer
2686 * 2688 *
2687 * Returns the total number of overruns in the ring buffer 2689 * Returns the total number of overruns in the ring buffer
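
Note on the ring_buffer.c hunks: rb_time_stamp() drops its unused cpu argument, two comment typos are corrected, and rb_remove_pages() now takes cpu_buffer->reader_lock around the page removal. The sketch below is a user-space illustration of the apparent reasoning (disabling writers alone does not keep a reader from walking the page list); demo_buffer, demo_shrink and friends are all invented names.

/* Illustrative only -- unlink pages under the same lock readers hold. */
#include <pthread.h>
#include <stdlib.h>

struct demo_page {
        struct demo_page *next;
        struct demo_page *prev;
};

struct demo_buffer {
        pthread_mutex_t reader_lock;    /* readers hold this while walking pages */
        struct demo_page head;          /* circular list of pages */
        int record_disabled;            /* writers check this before writing */
};

static void demo_shrink(struct demo_buffer *buf, unsigned int nr_pages)
{
        buf->record_disabled = 1;       /* stop new writes (the real code also
                                         * waits with synchronize_sched()) */

        pthread_mutex_lock(&buf->reader_lock);  /* and keep readers out too */
        while (nr_pages-- && buf->head.next != &buf->head) {
                struct demo_page *p = buf->head.next;

                p->prev->next = p->next;        /* unlink while nobody can see it */
                p->next->prev = p->prev;
                free(p);
        }
        pthread_mutex_unlock(&buf->reader_lock);

        buf->record_disabled = 0;
}

int main(void)
{
        struct demo_buffer buf = { .reader_lock = PTHREAD_MUTEX_INITIALIZER };

        buf.head.next = buf.head.prev = &buf.head;      /* empty ring */
        demo_shrink(&buf, 2);                           /* nothing to remove */
        return 0;
}
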
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 411af37f4be4..b20d3ec75de9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -415,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
415 415
416 /* read the non-space input */ 416 /* read the non-space input */
417 while (cnt && !isspace(ch)) { 417 while (cnt && !isspace(ch)) {
418 if (parser->idx < parser->size) 418 if (parser->idx < parser->size - 1)
419 parser->buffer[parser->idx++] = ch; 419 parser->buffer[parser->idx++] = ch;
420 else { 420 else {
421 ret = -EINVAL; 421 ret = -EINVAL;
@@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
1393 1393
1394int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 1394int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1395{ 1395{
1396 return trace_array_printk(&global_trace, ip, fmt, args); 1396 return trace_array_vprintk(&global_trace, ip, fmt, args);
1397} 1397}
1398EXPORT_SYMBOL_GPL(trace_vprintk); 1398EXPORT_SYMBOL_GPL(trace_vprintk);
1399 1399
@@ -2440,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2440 return ret; 2440 return ret;
2441 } 2441 }
2442 2442
2443 filp->f_pos += cnt; 2443 *ppos += cnt;
2444 2444
2445 return cnt; 2445 return cnt;
2446} 2446}
@@ -2582,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2582 } 2582 }
2583 mutex_unlock(&trace_types_lock); 2583 mutex_unlock(&trace_types_lock);
2584 2584
2585 filp->f_pos += cnt; 2585 *ppos += cnt;
2586 2586
2587 return cnt; 2587 return cnt;
2588} 2588}
@@ -2764,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2764 if (err) 2764 if (err)
2765 return err; 2765 return err;
2766 2766
2767 filp->f_pos += ret; 2767 *ppos += ret;
2768 2768
2769 return ret; 2769 return ret;
2770} 2770}
@@ -3299,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3299 } 3299 }
3300 } 3300 }
3301 3301
3302 filp->f_pos += cnt; 3302 *ppos += cnt;
3303 3303
3304 /* If check pages failed, return ENOMEM */ 3304 /* If check pages failed, return ENOMEM */
3305 if (tracing_disabled) 3305 if (tracing_disabled)
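
Note on the trace.c hunks: trace_get_user() now stops one byte early (`idx < size - 1`) so the terminating nul written by callers stays in bounds, trace_vprintk() forwards to trace_array_vprintk() (the va_list variant) instead of the variadic trace_array_printk(), and the write handlers advance *ppos rather than poking filp->f_pos directly. The sketch below isolates the off-by-one; demo_parser and demo_feed are invented names.

/* Illustrative only -- leave room for the '\0' the caller appends. */
#include <stdio.h>

struct demo_parser {
        char buffer[8];
        unsigned int idx;
        unsigned int size;
};

static int demo_feed(struct demo_parser *p, char ch)
{
        if (p->idx < p->size - 1) {     /* reserve the last byte for the nul */
                p->buffer[p->idx++] = ch;
                return 0;
        }
        return -1;                      /* word too long for the buffer */
}

int main(void)
{
        struct demo_parser p = { .size = sizeof(p.buffer) };
        const char *word = "function_name";

        while (*word && demo_feed(&p, *word) == 0)
                word++;
        p.buffer[p.idx] = '\0';         /* always in bounds now */
        printf("%s\n", p.buffer);
        return 0;
}
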
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7a7a9fd249a9..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
34 struct trace_array *tr = branch_tracer; 34 struct trace_array *tr = branch_tracer;
35 struct ring_buffer_event *event; 35 struct ring_buffer_event *event;
36 struct trace_branch *entry; 36 struct trace_branch *entry;
37 struct ring_buffer *buffer;
37 unsigned long flags; 38 unsigned long flags;
38 int cpu, pc; 39 int cpu, pc;
39 const char *p; 40 const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
54 goto out; 55 goto out;
55 56
56 pc = preempt_count(); 57 pc = preempt_count();
57 event = trace_buffer_lock_reserve(tr, TRACE_BRANCH, 58 buffer = tr->buffer;
59 event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
58 sizeof(*entry), flags, pc); 60 sizeof(*entry), flags, pc);
59 if (!event) 61 if (!event)
60 goto out; 62 goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
74 entry->line = f->line; 76 entry->line = f->line;
75 entry->correct = val == expect; 77 entry->correct = val == expect;
76 78
77 if (!filter_check_discard(call, entry, tr->buffer, event)) 79 if (!filter_check_discard(call, entry, buffer, event))
78 ring_buffer_unlock_commit(tr->buffer, event); 80 ring_buffer_unlock_commit(buffer, event);
79 81
80 out: 82 out:
81 atomic_dec(&tr->data[cpu]->disabled); 83 atomic_dec(&tr->data[cpu]->disabled);
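
Note on the trace_branch.c hunk: the probe now reads tr->buffer once into a local and uses that same ring-buffer pointer for reserve, filter_check_discard() and commit; the trace_hw_branches.c hunk further down makes the same change. A user-space sketch of the read-once shape, with every name (demo_ring, demo_probe, ...) invented:

/* Illustrative only -- snapshot a shared pointer so reserve and commit agree. */
#include <stdio.h>
#include <stdlib.h>

struct demo_ring { int id; };

struct demo_trace_array {
        struct demo_ring *buffer;       /* may be repointed by other code */
};

static void *demo_reserve(struct demo_ring *rb, unsigned int len)
{
        (void)rb;
        return malloc(len);
}

static void demo_commit(struct demo_ring *rb, void *event)
{
        printf("committed to ring %d\n", rb->id);
        free(event);
}

static void demo_probe(struct demo_trace_array *tr)
{
        struct demo_ring *buffer = tr->buffer;  /* read the pointer once */
        void *event = demo_reserve(buffer, 32);

        if (event)
                demo_commit(buffer, event);     /* same buffer as the reserve */
}

int main(void)
{
        struct demo_ring ring = { .id = 1 };
        struct demo_trace_array tr = { .buffer = &ring };

        demo_probe(&tr);
        return 0;
}
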
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index dd44b8768867..8d5c171cc998 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
31 if (atomic_inc_return(&event->profile_count)) 31 if (atomic_inc_return(&event->profile_count))
32 return 0; 32 return 0;
33 33
34 if (!total_profile_count++) { 34 if (!total_profile_count) {
35 buf = (char *)alloc_percpu(profile_buf_t); 35 buf = (char *)alloc_percpu(profile_buf_t);
36 if (!buf) 36 if (!buf)
37 goto fail_buf; 37 goto fail_buf;
@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
46 } 46 }
47 47
48 ret = event->profile_enable(); 48 ret = event->profile_enable();
49 if (!ret) 49 if (!ret) {
50 total_profile_count++;
50 return 0; 51 return 0;
52 }
51 53
52 kfree(trace_profile_buf_nmi);
53fail_buf_nmi: 54fail_buf_nmi:
54 kfree(trace_profile_buf); 55 if (!total_profile_count) {
56 free_percpu(trace_profile_buf_nmi);
57 free_percpu(trace_profile_buf);
58 trace_profile_buf_nmi = NULL;
59 trace_profile_buf = NULL;
60 }
55fail_buf: 61fail_buf:
56 total_profile_count--;
57 atomic_dec(&event->profile_count); 62 atomic_dec(&event->profile_count);
58 63
59 return ret; 64 return ret;
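
Note on the trace_event_profile.c hunk: total_profile_count is now bumped only after profile_enable() succeeds, and the failure path frees the per-cpu buffers with free_percpu() (they were allocated with alloc_percpu(), so the old kfree() was wrong) only when no other event still uses them, clearing the pointers as well. The sketch below shows the "count the user only once enable worked" shape in plain C; demo_enable(), demo_hw_enable() and shared_buf are invented stand-ins.

/* Illustrative only -- roll back the lazy allocation if enabling fails
 * and nobody else is counted as a user. */
#include <errno.h>
#include <stdlib.h>

static int total_users;
static char *shared_buf;

static int demo_hw_enable(int ok) { return ok ? 0 : -EIO; }

static int demo_enable(int hw_ok)
{
        int ret;

        if (!total_users) {             /* first user allocates the buffer */
                shared_buf = malloc(4096);
                if (!shared_buf)
                        return -ENOMEM;
        }

        ret = demo_hw_enable(hw_ok);
        if (!ret) {
                total_users++;          /* counted only after success */
                return 0;
        }

        if (!total_users) {             /* we allocated it and nobody uses it */
                free(shared_buf);
                shared_buf = NULL;
        }
        return ret;
}

int main(void)
{
        return (demo_enable(1) == 0 && total_users == 1) ? 0 : 1;
}
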
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6f03c8a1105e..d128f65778e6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -232,10 +232,9 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
232 size_t cnt, loff_t *ppos) 232 size_t cnt, loff_t *ppos)
233{ 233{
234 struct trace_parser parser; 234 struct trace_parser parser;
235 size_t read = 0; 235 ssize_t read, ret;
236 ssize_t ret;
237 236
238 if (!cnt || cnt < 0) 237 if (!cnt)
239 return 0; 238 return 0;
240 239
241 ret = tracing_update_buffers(); 240 ret = tracing_update_buffers();
@@ -247,7 +246,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
247 246
248 read = trace_get_user(&parser, ubuf, cnt, ppos); 247 read = trace_get_user(&parser, ubuf, cnt, ppos);
249 248
250 if (trace_parser_loaded((&parser))) { 249 if (read >= 0 && trace_parser_loaded((&parser))) {
251 int set = 1; 250 int set = 1;
252 251
253 if (*parser.buffer == '!') 252 if (*parser.buffer == '!')
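
Note on the trace_events.c hunk: besides the same `read >= 0` guard as in ftrace.c, `read` changes from size_t to ssize_t, which is what makes that guard meaningful; an unsigned `read` can never be negative, so an error return from trace_get_user() would look like a huge byte count (and `cnt < 0` on a size_t was likewise always false, hence the simplified `!cnt` check). A small standalone illustration, with demo_get_user() invented:

/* Illustrative only -- signedness decides whether "read >= 0" can ever fail. */
#include <stdio.h>
#include <sys/types.h>

static ssize_t demo_get_user(int fail)
{
        return fail ? -14 : 5;          /* -EFAULT or "5 bytes consumed" */
}

int main(void)
{
        size_t  ur = demo_get_user(1);  /* -14 wraps to SIZE_MAX - 13 */
        ssize_t sr = demo_get_user(1);

        printf("unsigned: read >= 0 is %d\n", ur >= 0);  /* always 1 */
        printf("signed:   read >= 0 is %d\n", sr >= 0);  /* 0 on error */
        return 0;
}
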
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927f..98a6cc5c64ed 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps)
933 933
934 while (!list_empty(&ps->postfix)) { 934 while (!list_empty(&ps->postfix)) {
935 elt = list_first_entry(&ps->postfix, struct postfix_elt, list); 935 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
936 kfree(elt->operand);
937 list_del(&elt->list); 936 list_del(&elt->list);
937 kfree(elt->operand);
938 kfree(elt);
938 } 939 }
939} 940}
940 941
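
Note on the trace_events_filter.c hunk: postfix_clear() now unlinks each element before freeing its operand and, crucially, frees the element itself, which the old loop leaked. A user-space sketch of that drain order with a minimal singly linked list (demo_elt, demo_clear are invented names):

/* Illustrative only -- unlink, free what the node owns, then free the node. */
#include <stdlib.h>
#include <string.h>

struct demo_elt {
        struct demo_elt *next;
        char *operand;                  /* owned by the element */
};

static void demo_clear(struct demo_elt **head)
{
        while (*head) {
                struct demo_elt *elt = *head;

                *head = elt->next;      /* take it off the list first */
                free(elt->operand);
                free(elt);              /* the element itself must go too */
        }
}

int main(void)
{
        struct demo_elt *head = calloc(1, sizeof(*head));

        if (head)
                head->operand = strdup("ip == 0");
        demo_clear(&head);
        return 0;
}
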
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 23b63859130e..69543a905cd5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
165 struct ftrace_event_call *call = &event_hw_branch; 165 struct ftrace_event_call *call = &event_hw_branch;
166 struct trace_array *tr = hw_branch_trace; 166 struct trace_array *tr = hw_branch_trace;
167 struct ring_buffer_event *event; 167 struct ring_buffer_event *event;
168 struct ring_buffer *buf;
168 struct hw_branch_entry *entry; 169 struct hw_branch_entry *entry;
169 unsigned long irq1; 170 unsigned long irq1;
170 int cpu; 171 int cpu;
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
180 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) 181 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
181 goto out; 182 goto out;
182 183
183 event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES, 184 buf = tr->buffer;
185 event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
184 sizeof(*entry), 0, 0); 186 sizeof(*entry), 0, 0);
185 if (!event) 187 if (!event)
186 goto out; 188 goto out;
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
189 entry->ent.type = TRACE_HW_BRANCHES; 191 entry->ent.type = TRACE_HW_BRANCHES;
190 entry->from = from; 192 entry->from = from;
191 entry->to = to; 193 entry->to = to;
192 if (!filter_check_discard(call, entry, tr->buffer, event)) 194 if (!filter_check_discard(call, entry, buf, event))
193 trace_buffer_unlock_commit(tr, event, 0, 0); 195 trace_buffer_unlock_commit(buf, event, 0, 0);
194 196
195 out: 197 out:
196 atomic_dec(&tr->data[cpu]->disabled); 198 atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f572f44c6e1e..b6c12c6a1bcd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
69 * @s: trace sequence descriptor 69 * @s: trace sequence descriptor
70 * @fmt: printf format string 70 * @fmt: printf format string
71 * 71 *
72 * It returns 0 if the trace oversizes the buffer's free
73 * space, 1 otherwise.
74 *
72 * The tracer may use either sequence operations or its own 75 * The tracer may use either sequence operations or its own
73 * copy to user routines. To simplify formating of a trace 76 * copy to user routines. To simplify formating of a trace
74 * trace_seq_printf is used to store strings into a special 77 * trace_seq_printf is used to store strings into a special
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
95 98
96 s->len += ret; 99 s->len += ret;
97 100
98 return len; 101 return 1;
99} 102}
100EXPORT_SYMBOL_GPL(trace_seq_printf); 103EXPORT_SYMBOL_GPL(trace_seq_printf);
101 104
@@ -486,16 +489,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
486 hardirq ? 'h' : softirq ? 's' : '.')) 489 hardirq ? 'h' : softirq ? 's' : '.'))
487 return 0; 490 return 0;
488 491
489 if (entry->lock_depth < 0) 492 if (entry->preempt_count)
490 ret = trace_seq_putc(s, '.'); 493 ret = trace_seq_printf(s, "%x", entry->preempt_count);
491 else 494 else
492 ret = trace_seq_printf(s, "%d", entry->lock_depth); 495 ret = trace_seq_putc(s, '.');
496
493 if (!ret) 497 if (!ret)
494 return 0; 498 return 0;
495 499
496 if (entry->preempt_count) 500 if (entry->lock_depth < 0)
497 return trace_seq_printf(s, "%x", entry->preempt_count); 501 return trace_seq_putc(s, '.');
498 return trace_seq_putc(s, '.'); 502
503 return trace_seq_printf(s, "%d", entry->lock_depth);
499} 504}
500 505
501static int 506static int
@@ -883,7 +888,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
883 trace_assign_type(field, iter->ent); 888 trace_assign_type(field, iter->ent);
884 889
885 if (!S) 890 if (!S)
886 task_state_char(field->prev_state); 891 S = task_state_char(field->prev_state);
887 T = task_state_char(field->next_state); 892 T = task_state_char(field->next_state);
888 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", 893 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
889 field->prev_pid, 894 field->prev_pid,
@@ -918,7 +923,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
918 trace_assign_type(field, iter->ent); 923 trace_assign_type(field, iter->ent);
919 924
920 if (!S) 925 if (!S)
921 task_state_char(field->prev_state); 926 S = task_state_char(field->prev_state);
922 T = task_state_char(field->next_state); 927 T = task_state_char(field->next_state);
923 928
924 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); 929 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
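
Note on the trace_output.c hunks: trace_seq_printf() gains a documented 1/0 return value instead of returning the raw length, the latency format prints preempt_count before lock_depth, and trace_ctxwake_raw()/trace_ctxwake_hex() fix a dropped return value: task_state_char() was called but never assigned to S. The sketch below shows that class of bug; marking the helper warn_unused_result is not something the patch does, it is only shown here (on a hypothetical demo_state_char()) as a way to make the compiler flag the broken form.

/* Illustrative only -- a pure helper whose result must not be discarded. */
#include <stdio.h>

__attribute__((warn_unused_result))
static char demo_state_char(long state)
{
        return state ? 'D' : 'R';       /* pure: the result is the whole point */
}

int main(void)
{
        char S = 0;

        if (!S)
                S = demo_state_char(1); /* the fix: keep the assignment */
        /* demo_state_char(1);             old form: computed and thrown away */
        printf("%c\n", S);
        return 0;
}
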
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9fbce6c9d2e1..527e17eae575 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
166 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 166 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
167 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", 167 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
168 SYSCALL_FIELD(int, nr), 168 SYSCALL_FIELD(int, nr),
169 SYSCALL_FIELD(unsigned long, ret)); 169 SYSCALL_FIELD(long, ret));
170 if (!ret) 170 if (!ret)
171 return 0; 171 return 0;
172 172
@@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
212 if (ret) 212 if (ret)
213 return ret; 213 return ret;
214 214
215 ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0, 215 ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
216 FILTER_OTHER); 216 FILTER_OTHER);
217 217
218 return ret; 218 return ret;
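
Note on the trace_syscalls.c hunk: the exit event's ret field becomes long rather than unsigned long, so a failing syscall (which returns a small negative value) prints and filters as a negative number instead of a huge unsigned one. A two-line standalone illustration:

/* Illustrative only -- what the signed field buys for error returns. */
#include <stdio.h>

int main(void)
{
        long ret = -2;                  /* e.g. -ENOENT from a syscall */

        printf("as long:          %ld\n", ret);                 /* -2 */
        printf("as unsigned long: %lu\n", (unsigned long)ret);  /* 18446744073709551614 on 64-bit */
        return 0;
}
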
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
330 */ 330 */
331static void free_user(struct user_struct *up, unsigned long flags) 331static void free_user(struct user_struct *up, unsigned long flags)
332{ 332{
333 spin_unlock_irqrestore(&uidhash_lock, flags);
334 INIT_DELAYED_WORK(&up->work, cleanup_user_struct); 333 INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
335 schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); 334 schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
335 spin_unlock_irqrestore(&uidhash_lock, flags);
336} 336}
337 337
338#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ 338#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
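
Note on the kernel/user.c hunk: free_user() is entered with uidhash_lock held, and the deferred-cleanup work is now initialized and scheduled before that lock is dropped rather than after, presumably closing a window in which the user_struct was touched while other CPUs could already race against it through the uidhash. A minimal user-space sketch of the reordering only; all names (demo_free, demo_hash_lock, ...) are invented.

/* Illustrative only -- finish queueing cleanup before releasing the lock. */
#include <pthread.h>

struct demo_obj {
        int pending_cleanup;
};

static pthread_mutex_t demo_hash_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_schedule_cleanup(struct demo_obj *obj)
{
        obj->pending_cleanup = 1;       /* stand-in for schedule_delayed_work() */
}

/* called with demo_hash_lock held, mirroring free_user() */
static void demo_free(struct demo_obj *obj)
{
        demo_schedule_cleanup(obj);             /* still under the lock */
        pthread_mutex_unlock(&demo_hash_lock);  /* only now let others in */
}

int main(void)
{
        struct demo_obj obj = { 0 };

        pthread_mutex_lock(&demo_hash_lock);
        demo_free(&obj);
        return obj.pending_cleanup ? 0 : 1;
}
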
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
640EXPORT_SYMBOL(schedule_delayed_work); 640EXPORT_SYMBOL(schedule_delayed_work);
641 641
642/** 642/**
643 * flush_delayed_work - block until a dwork_struct's callback has terminated
644 * @dwork: the delayed work which is to be flushed
645 *
646 * Any timeout is cancelled, and any pending work is run immediately.
647 */
648void flush_delayed_work(struct delayed_work *dwork)
649{
650 if (del_timer_sync(&dwork->timer)) {
651 struct cpu_workqueue_struct *cwq;
652 cwq = wq_per_cpu(keventd_wq, get_cpu());
653 __queue_work(cwq, &dwork->work);
654 put_cpu();
655 }
656 flush_work(&dwork->work);
657}
658EXPORT_SYMBOL(flush_delayed_work);
659
660/**
643 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 661 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
644 * @cpu: cpu to use 662 * @cpu: cpu to use
645 * @dwork: job to be done 663 * @dwork: job to be done
@@ -667,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
667int schedule_on_each_cpu(work_func_t func) 685int schedule_on_each_cpu(work_func_t func)
668{ 686{
669 int cpu; 687 int cpu;
688 int orig = -1;
670 struct work_struct *works; 689 struct work_struct *works;
671 690
672 works = alloc_percpu(struct work_struct); 691 works = alloc_percpu(struct work_struct);
@@ -674,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
674 return -ENOMEM; 693 return -ENOMEM;
675 694
676 get_online_cpus(); 695 get_online_cpus();
696
697 /*
698 * When running in keventd don't schedule a work item on
699 * itself. Can just call directly because the work queue is
700 * already bound. This also is faster.
701 */
702 if (current_is_keventd())
703 orig = raw_smp_processor_id();
704
677 for_each_online_cpu(cpu) { 705 for_each_online_cpu(cpu) {
678 struct work_struct *work = per_cpu_ptr(works, cpu); 706 struct work_struct *work = per_cpu_ptr(works, cpu);
679 707
680 INIT_WORK(work, func); 708 INIT_WORK(work, func);
681 schedule_work_on(cpu, work); 709 if (cpu != orig)
710 schedule_work_on(cpu, work);
682 } 711 }
712 if (orig >= 0)
713 func(per_cpu_ptr(works, orig));
714
683 for_each_online_cpu(cpu) 715 for_each_online_cpu(cpu)
684 flush_work(per_cpu_ptr(works, cpu)); 716 flush_work(per_cpu_ptr(works, cpu));
717
685 put_online_cpus(); 718 put_online_cpus();
686 free_percpu(works); 719 free_percpu(works);
687 return 0; 720 return 0;
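
Note on the workqueue.c hunks: a new flush_delayed_work() helper cancels a pending timer, queues the work immediately if the timer was still armed, and then flushes it; schedule_on_each_cpu() now runs the function directly when already executing in keventd instead of queueing work onto itself. The module-style sketch below shows one plausible use of the new helper; the module itself (demo_fn, demo_work, demo_init, demo_exit) is invented for this example and is not part of the patch.

/* Illustrative only -- make sure pending delayed work has run before teardown. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_fn(struct work_struct *work)
{
        pr_info("demo delayed work ran\n");
}

static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
        schedule_delayed_work(&demo_work, msecs_to_jiffies(1000));
        return 0;
}

static void __exit demo_exit(void)
{
        /* If the timer is still pending, the work is queued right away;
         * either way this returns only after demo_fn() has finished. */
        flush_delayed_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");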