Diffstat (limited to 'kernel')
-rw-r--r--  kernel/async.c            |   18
-rw-r--r--  kernel/cgroup.c           |    5
-rw-r--r--  kernel/cpu.c              |    6
-rw-r--r--  kernel/fork.c             |    2
-rw-r--r--  kernel/futex.c            |  201
-rw-r--r--  kernel/irq/Makefile       |    1
-rw-r--r--  kernel/irq/chip.c         |    7
-rw-r--r--  kernel/irq/handle.c       |   36
-rw-r--r--  kernel/irq/internals.h    |    3
-rw-r--r--  kernel/irq/manage.c       |  223
-rw-r--r--  kernel/irq/numa_migrate.c |   11
-rw-r--r--  kernel/irq/pm.c           |   79
-rw-r--r--  kernel/irq/spurious.c     |   14
-rw-r--r--  kernel/kexec.c            |   19
-rw-r--r--  kernel/kmod.c             |    2
-rw-r--r--  kernel/kthread.c          |    4
-rw-r--r--  kernel/latencytop.c       |   83
-rw-r--r--  kernel/lockdep.c          |   21
-rw-r--r--  kernel/module.c           |   25
-rw-r--r--  kernel/posix-cpu-timers.c |    3
-rw-r--r--  kernel/power/disk.c       |  139
-rw-r--r--  kernel/power/main.c       |   55
-rw-r--r--  kernel/power/snapshot.c   |    9
-rw-r--r--  kernel/power/swsusp.c     |   18
-rw-r--r--  kernel/rcutorture.c       |   25
-rw-r--r--  kernel/relay.c            |    2
-rw-r--r--  kernel/sched.c            | 1004
-rw-r--r--  kernel/sched_clock.c      |   31
-rw-r--r--  kernel/sched_cpupri.h     |    2
-rw-r--r--  kernel/sched_debug.c      |    8
-rw-r--r--  kernel/sched_fair.c       |   59
-rw-r--r--  kernel/sched_features.h   |    3
-rw-r--r--  kernel/sched_rt.c         |  537
-rw-r--r--  kernel/sched_stats.h      |    7
-rw-r--r--  kernel/signal.c           |    8
-rw-r--r--  kernel/stop_machine.c     |    2
-rw-r--r--  kernel/sysctl.c           |    2
-rw-r--r--  kernel/sysctl_check.c     |    1
-rw-r--r--  kernel/time/Makefile      |    2
-rw-r--r--  kernel/time/clockevents.c |   20
-rw-r--r--  kernel/time/clocksource.c |   76
-rw-r--r--  kernel/time/ntp.c         |  444
-rw-r--r--  kernel/time/timecompare.c |  191
-rw-r--r--  kernel/timer.c            |  110
-rw-r--r--  kernel/user.c             |    2
-rw-r--r--  kernel/workqueue.c        |    6
46 files changed, 2421 insertions(+), 1105 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index f565891f2c9b..968ef9457d4e 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -49,6 +49,7 @@ asynchronous and synchronous parts of the kernel.
49*/ 49*/
50 50
51#include <linux/async.h> 51#include <linux/async.h>
52#include <linux/bug.h>
52#include <linux/module.h> 53#include <linux/module.h>
53#include <linux/wait.h> 54#include <linux/wait.h>
54#include <linux/sched.h> 55#include <linux/sched.h>
@@ -387,20 +388,11 @@ static int async_manager_thread(void *unused)
387 388
388static int __init async_init(void) 389static int __init async_init(void)
389{ 390{
390 if (async_enabled) 391 async_enabled =
391 if (IS_ERR(kthread_run(async_manager_thread, NULL, 392 !IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));
392 "async/mgr")))
393 async_enabled = 0;
394 return 0;
395}
396 393
397static int __init setup_async(char *str) 394 WARN_ON(!async_enabled);
398{ 395 return 0;
399 async_enabled = 1;
400 return 1;
401} 396}
402 397
403__setup("fastboot", setup_async);
404
405
406core_initcall(async_init); 398core_initcall(async_init);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9edb5c4b79b4..c500ca7239b2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1071,7 +1071,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
1071 mutex_unlock(&cgroup_mutex); 1071 mutex_unlock(&cgroup_mutex);
1072 } 1072 }
1073 1073
1074 return simple_set_mnt(mnt, sb); 1074 simple_set_mnt(mnt, sb);
1075 return 0;
1075 1076
1076 free_cg_links: 1077 free_cg_links:
1077 free_cg_links(&tmp_cg_links); 1078 free_cg_links(&tmp_cg_links);
@@ -1627,7 +1628,7 @@ static struct inode_operations cgroup_dir_inode_operations = {
1627static int cgroup_create_file(struct dentry *dentry, int mode, 1628static int cgroup_create_file(struct dentry *dentry, int mode,
1628 struct super_block *sb) 1629 struct super_block *sb)
1629{ 1630{
1630 static struct dentry_operations cgroup_dops = { 1631 static const struct dentry_operations cgroup_dops = {
1631 .d_iput = cgroup_diput, 1632 .d_iput = cgroup_diput,
1632 }; 1633 };
1633 1634
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 79e40f00dcb8..395b6974dc8d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -281,7 +281,7 @@ int __ref cpu_down(unsigned int cpu)
281 goto out; 281 goto out;
282 } 282 }
283 283
284 cpu_clear(cpu, cpu_active_map); 284 set_cpu_active(cpu, false);
285 285
286 /* 286 /*
287 * Make sure the all cpus did the reschedule and are not 287 * Make sure the all cpus did the reschedule and are not
@@ -296,7 +296,7 @@ int __ref cpu_down(unsigned int cpu)
296 err = _cpu_down(cpu, 0); 296 err = _cpu_down(cpu, 0);
297 297
298 if (cpu_online(cpu)) 298 if (cpu_online(cpu))
299 cpu_set(cpu, cpu_active_map); 299 set_cpu_active(cpu, true);
300 300
301out: 301out:
302 cpu_maps_update_done(); 302 cpu_maps_update_done();
@@ -333,7 +333,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
333 goto out_notify; 333 goto out_notify;
334 BUG_ON(!cpu_online(cpu)); 334 BUG_ON(!cpu_online(cpu));
335 335
336 cpu_set(cpu, cpu_active_map); 336 set_cpu_active(cpu, true);
337 337
338 /* Now call notifier in preparation. */ 338 /* Now call notifier in preparation. */
339 raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); 339 raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
diff --git a/kernel/fork.c b/kernel/fork.c
index 6715ebc3761d..47c15840a381 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -284,7 +284,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
284 mm->free_area_cache = oldmm->mmap_base; 284 mm->free_area_cache = oldmm->mmap_base;
285 mm->cached_hole_size = ~0UL; 285 mm->cached_hole_size = ~0UL;
286 mm->map_count = 0; 286 mm->map_count = 0;
287 cpus_clear(mm->cpu_vm_mask); 287 cpumask_clear(mm_cpumask(mm));
288 mm->mm_rb = RB_ROOT; 288 mm->mm_rb = RB_ROOT;
289 rb_link = &mm->mm_rb.rb_node; 289 rb_link = &mm->mm_rb.rb_node;
290 rb_parent = NULL; 290 rb_parent = NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index 438701adce23..6b50a024bca2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -114,7 +114,9 @@ struct futex_q {
114}; 114};
115 115
116/* 116/*
117 * Split the global futex_lock into every hash list lock. 117 * Hash buckets are shared by all the futex_keys that hash to the same
118 * location. Each key may have multiple futex_q structures, one for each task
119 * waiting on a futex.
118 */ 120 */
119struct futex_hash_bucket { 121struct futex_hash_bucket {
120 spinlock_t lock; 122 spinlock_t lock;
@@ -189,8 +191,7 @@ static void drop_futex_key_refs(union futex_key *key)
189/** 191/**
190 * get_futex_key - Get parameters which are the keys for a futex. 192 * get_futex_key - Get parameters which are the keys for a futex.
191 * @uaddr: virtual address of the futex 193 * @uaddr: virtual address of the futex
192 * @shared: NULL for a PROCESS_PRIVATE futex, 194 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
193 * &current->mm->mmap_sem for a PROCESS_SHARED futex
194 * @key: address where result is stored. 195 * @key: address where result is stored.
195 * 196 *
196 * Returns a negative error code or 0 197 * Returns a negative error code or 0
@@ -200,9 +201,7 @@ static void drop_futex_key_refs(union futex_key *key)
200 * offset_within_page). For private mappings, it's (uaddr, current->mm). 201 * offset_within_page). For private mappings, it's (uaddr, current->mm).
201 * We can usually work out the index without swapping in the page. 202 * We can usually work out the index without swapping in the page.
202 * 203 *
203 * fshared is NULL for PROCESS_PRIVATE futexes 204 * lock_page() might sleep, the caller should not hold a spinlock.
204 * For other futexes, it points to &current->mm->mmap_sem and
205 * caller must have taken the reader lock. but NOT any spinlocks.
206 */ 205 */
207static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) 206static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
208{ 207{
@@ -299,41 +298,6 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
299 return ret ? -EFAULT : 0; 298 return ret ? -EFAULT : 0;
300} 299}
301 300
302/*
303 * Fault handling.
304 */
305static int futex_handle_fault(unsigned long address, int attempt)
306{
307 struct vm_area_struct * vma;
308 struct mm_struct *mm = current->mm;
309 int ret = -EFAULT;
310
311 if (attempt > 2)
312 return ret;
313
314 down_read(&mm->mmap_sem);
315 vma = find_vma(mm, address);
316 if (vma && address >= vma->vm_start &&
317 (vma->vm_flags & VM_WRITE)) {
318 int fault;
319 fault = handle_mm_fault(mm, vma, address, 1);
320 if (unlikely((fault & VM_FAULT_ERROR))) {
321#if 0
322 /* XXX: let's do this when we verify it is OK */
323 if (ret & VM_FAULT_OOM)
324 ret = -ENOMEM;
325#endif
326 } else {
327 ret = 0;
328 if (fault & VM_FAULT_MAJOR)
329 current->maj_flt++;
330 else
331 current->min_flt++;
332 }
333 }
334 up_read(&mm->mmap_sem);
335 return ret;
336}
337 301
338/* 302/*
339 * PI code: 303 * PI code:
@@ -589,10 +553,9 @@ static void wake_futex(struct futex_q *q)
589 * The waiting task can free the futex_q as soon as this is written, 553 * The waiting task can free the futex_q as soon as this is written,
590 * without taking any locks. This must come last. 554 * without taking any locks. This must come last.
591 * 555 *
592 * A memory barrier is required here to prevent the following store 556 * A memory barrier is required here to prevent the following store to
593 * to lock_ptr from getting ahead of the wakeup. Clearing the lock 557 * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
594 * at the end of wake_up_all() does not prevent this store from 558 * end of wake_up() does not prevent this store from moving.
595 * moving.
596 */ 559 */
597 smp_wmb(); 560 smp_wmb();
598 q->lock_ptr = NULL; 561 q->lock_ptr = NULL;
@@ -692,9 +655,16 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
692 } 655 }
693} 656}
694 657
658static inline void
659double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
660{
661 spin_unlock(&hb1->lock);
662 if (hb1 != hb2)
663 spin_unlock(&hb2->lock);
664}
665
695/* 666/*
696 * Wake up all waiters hashed on the physical page that is mapped 667 * Wake up waiters matching bitset queued on this futex (uaddr).
697 * to this virtual address:
698 */ 668 */
699static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) 669static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
700{ 670{
@@ -750,9 +720,9 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
750 struct futex_hash_bucket *hb1, *hb2; 720 struct futex_hash_bucket *hb1, *hb2;
751 struct plist_head *head; 721 struct plist_head *head;
752 struct futex_q *this, *next; 722 struct futex_q *this, *next;
753 int ret, op_ret, attempt = 0; 723 int ret, op_ret;
754 724
755retryfull: 725retry:
756 ret = get_futex_key(uaddr1, fshared, &key1); 726 ret = get_futex_key(uaddr1, fshared, &key1);
757 if (unlikely(ret != 0)) 727 if (unlikely(ret != 0))
758 goto out; 728 goto out;
@@ -763,16 +733,13 @@ retryfull:
763 hb1 = hash_futex(&key1); 733 hb1 = hash_futex(&key1);
764 hb2 = hash_futex(&key2); 734 hb2 = hash_futex(&key2);
765 735
766retry:
767 double_lock_hb(hb1, hb2); 736 double_lock_hb(hb1, hb2);
768 737retry_private:
769 op_ret = futex_atomic_op_inuser(op, uaddr2); 738 op_ret = futex_atomic_op_inuser(op, uaddr2);
770 if (unlikely(op_ret < 0)) { 739 if (unlikely(op_ret < 0)) {
771 u32 dummy; 740 u32 dummy;
772 741
773 spin_unlock(&hb1->lock); 742 double_unlock_hb(hb1, hb2);
774 if (hb1 != hb2)
775 spin_unlock(&hb2->lock);
776 743
777#ifndef CONFIG_MMU 744#ifndef CONFIG_MMU
778 /* 745 /*
@@ -788,26 +755,16 @@ retry:
788 goto out_put_keys; 755 goto out_put_keys;
789 } 756 }
790 757
791 /*
792 * futex_atomic_op_inuser needs to both read and write
793 * *(int __user *)uaddr2, but we can't modify it
794 * non-atomically. Therefore, if get_user below is not
795 * enough, we need to handle the fault ourselves, while
796 * still holding the mmap_sem.
797 */
798 if (attempt++) {
799 ret = futex_handle_fault((unsigned long)uaddr2,
800 attempt);
801 if (ret)
802 goto out_put_keys;
803 goto retry;
804 }
805
806 ret = get_user(dummy, uaddr2); 758 ret = get_user(dummy, uaddr2);
807 if (ret) 759 if (ret)
808 return ret; 760 goto out_put_keys;
761
762 if (!fshared)
763 goto retry_private;
809 764
810 goto retryfull; 765 put_futex_key(fshared, &key2);
766 put_futex_key(fshared, &key1);
767 goto retry;
811 } 768 }
812 769
813 head = &hb1->chain; 770 head = &hb1->chain;
@@ -834,9 +791,7 @@ retry:
834 ret += op_ret; 791 ret += op_ret;
835 } 792 }
836 793
837 spin_unlock(&hb1->lock); 794 double_unlock_hb(hb1, hb2);
838 if (hb1 != hb2)
839 spin_unlock(&hb2->lock);
840out_put_keys: 795out_put_keys:
841 put_futex_key(fshared, &key2); 796 put_futex_key(fshared, &key2);
842out_put_key1: 797out_put_key1:
@@ -869,6 +824,7 @@ retry:
869 hb1 = hash_futex(&key1); 824 hb1 = hash_futex(&key1);
870 hb2 = hash_futex(&key2); 825 hb2 = hash_futex(&key2);
871 826
827retry_private:
872 double_lock_hb(hb1, hb2); 828 double_lock_hb(hb1, hb2);
873 829
874 if (likely(cmpval != NULL)) { 830 if (likely(cmpval != NULL)) {
@@ -877,16 +833,18 @@ retry:
877 ret = get_futex_value_locked(&curval, uaddr1); 833 ret = get_futex_value_locked(&curval, uaddr1);
878 834
879 if (unlikely(ret)) { 835 if (unlikely(ret)) {
880 spin_unlock(&hb1->lock); 836 double_unlock_hb(hb1, hb2);
881 if (hb1 != hb2)
882 spin_unlock(&hb2->lock);
883 837
884 ret = get_user(curval, uaddr1); 838 ret = get_user(curval, uaddr1);
839 if (ret)
840 goto out_put_keys;
885 841
886 if (!ret) 842 if (!fshared)
887 goto retry; 843 goto retry_private;
888 844
889 goto out_put_keys; 845 put_futex_key(fshared, &key2);
846 put_futex_key(fshared, &key1);
847 goto retry;
890 } 848 }
891 if (curval != *cmpval) { 849 if (curval != *cmpval) {
892 ret = -EAGAIN; 850 ret = -EAGAIN;
@@ -923,9 +881,7 @@ retry:
923 } 881 }
924 882
925out_unlock: 883out_unlock:
926 spin_unlock(&hb1->lock); 884 double_unlock_hb(hb1, hb2);
927 if (hb1 != hb2)
928 spin_unlock(&hb2->lock);
929 885
930 /* drop_futex_key_refs() must be called outside the spinlocks. */ 886 /* drop_futex_key_refs() must be called outside the spinlocks. */
931 while (--drop_count >= 0) 887 while (--drop_count >= 0)
@@ -1063,7 +1019,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1063 struct futex_pi_state *pi_state = q->pi_state; 1019 struct futex_pi_state *pi_state = q->pi_state;
1064 struct task_struct *oldowner = pi_state->owner; 1020 struct task_struct *oldowner = pi_state->owner;
1065 u32 uval, curval, newval; 1021 u32 uval, curval, newval;
1066 int ret, attempt = 0; 1022 int ret;
1067 1023
1068 /* Owner died? */ 1024 /* Owner died? */
1069 if (!pi_state->owner) 1025 if (!pi_state->owner)
@@ -1076,11 +1032,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1076 * in the user space variable. This must be atomic as we have 1032 * in the user space variable. This must be atomic as we have
1077 * to preserve the owner died bit here. 1033 * to preserve the owner died bit here.
1078 * 1034 *
1079 * Note: We write the user space value _before_ changing the 1035 * Note: We write the user space value _before_ changing the pi_state
1080 * pi_state because we can fault here. Imagine swapped out 1036 * because we can fault here. Imagine swapped out pages or a fork
1081 * pages or a fork, which was running right before we acquired 1037 * that marked all the anonymous memory readonly for cow.
1082 * mmap_sem, that marked all the anonymous memory readonly for
1083 * cow.
1084 * 1038 *
1085 * Modifying pi_state _before_ the user space value would 1039 * Modifying pi_state _before_ the user space value would
1086 * leave the pi_state in an inconsistent state when we fault 1040 * leave the pi_state in an inconsistent state when we fault
@@ -1136,7 +1090,7 @@ retry:
1136handle_fault: 1090handle_fault:
1137 spin_unlock(q->lock_ptr); 1091 spin_unlock(q->lock_ptr);
1138 1092
1139 ret = futex_handle_fault((unsigned long)uaddr, attempt++); 1093 ret = get_user(uval, uaddr);
1140 1094
1141 spin_lock(q->lock_ptr); 1095 spin_lock(q->lock_ptr);
1142 1096
@@ -1185,10 +1139,11 @@ retry:
1185 if (unlikely(ret != 0)) 1139 if (unlikely(ret != 0))
1186 goto out; 1140 goto out;
1187 1141
1142retry_private:
1188 hb = queue_lock(&q); 1143 hb = queue_lock(&q);
1189 1144
1190 /* 1145 /*
1191 * Access the page AFTER the futex is queued. 1146 * Access the page AFTER the hash-bucket is locked.
1192 * Order is important: 1147 * Order is important:
1193 * 1148 *
1194 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); 1149 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
@@ -1204,20 +1159,23 @@ retry:
1204 * a wakeup when *uaddr != val on entry to the syscall. This is 1159 * a wakeup when *uaddr != val on entry to the syscall. This is
1205 * rare, but normal. 1160 * rare, but normal.
1206 * 1161 *
1207 * for shared futexes, we hold the mmap semaphore, so the mapping 1162 * For shared futexes, we hold the mmap semaphore, so the mapping
1208 * cannot have changed since we looked it up in get_futex_key. 1163 * cannot have changed since we looked it up in get_futex_key.
1209 */ 1164 */
1210 ret = get_futex_value_locked(&uval, uaddr); 1165 ret = get_futex_value_locked(&uval, uaddr);
1211 1166
1212 if (unlikely(ret)) { 1167 if (unlikely(ret)) {
1213 queue_unlock(&q, hb); 1168 queue_unlock(&q, hb);
1214 put_futex_key(fshared, &q.key);
1215 1169
1216 ret = get_user(uval, uaddr); 1170 ret = get_user(uval, uaddr);
1171 if (ret)
1172 goto out_put_key;
1217 1173
1218 if (!ret) 1174 if (!fshared)
1219 goto retry; 1175 goto retry_private;
1220 goto out; 1176
1177 put_futex_key(fshared, &q.key);
1178 goto retry;
1221 } 1179 }
1222 ret = -EWOULDBLOCK; 1180 ret = -EWOULDBLOCK;
1223 if (unlikely(uval != val)) { 1181 if (unlikely(uval != val)) {
@@ -1248,16 +1206,13 @@ retry:
1248 if (!abs_time) 1206 if (!abs_time)
1249 schedule(); 1207 schedule();
1250 else { 1208 else {
1251 unsigned long slack;
1252 slack = current->timer_slack_ns;
1253 if (rt_task(current))
1254 slack = 0;
1255 hrtimer_init_on_stack(&t.timer, 1209 hrtimer_init_on_stack(&t.timer,
1256 clockrt ? CLOCK_REALTIME : 1210 clockrt ? CLOCK_REALTIME :
1257 CLOCK_MONOTONIC, 1211 CLOCK_MONOTONIC,
1258 HRTIMER_MODE_ABS); 1212 HRTIMER_MODE_ABS);
1259 hrtimer_init_sleeper(&t, current); 1213 hrtimer_init_sleeper(&t, current);
1260 hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack); 1214 hrtimer_set_expires_range_ns(&t.timer, *abs_time,
1215 current->timer_slack_ns);
1261 1216
1262 hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); 1217 hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
1263 if (!hrtimer_active(&t.timer)) 1218 if (!hrtimer_active(&t.timer))
@@ -1354,7 +1309,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
1354 struct futex_hash_bucket *hb; 1309 struct futex_hash_bucket *hb;
1355 u32 uval, newval, curval; 1310 u32 uval, newval, curval;
1356 struct futex_q q; 1311 struct futex_q q;
1357 int ret, lock_taken, ownerdied = 0, attempt = 0; 1312 int ret, lock_taken, ownerdied = 0;
1358 1313
1359 if (refill_pi_state_cache()) 1314 if (refill_pi_state_cache())
1360 return -ENOMEM; 1315 return -ENOMEM;
@@ -1374,7 +1329,7 @@ retry:
1374 if (unlikely(ret != 0)) 1329 if (unlikely(ret != 0))
1375 goto out; 1330 goto out;
1376 1331
1377retry_unlocked: 1332retry_private:
1378 hb = queue_lock(&q); 1333 hb = queue_lock(&q);
1379 1334
1380retry_locked: 1335retry_locked:
@@ -1458,6 +1413,7 @@ retry_locked:
1458 * exit to complete. 1413 * exit to complete.
1459 */ 1414 */
1460 queue_unlock(&q, hb); 1415 queue_unlock(&q, hb);
1416 put_futex_key(fshared, &q.key);
1461 cond_resched(); 1417 cond_resched();
1462 goto retry; 1418 goto retry;
1463 1419
@@ -1564,6 +1520,13 @@ retry_locked:
1564 } 1520 }
1565 } 1521 }
1566 1522
1523 /*
1524 * If fixup_pi_state_owner() faulted and was unable to handle the
1525 * fault, unlock it and return the fault to userspace.
1526 */
1527 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1528 rt_mutex_unlock(&q.pi_state->pi_mutex);
1529
1567 /* Unqueue and drop the lock */ 1530 /* Unqueue and drop the lock */
1568 unqueue_me_pi(&q); 1531 unqueue_me_pi(&q);
1569 1532
@@ -1591,22 +1554,18 @@ uaddr_faulted:
1591 */ 1554 */
1592 queue_unlock(&q, hb); 1555 queue_unlock(&q, hb);
1593 1556
1594 if (attempt++) {
1595 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1596 if (ret)
1597 goto out_put_key;
1598 goto retry_unlocked;
1599 }
1600
1601 ret = get_user(uval, uaddr); 1557 ret = get_user(uval, uaddr);
1602 if (!ret) 1558 if (ret)
1603 goto retry; 1559 goto out_put_key;
1604 1560
1605 if (to) 1561 if (!fshared)
1606 destroy_hrtimer_on_stack(&to->timer); 1562 goto retry_private;
1607 return ret; 1563
1564 put_futex_key(fshared, &q.key);
1565 goto retry;
1608} 1566}
1609 1567
1568
1610/* 1569/*
1611 * Userspace attempted a TID -> 0 atomic transition, and failed. 1570 * Userspace attempted a TID -> 0 atomic transition, and failed.
1612 * This is the in-kernel slowpath: we look up the PI state (if any), 1571 * This is the in-kernel slowpath: we look up the PI state (if any),
@@ -1619,7 +1578,7 @@ static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1619 u32 uval; 1578 u32 uval;
1620 struct plist_head *head; 1579 struct plist_head *head;
1621 union futex_key key = FUTEX_KEY_INIT; 1580 union futex_key key = FUTEX_KEY_INIT;
1622 int ret, attempt = 0; 1581 int ret;
1623 1582
1624retry: 1583retry:
1625 if (get_user(uval, uaddr)) 1584 if (get_user(uval, uaddr))
@@ -1635,7 +1594,6 @@ retry:
1635 goto out; 1594 goto out;
1636 1595
1637 hb = hash_futex(&key); 1596 hb = hash_futex(&key);
1638retry_unlocked:
1639 spin_lock(&hb->lock); 1597 spin_lock(&hb->lock);
1640 1598
1641 /* 1599 /*
@@ -1700,14 +1658,7 @@ pi_faulted:
1700 * we have to drop the mmap_sem in order to call get_user(). 1658 * we have to drop the mmap_sem in order to call get_user().
1701 */ 1659 */
1702 spin_unlock(&hb->lock); 1660 spin_unlock(&hb->lock);
1703 1661 put_futex_key(fshared, &key);
1704 if (attempt++) {
1705 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1706 if (ret)
1707 goto out;
1708 uval = 0;
1709 goto retry_unlocked;
1710 }
1711 1662
1712 ret = get_user(uval, uaddr); 1663 ret = get_user(uval, uaddr);
1713 if (!ret) 1664 if (!ret)
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 4dd5b1edac98..3394f8f52964 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4obj-$(CONFIG_PROC_FS) += proc.o 4obj-$(CONFIG_PROC_FS) += proc.o
5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
6obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o 6obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
7obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 122fef4b0bd3..c687ba4363f2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -81,6 +81,7 @@ void dynamic_irq_cleanup(unsigned int irq)
81 desc->handle_irq = handle_bad_irq; 81 desc->handle_irq = handle_bad_irq;
82 desc->chip = &no_irq_chip; 82 desc->chip = &no_irq_chip;
83 desc->name = NULL; 83 desc->name = NULL;
84 clear_kstat_irqs(desc);
84 spin_unlock_irqrestore(&desc->lock, flags); 85 spin_unlock_irqrestore(&desc->lock, flags);
85} 86}
86 87
@@ -293,7 +294,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
293 desc->chip->mask_ack(irq); 294 desc->chip->mask_ack(irq);
294 else { 295 else {
295 desc->chip->mask(irq); 296 desc->chip->mask(irq);
296 desc->chip->ack(irq); 297 if (desc->chip->ack)
298 desc->chip->ack(irq);
297 } 299 }
298} 300}
299 301
@@ -479,7 +481,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
479 kstat_incr_irqs_this_cpu(irq, desc); 481 kstat_incr_irqs_this_cpu(irq, desc);
480 482
481 /* Start handling the irq */ 483 /* Start handling the irq */
482 desc->chip->ack(irq); 484 if (desc->chip->ack)
485 desc->chip->ack(irq);
483 desc = irq_remap_to_desc(irq, desc); 486 desc = irq_remap_to_desc(irq, desc);
484 487
485 /* Mark the IRQ currently in progress.*/ 488 /* Mark the IRQ currently in progress.*/
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 412370ab9a34..343acecae629 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -83,19 +83,21 @@ static struct irq_desc irq_desc_init = {
83 83
84void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) 84void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
85{ 85{
86 unsigned long bytes;
87 char *ptr;
88 int node; 86 int node;
89 87 void *ptr;
90 /* Compute how many bytes we need per irq and allocate them */
91 bytes = nr * sizeof(unsigned int);
92 88
93 node = cpu_to_node(cpu); 89 node = cpu_to_node(cpu);
94 ptr = kzalloc_node(bytes, GFP_ATOMIC, node); 90 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
95 printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
96 91
97 if (ptr) 92 /*
98 desc->kstat_irqs = (unsigned int *)ptr; 93 * don't overwite if can not get new one
94 * init_copy_kstat_irqs() could still use old one
95 */
96 if (ptr) {
97 printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
98 cpu, node);
99 desc->kstat_irqs = ptr;
100 }
99} 101}
100 102
101static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) 103static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
@@ -238,6 +240,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
238 } 240 }
239}; 241};
240 242
243static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
241int __init early_irq_init(void) 244int __init early_irq_init(void)
242{ 245{
243 struct irq_desc *desc; 246 struct irq_desc *desc;
@@ -254,6 +257,7 @@ int __init early_irq_init(void)
254 for (i = 0; i < count; i++) { 257 for (i = 0; i < count; i++) {
255 desc[i].irq = i; 258 desc[i].irq = i;
256 init_alloc_desc_masks(&desc[i], 0, true); 259 init_alloc_desc_masks(&desc[i], 0, true);
260 desc[i].kstat_irqs = kstat_irqs_all[i];
257 } 261 }
258 return arch_early_irq_init(); 262 return arch_early_irq_init();
259} 263}
@@ -269,6 +273,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
269} 273}
270#endif /* !CONFIG_SPARSE_IRQ */ 274#endif /* !CONFIG_SPARSE_IRQ */
271 275
276void clear_kstat_irqs(struct irq_desc *desc)
277{
278 memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
279}
280
272/* 281/*
273 * What should we do if we get a hw irq event on an illegal vector? 282 * What should we do if we get a hw irq event on an illegal vector?
274 * Each architecture has to answer this themself. 283 * Each architecture has to answer this themself.
@@ -345,6 +354,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
345 irqreturn_t ret, retval = IRQ_NONE; 354 irqreturn_t ret, retval = IRQ_NONE;
346 unsigned int status = 0; 355 unsigned int status = 0;
347 356
357 WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
358
348 if (!(action->flags & IRQF_DISABLED)) 359 if (!(action->flags & IRQF_DISABLED))
349 local_irq_enable_in_hardirq(); 360 local_irq_enable_in_hardirq();
350 361
@@ -366,6 +377,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
366} 377}
367 378
368#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 379#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
380
381#ifdef CONFIG_ENABLE_WARN_DEPRECATED
382# warning __do_IRQ is deprecated. Please convert to proper flow handlers
383#endif
384
369/** 385/**
370 * __do_IRQ - original all in one highlevel IRQ handler 386 * __do_IRQ - original all in one highlevel IRQ handler
371 * @irq: the interrupt number 387 * @irq: the interrupt number
@@ -486,12 +502,10 @@ void early_init_irq_lock_class(void)
486 } 502 }
487} 503}
488 504
489#ifdef CONFIG_SPARSE_IRQ
490unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 505unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
491{ 506{
492 struct irq_desc *desc = irq_to_desc(irq); 507 struct irq_desc *desc = irq_to_desc(irq);
493 return desc ? desc->kstat_irqs[cpu] : 0; 508 return desc ? desc->kstat_irqs[cpu] : 0;
494} 509}
495#endif
496EXPORT_SYMBOL(kstat_irqs_cpu); 510EXPORT_SYMBOL(kstat_irqs_cpu);
497 511
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 40416a81a0f5..01ce20eab38f 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -12,9 +12,12 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
12 12
13extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 13extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
14 unsigned long flags); 14 unsigned long flags);
15extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
16extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
15 17
16extern struct lock_class_key irq_desc_lock_class; 18extern struct lock_class_key irq_desc_lock_class;
17extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr); 19extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
20extern void clear_kstat_irqs(struct irq_desc *desc);
18extern spinlock_t sparse_irq_lock; 21extern spinlock_t sparse_irq_lock;
19 22
20#ifdef CONFIG_SPARSE_IRQ 23#ifdef CONFIG_SPARSE_IRQ
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a3a5dc9ef346..1516ab77355c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,7 +109,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
109/* 109/*
110 * Generic version of the affinity autoselector. 110 * Generic version of the affinity autoselector.
111 */ 111 */
112int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) 112static int setup_affinity(unsigned int irq, struct irq_desc *desc)
113{ 113{
114 if (!irq_can_set_affinity(irq)) 114 if (!irq_can_set_affinity(irq))
115 return 0; 115 return 0;
@@ -133,7 +133,7 @@ set_affinity:
133 return 0; 133 return 0;
134} 134}
135#else 135#else
136static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d) 136static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
137{ 137{
138 return irq_select_affinity(irq); 138 return irq_select_affinity(irq);
139} 139}
@@ -149,19 +149,33 @@ int irq_select_affinity_usr(unsigned int irq)
149 int ret; 149 int ret;
150 150
151 spin_lock_irqsave(&desc->lock, flags); 151 spin_lock_irqsave(&desc->lock, flags);
152 ret = do_irq_select_affinity(irq, desc); 152 ret = setup_affinity(irq, desc);
153 spin_unlock_irqrestore(&desc->lock, flags); 153 spin_unlock_irqrestore(&desc->lock, flags);
154 154
155 return ret; 155 return ret;
156} 156}
157 157
158#else 158#else
159static inline int do_irq_select_affinity(int irq, struct irq_desc *desc) 159static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
160{ 160{
161 return 0; 161 return 0;
162} 162}
163#endif 163#endif
164 164
165void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
166{
167 if (suspend) {
168 if (!desc->action || (desc->action->flags & IRQF_TIMER))
169 return;
170 desc->status |= IRQ_SUSPENDED;
171 }
172
173 if (!desc->depth++) {
174 desc->status |= IRQ_DISABLED;
175 desc->chip->disable(irq);
176 }
177}
178
165/** 179/**
166 * disable_irq_nosync - disable an irq without waiting 180 * disable_irq_nosync - disable an irq without waiting
167 * @irq: Interrupt to disable 181 * @irq: Interrupt to disable
@@ -182,10 +196,7 @@ void disable_irq_nosync(unsigned int irq)
182 return; 196 return;
183 197
184 spin_lock_irqsave(&desc->lock, flags); 198 spin_lock_irqsave(&desc->lock, flags);
185 if (!desc->depth++) { 199 __disable_irq(desc, irq, false);
186 desc->status |= IRQ_DISABLED;
187 desc->chip->disable(irq);
188 }
189 spin_unlock_irqrestore(&desc->lock, flags); 200 spin_unlock_irqrestore(&desc->lock, flags);
190} 201}
191EXPORT_SYMBOL(disable_irq_nosync); 202EXPORT_SYMBOL(disable_irq_nosync);
@@ -215,15 +226,21 @@ void disable_irq(unsigned int irq)
215} 226}
216EXPORT_SYMBOL(disable_irq); 227EXPORT_SYMBOL(disable_irq);
217 228
218static void __enable_irq(struct irq_desc *desc, unsigned int irq) 229void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
219{ 230{
231 if (resume)
232 desc->status &= ~IRQ_SUSPENDED;
233
220 switch (desc->depth) { 234 switch (desc->depth) {
221 case 0: 235 case 0:
236 err_out:
222 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 237 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
223 break; 238 break;
224 case 1: { 239 case 1: {
225 unsigned int status = desc->status & ~IRQ_DISABLED; 240 unsigned int status = desc->status & ~IRQ_DISABLED;
226 241
242 if (desc->status & IRQ_SUSPENDED)
243 goto err_out;
227 /* Prevent probing on this irq: */ 244 /* Prevent probing on this irq: */
228 desc->status = status | IRQ_NOPROBE; 245 desc->status = status | IRQ_NOPROBE;
229 check_irq_resend(desc, irq); 246 check_irq_resend(desc, irq);
@@ -253,7 +270,7 @@ void enable_irq(unsigned int irq)
253 return; 270 return;
254 271
255 spin_lock_irqsave(&desc->lock, flags); 272 spin_lock_irqsave(&desc->lock, flags);
256 __enable_irq(desc, irq); 273 __enable_irq(desc, irq, false);
257 spin_unlock_irqrestore(&desc->lock, flags); 274 spin_unlock_irqrestore(&desc->lock, flags);
258} 275}
259EXPORT_SYMBOL(enable_irq); 276EXPORT_SYMBOL(enable_irq);
@@ -389,9 +406,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
389 * allocate special interrupts that are part of the architecture. 406 * allocate special interrupts that are part of the architecture.
390 */ 407 */
391static int 408static int
392__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) 409__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
393{ 410{
394 struct irqaction *old, **p; 411 struct irqaction *old, **old_ptr;
395 const char *old_name = NULL; 412 const char *old_name = NULL;
396 unsigned long flags; 413 unsigned long flags;
397 int shared = 0; 414 int shared = 0;
@@ -423,8 +440,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
423 * The following block of code has to be executed atomically 440 * The following block of code has to be executed atomically
424 */ 441 */
425 spin_lock_irqsave(&desc->lock, flags); 442 spin_lock_irqsave(&desc->lock, flags);
426 p = &desc->action; 443 old_ptr = &desc->action;
427 old = *p; 444 old = *old_ptr;
428 if (old) { 445 if (old) {
429 /* 446 /*
430 * Can't share interrupts unless both agree to and are 447 * Can't share interrupts unless both agree to and are
@@ -447,8 +464,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
447 464
448 /* add new interrupt at end of irq queue */ 465 /* add new interrupt at end of irq queue */
449 do { 466 do {
450 p = &old->next; 467 old_ptr = &old->next;
451 old = *p; 468 old = *old_ptr;
452 } while (old); 469 } while (old);
453 shared = 1; 470 shared = 1;
454 } 471 }
@@ -488,7 +505,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
488 desc->status |= IRQ_NO_BALANCING; 505 desc->status |= IRQ_NO_BALANCING;
489 506
490 /* Set default affinity mask once everything is setup */ 507 /* Set default affinity mask once everything is setup */
491 do_irq_select_affinity(irq, desc); 508 setup_affinity(irq, desc);
492 509
493 } else if ((new->flags & IRQF_TRIGGER_MASK) 510 } else if ((new->flags & IRQF_TRIGGER_MASK)
494 && (new->flags & IRQF_TRIGGER_MASK) 511 && (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +516,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
499 (int)(new->flags & IRQF_TRIGGER_MASK)); 516 (int)(new->flags & IRQF_TRIGGER_MASK));
500 } 517 }
501 518
502 *p = new; 519 *old_ptr = new;
503 520
504 /* Reset broken irq detection when installing new handler */ 521 /* Reset broken irq detection when installing new handler */
505 desc->irq_count = 0; 522 desc->irq_count = 0;
@@ -511,7 +528,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
511 */ 528 */
512 if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { 529 if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
513 desc->status &= ~IRQ_SPURIOUS_DISABLED; 530 desc->status &= ~IRQ_SPURIOUS_DISABLED;
514 __enable_irq(desc, irq); 531 __enable_irq(desc, irq, false);
515 } 532 }
516 533
517 spin_unlock_irqrestore(&desc->lock, flags); 534 spin_unlock_irqrestore(&desc->lock, flags);
@@ -549,90 +566,117 @@ int setup_irq(unsigned int irq, struct irqaction *act)
549 566
550 return __setup_irq(irq, desc, act); 567 return __setup_irq(irq, desc, act);
551} 568}
569EXPORT_SYMBOL_GPL(setup_irq);
552 570
553/** 571 /*
554 * free_irq - free an interrupt 572 * Internal function to unregister an irqaction - used to free
555 * @irq: Interrupt line to free 573 * regular and special interrupts that are part of the architecture.
556 * @dev_id: Device identity to free
557 *
558 * Remove an interrupt handler. The handler is removed and if the
559 * interrupt line is no longer in use by any driver it is disabled.
560 * On a shared IRQ the caller must ensure the interrupt is disabled
561 * on the card it drives before calling this function. The function
562 * does not return until any executing interrupts for this IRQ
563 * have completed.
564 *
565 * This function must not be called from interrupt context.
566 */ 574 */
567void free_irq(unsigned int irq, void *dev_id) 575static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
568{ 576{
569 struct irq_desc *desc = irq_to_desc(irq); 577 struct irq_desc *desc = irq_to_desc(irq);
570 struct irqaction **p; 578 struct irqaction *action, **action_ptr;
571 unsigned long flags; 579 unsigned long flags;
572 580
573 WARN_ON(in_interrupt()); 581 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
574 582
575 if (!desc) 583 if (!desc)
576 return; 584 return NULL;
577 585
578 spin_lock_irqsave(&desc->lock, flags); 586 spin_lock_irqsave(&desc->lock, flags);
579 p = &desc->action; 587
588 /*
589 * There can be multiple actions per IRQ descriptor, find the right
590 * one based on the dev_id:
591 */
592 action_ptr = &desc->action;
580 for (;;) { 593 for (;;) {
581 struct irqaction *action = *p; 594 action = *action_ptr;
582 595
583 if (action) { 596 if (!action) {
584 struct irqaction **pp = p; 597 WARN(1, "Trying to free already-free IRQ %d\n", irq);
598 spin_unlock_irqrestore(&desc->lock, flags);
585 599
586 p = &action->next; 600 return NULL;
587 if (action->dev_id != dev_id) 601 }
588 continue;
589 602
590 /* Found it - now remove it from the list of entries */ 603 if (action->dev_id == dev_id)
591 *pp = action->next; 604 break;
605 action_ptr = &action->next;
606 }
592 607
593 /* Currently used only by UML, might disappear one day.*/ 608 /* Found it - now remove it from the list of entries: */
609 *action_ptr = action->next;
610
611 /* Currently used only by UML, might disappear one day: */
594#ifdef CONFIG_IRQ_RELEASE_METHOD 612#ifdef CONFIG_IRQ_RELEASE_METHOD
595 if (desc->chip->release) 613 if (desc->chip->release)
596 desc->chip->release(irq, dev_id); 614 desc->chip->release(irq, dev_id);
597#endif 615#endif
598 616
599 if (!desc->action) { 617 /* If this was the last handler, shut down the IRQ line: */
600 desc->status |= IRQ_DISABLED; 618 if (!desc->action) {
601 if (desc->chip->shutdown) 619 desc->status |= IRQ_DISABLED;
602 desc->chip->shutdown(irq); 620 if (desc->chip->shutdown)
603 else 621 desc->chip->shutdown(irq);
604 desc->chip->disable(irq); 622 else
605 } 623 desc->chip->disable(irq);
606 spin_unlock_irqrestore(&desc->lock, flags); 624 }
607 unregister_handler_proc(irq, action); 625 spin_unlock_irqrestore(&desc->lock, flags);
626
627 unregister_handler_proc(irq, action);
628
629 /* Make sure it's not being used on another CPU: */
630 synchronize_irq(irq);
608 631
609 /* Make sure it's not being used on another CPU */
610 synchronize_irq(irq);
611#ifdef CONFIG_DEBUG_SHIRQ
612 /*
613 * It's a shared IRQ -- the driver ought to be
614 * prepared for it to happen even now it's
615 * being freed, so let's make sure.... We do
616 * this after actually deregistering it, to
617 * make sure that a 'real' IRQ doesn't run in
618 * parallel with our fake
619 */
620 if (action->flags & IRQF_SHARED) {
621 local_irq_save(flags);
622 action->handler(irq, dev_id);
623 local_irq_restore(flags);
624 }
625#endif
626 kfree(action);
627 return;
628 }
629 printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
630#ifdef CONFIG_DEBUG_SHIRQ 632#ifdef CONFIG_DEBUG_SHIRQ
631 dump_stack(); 633 /*
632#endif 634 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
633 spin_unlock_irqrestore(&desc->lock, flags); 635 * event to happen even now it's being freed, so let's make sure that
634 return; 636 * is so by doing an extra call to the handler ....
637 *
638 * ( We do this after actually deregistering it, to make sure that a
639 * 'real' IRQ doesn't run in * parallel with our fake. )
640 */
641 if (action->flags & IRQF_SHARED) {
642 local_irq_save(flags);
643 action->handler(irq, dev_id);
644 local_irq_restore(flags);
635 } 645 }
646#endif
647 return action;
648}
649
650/**
651 * remove_irq - free an interrupt
652 * @irq: Interrupt line to free
653 * @act: irqaction for the interrupt
654 *
655 * Used to remove interrupts statically setup by the early boot process.
656 */
657void remove_irq(unsigned int irq, struct irqaction *act)
658{
659 __free_irq(irq, act->dev_id);
660}
661EXPORT_SYMBOL_GPL(remove_irq);
662
663/**
664 * free_irq - free an interrupt allocated with request_irq
665 * @irq: Interrupt line to free
666 * @dev_id: Device identity to free
667 *
668 * Remove an interrupt handler. The handler is removed and if the
669 * interrupt line is no longer in use by any driver it is disabled.
670 * On a shared IRQ the caller must ensure the interrupt is disabled
671 * on the card it drives before calling this function. The function
672 * does not return until any executing interrupts for this IRQ
673 * have completed.
674 *
675 * This function must not be called from interrupt context.
676 */
677void free_irq(unsigned int irq, void *dev_id)
678{
679 kfree(__free_irq(irq, dev_id));
636} 680}
637EXPORT_SYMBOL(free_irq); 681EXPORT_SYMBOL(free_irq);
638 682
@@ -679,11 +723,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
679 * the behavior is classified as "will not fix" so we need to 723 * the behavior is classified as "will not fix" so we need to
680 * start nudging drivers away from using that idiom. 724 * start nudging drivers away from using that idiom.
681 */ 725 */
682 if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) 726 if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
683 == (IRQF_SHARED|IRQF_DISABLED)) 727 (IRQF_SHARED|IRQF_DISABLED)) {
684 pr_warning("IRQ %d/%s: IRQF_DISABLED is not " 728 pr_warning(
685 "guaranteed on shared IRQs\n", 729 "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
686 irq, devname); 730 irq, devname);
731 }
687 732
688#ifdef CONFIG_LOCKDEP 733#ifdef CONFIG_LOCKDEP
689 /* 734 /*
@@ -709,15 +754,13 @@ int request_irq(unsigned int irq, irq_handler_t handler,
709 if (!handler) 754 if (!handler)
710 return -EINVAL; 755 return -EINVAL;
711 756
712 action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC); 757 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
713 if (!action) 758 if (!action)
714 return -ENOMEM; 759 return -ENOMEM;
715 760
716 action->handler = handler; 761 action->handler = handler;
717 action->flags = irqflags; 762 action->flags = irqflags;
718 cpus_clear(action->mask);
719 action->name = devname; 763 action->name = devname;
720 action->next = NULL;
721 action->dev_id = dev_id; 764 action->dev_id = dev_id;
722 765
723 retval = __setup_irq(irq, desc, action); 766 retval = __setup_irq(irq, desc, action);
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 7f9b80434e32..243d6121e50e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc,
17 struct irq_desc *desc, 17 struct irq_desc *desc,
18 int cpu, int nr) 18 int cpu, int nr)
19{ 19{
20 unsigned long bytes;
21
22 init_kstat_irqs(desc, cpu, nr); 20 init_kstat_irqs(desc, cpu, nr);
23 21
24 if (desc->kstat_irqs != old_desc->kstat_irqs) { 22 if (desc->kstat_irqs != old_desc->kstat_irqs)
25 /* Compute how many bytes we need per irq and allocate them */ 23 memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
26 bytes = nr * sizeof(unsigned int); 24 nr * sizeof(*desc->kstat_irqs));
27
28 memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
29 }
30} 25}
31 26
32static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) 27static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
new file mode 100644
index 000000000000..638d8bedec14
--- /dev/null
+++ b/kernel/irq/pm.c
@@ -0,0 +1,79 @@
1/*
2 * linux/kernel/irq/pm.c
3 *
4 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file contains power management functions related to interrupts.
7 */
8
9#include <linux/irq.h>
10#include <linux/module.h>
11#include <linux/interrupt.h>
12
13#include "internals.h"
14
15/**
16 * suspend_device_irqs - disable all currently enabled interrupt lines
17 *
18 * During system-wide suspend or hibernation device interrupts need to be
19 * disabled at the chip level and this function is provided for this purpose.
20 * It disables all interrupt lines that are enabled at the moment and sets the
21 * IRQ_SUSPENDED flag for them.
22 */
23void suspend_device_irqs(void)
24{
25 struct irq_desc *desc;
26 int irq;
27
28 for_each_irq_desc(irq, desc) {
29 unsigned long flags;
30
31 spin_lock_irqsave(&desc->lock, flags);
32 __disable_irq(desc, irq, true);
33 spin_unlock_irqrestore(&desc->lock, flags);
34 }
35
36 for_each_irq_desc(irq, desc)
37 if (desc->status & IRQ_SUSPENDED)
38 synchronize_irq(irq);
39}
40EXPORT_SYMBOL_GPL(suspend_device_irqs);
41
42/**
43 * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
44 *
45 * Enable all interrupt lines previously disabled by suspend_device_irqs() that
46 * have the IRQ_SUSPENDED flag set.
47 */
48void resume_device_irqs(void)
49{
50 struct irq_desc *desc;
51 int irq;
52
53 for_each_irq_desc(irq, desc) {
54 unsigned long flags;
55
56 if (!(desc->status & IRQ_SUSPENDED))
57 continue;
58
59 spin_lock_irqsave(&desc->lock, flags);
60 __enable_irq(desc, irq, true);
61 spin_unlock_irqrestore(&desc->lock, flags);
62 }
63}
64EXPORT_SYMBOL_GPL(resume_device_irqs);
65
66/**
67 * check_wakeup_irqs - check if any wake-up interrupts are pending
68 */
69int check_wakeup_irqs(void)
70{
71 struct irq_desc *desc;
72 int irq;
73
74 for_each_irq_desc(irq, desc)
75 if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
76 return -EBUSY;
77
78 return 0;
79}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56e..4d568294de3e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
104 return ok; 104 return ok;
105} 105}
106 106
107static void poll_spurious_irqs(unsigned long dummy) 107static void poll_all_shared_irqs(void)
108{ 108{
109 struct irq_desc *desc; 109 struct irq_desc *desc;
110 int i; 110 int i;
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)
123 123
124 try_one_irq(i, desc); 124 try_one_irq(i, desc);
125 } 125 }
126}
127
128static void poll_spurious_irqs(unsigned long dummy)
129{
130 poll_all_shared_irqs();
126 131
127 mod_timer(&poll_spurious_irq_timer, 132 mod_timer(&poll_spurious_irq_timer,
128 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 133 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
129} 134}
130 135
136#ifdef CONFIG_DEBUG_SHIRQ
137void debug_poll_all_shared_irqs(void)
138{
139 poll_all_shared_irqs();
140}
141#endif
142
131/* 143/*
132 * If 99,900 of the previous 100,000 interrupts have not been handled 144 * If 99,900 of the previous 100,000 interrupts have not been handled
133 * then assume that the IRQ is stuck in some manner. Drop a diagnostic 145 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c7fd6692939d..93eed85fe017 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1450,11 +1450,7 @@ int kernel_kexec(void)
1450 error = device_suspend(PMSG_FREEZE); 1450 error = device_suspend(PMSG_FREEZE);
1451 if (error) 1451 if (error)
1452 goto Resume_console; 1452 goto Resume_console;
1453 error = disable_nonboot_cpus();
1454 if (error)
1455 goto Resume_devices;
1456 device_pm_lock(); 1453 device_pm_lock();
1457 local_irq_disable();
1458 /* At this point, device_suspend() has been called, 1454 /* At this point, device_suspend() has been called,
1459 * but *not* device_power_down(). We *must* 1455 * but *not* device_power_down(). We *must*
1460 * device_power_down() now. Otherwise, drivers for 1456 * device_power_down() now. Otherwise, drivers for
@@ -1464,12 +1460,15 @@ int kernel_kexec(void)
1464 */ 1460 */
1465 error = device_power_down(PMSG_FREEZE); 1461 error = device_power_down(PMSG_FREEZE);
1466 if (error) 1462 if (error)
1467 goto Enable_irqs; 1463 goto Resume_devices;
1468 1464 error = disable_nonboot_cpus();
1465 if (error)
1466 goto Enable_cpus;
1467 local_irq_disable();
1469 /* Suspend system devices */ 1468 /* Suspend system devices */
1470 error = sysdev_suspend(PMSG_FREEZE); 1469 error = sysdev_suspend(PMSG_FREEZE);
1471 if (error) 1470 if (error)
1472 goto Power_up_devices; 1471 goto Enable_irqs;
1473 } else 1472 } else
1474#endif 1473#endif
1475 { 1474 {
@@ -1483,13 +1482,13 @@ int kernel_kexec(void)
1483#ifdef CONFIG_KEXEC_JUMP 1482#ifdef CONFIG_KEXEC_JUMP
1484 if (kexec_image->preserve_context) { 1483 if (kexec_image->preserve_context) {
1485 sysdev_resume(); 1484 sysdev_resume();
1486 Power_up_devices:
1487 device_power_up(PMSG_RESTORE);
1488 Enable_irqs: 1485 Enable_irqs:
1489 local_irq_enable(); 1486 local_irq_enable();
1490 device_pm_unlock(); 1487 Enable_cpus:
1491 enable_nonboot_cpus(); 1488 enable_nonboot_cpus();
1489 device_power_up(PMSG_RESTORE);
1492 Resume_devices: 1490 Resume_devices:
1491 device_pm_unlock();
1493 device_resume(PMSG_RESTORE); 1492 device_resume(PMSG_RESTORE);
1494 Resume_console: 1493 Resume_console:
1495 resume_console(); 1494 resume_console();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a27a5f64443d..f0c8f545180d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -167,7 +167,7 @@ static int ____call_usermodehelper(void *data)
167 } 167 }
168 168
169 /* We can run anywhere, unlike our parent keventd(). */ 169 /* We can run anywhere, unlike our parent keventd(). */
170 set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR); 170 set_cpus_allowed_ptr(current, cpu_all_mask);
171 171
172 /* 172 /*
173 * Our parent is keventd, which runs with elevated scheduling priority. 173 * Our parent is keventd, which runs with elevated scheduling priority.
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4fbc456f393d..84bbadd4d021 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -110,7 +110,7 @@ static void create_kthread(struct kthread_create_info *create)
110 */ 110 */
111 sched_setscheduler(create->result, SCHED_NORMAL, &param); 111 sched_setscheduler(create->result, SCHED_NORMAL, &param);
112 set_user_nice(create->result, KTHREAD_NICE_LEVEL); 112 set_user_nice(create->result, KTHREAD_NICE_LEVEL);
113 set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR); 113 set_cpus_allowed_ptr(create->result, cpu_all_mask);
114 } 114 }
115 complete(&create->done); 115 complete(&create->done);
116} 116}
@@ -240,7 +240,7 @@ int kthreadd(void *unused)
240 set_task_comm(tsk, "kthreadd"); 240 set_task_comm(tsk, "kthreadd");
241 ignore_signals(tsk); 241 ignore_signals(tsk);
242 set_user_nice(tsk, KTHREAD_NICE_LEVEL); 242 set_user_nice(tsk, KTHREAD_NICE_LEVEL);
243 set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR); 243 set_cpus_allowed_ptr(tsk, cpu_all_mask);
244 244
245 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; 245 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
246 246
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 449db466bdbc..ca07c5c0c914 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -9,6 +9,44 @@
9 * as published by the Free Software Foundation; version 2 9 * as published by the Free Software Foundation; version 2
10 * of the License. 10 * of the License.
11 */ 11 */
12
13/*
14 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
15 * used by the "latencytop" userspace tool. The latency that is tracked is not
16 * the 'traditional' interrupt latency (which is primarily caused by something
17 * else consuming CPU), but instead, it is the latency an application encounters
18 * because the kernel sleeps on its behalf for various reasons.
19 *
20 * This code tracks 2 levels of statistics:
21 * 1) System level latency
22 * 2) Per process latency
23 *
24 * The latency is stored in fixed sized data structures in an accumulated form;
25 * if the "same" latency cause is hit twice, this will be tracked as one entry
26 * in the data structure. Both the count, total accumulated latency and maximum
27 * latency are tracked in this data structure. When the fixed size structure is
28 * full, no new causes are tracked until the buffer is flushed by writing to
29 * the /proc file; the userspace tool does this on a regular basis.
30 *
31 * A latency cause is identified by a stringified backtrace at the point that
32 * the scheduler gets invoked. The userland tool will use this string to
33 * identify the cause of the latency in human readable form.
34 *
35 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
36 * These files look like this:
37 *
38 * Latency Top version : v0.1
39 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
40 * | | | |
41 * | | | +----> the stringified backtrace
42 * | | +---------> The maximum latency for this entry in microseconds
43 * | +--------------> The accumulated latency for this entry (microseconds)
44 * +-------------------> The number of times this entry is hit
45 *
46 * (note: the average latency is the accumulated latency divided by the number
47 * of times)
48 */
49
12#include <linux/latencytop.h> 50#include <linux/latencytop.h>
13#include <linux/kallsyms.h> 51#include <linux/kallsyms.h>
14#include <linux/seq_file.h> 52#include <linux/seq_file.h>
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
72 firstnonnull = i; 110 firstnonnull = i;
73 continue; 111 continue;
74 } 112 }
75 for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 113 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
76 unsigned long record = lat->backtrace[q]; 114 unsigned long record = lat->backtrace[q];
77 115
78 if (latency_record[i].backtrace[q] != record) { 116 if (latency_record[i].backtrace[q] != record) {
@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
101 memcpy(&latency_record[i], lat, sizeof(struct latency_record)); 139 memcpy(&latency_record[i], lat, sizeof(struct latency_record));
102} 140}
103 141
104static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat) 142/*
143 * Iterator to store a backtrace into a latency record entry
144 */
145static inline void store_stacktrace(struct task_struct *tsk,
146 struct latency_record *lat)
105{ 147{
106 struct stack_trace trace; 148 struct stack_trace trace;
107 149
108 memset(&trace, 0, sizeof(trace)); 150 memset(&trace, 0, sizeof(trace));
109 trace.max_entries = LT_BACKTRACEDEPTH; 151 trace.max_entries = LT_BACKTRACEDEPTH;
110 trace.entries = &lat->backtrace[0]; 152 trace.entries = &lat->backtrace[0];
111 trace.skip = 0;
112 save_stack_trace_tsk(tsk, &trace); 153 save_stack_trace_tsk(tsk, &trace);
113} 154}
114 155
156/**
157 * __account_scheduler_latency - record an occured latency
158 * @tsk - the task struct of the task hitting the latency
159 * @usecs - the duration of the latency in microseconds
160 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
161 *
162 * This function is the main entry point for recording latency entries
163 * as called by the scheduler.
164 *
165 * This function has a few special cases to deal with normal 'non-latency'
166 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
167 * since this usually is caused by waiting for events via select() and co.
168 *
169 * Negative latencies (caused by time going backwards) are also explicitly
170 * skipped.
171 */
115void __sched 172void __sched
116account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) 173__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
117{ 174{
118 unsigned long flags; 175 unsigned long flags;
119 int i, q; 176 int i, q;
120 struct latency_record lat; 177 struct latency_record lat;
121 178
122 if (!latencytop_enabled)
123 return;
124
125 /* Long interruptible waits are generally user requested... */ 179 /* Long interruptible waits are generally user requested... */
126 if (inter && usecs > 5000) 180 if (inter && usecs > 5000)
127 return; 181 return;
128 182
183 /* Negative sleeps are time going backwards */
184 /* Zero-time sleeps are non-interesting */
185 if (usecs <= 0)
186 return;
187
129 memset(&lat, 0, sizeof(lat)); 188 memset(&lat, 0, sizeof(lat));
130 lat.count = 1; 189 lat.count = 1;
131 lat.time = usecs; 190 lat.time = usecs;
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
143 if (tsk->latency_record_count >= LT_SAVECOUNT) 202 if (tsk->latency_record_count >= LT_SAVECOUNT)
144 goto out_unlock; 203 goto out_unlock;
145 204
146 for (i = 0; i < LT_SAVECOUNT ; i++) { 205 for (i = 0; i < LT_SAVECOUNT; i++) {
147 struct latency_record *mylat; 206 struct latency_record *mylat;
148 int same = 1; 207 int same = 1;
149 208
150 mylat = &tsk->latency_record[i]; 209 mylat = &tsk->latency_record[i];
151 for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 210 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
152 unsigned long record = lat.backtrace[q]; 211 unsigned long record = lat.backtrace[q];
153 212
154 if (mylat->backtrace[q] != record) { 213 if (mylat->backtrace[q] != record) {
@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v)
186 for (i = 0; i < MAXLR; i++) { 245 for (i = 0; i < MAXLR; i++) {
187 if (latency_record[i].backtrace[0]) { 246 if (latency_record[i].backtrace[0]) {
188 int q; 247 int q;
189 seq_printf(m, "%i %li %li ", 248 seq_printf(m, "%i %lu %lu ",
190 latency_record[i].count, 249 latency_record[i].count,
191 latency_record[i].time, 250 latency_record[i].time,
192 latency_record[i].max); 251 latency_record[i].max);
@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp)
223 return single_open(filp, lstats_show, NULL); 282 return single_open(filp, lstats_show, NULL);
224} 283}
225 284
226static struct file_operations lstats_fops = { 285static const struct file_operations lstats_fops = {
227 .open = lstats_open, 286 .open = lstats_open,
228 .read = seq_read, 287 .read = seq_read,
229 .write = lstats_write, 288 .write = lstats_write,
@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void)
236 proc_create("latency_stats", 0644, NULL, &lstats_fops); 295 proc_create("latency_stats", 0644, NULL, &lstats_fops);
237 return 0; 296 return 0;
238} 297}
239__initcall(init_lstats_procfs); 298device_initcall(init_lstats_procfs);
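
The /proc/latency_stats layout documented at the top of this file ("count accumulated max backtrace") can be consumed with a few lines of userspace C. The parser below is only an illustrative sketch, not part of the patch; it assumes nothing beyond the line format shown in the header comment above.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/latency_stats", "r");
        char line[512];

        if (!f) {
                perror("latency_stats");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                unsigned long count, total, max;
                int off;

                /* skip the "Latency Top version" banner and malformed lines */
                if (sscanf(line, "%lu %lu %lu %n", &count, &total, &max, &off) != 3)
                        continue;
                printf("hits=%lu avg=%lu us max=%lu us  %s",
                       count, count ? total / count : 0, max, line + off);
        }
        fclose(f);
        return 0;
}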
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 71b567f52813..81b5f33970b8 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2255,7 +2255,7 @@ void trace_softirqs_off(unsigned long ip)
2255 debug_atomic_inc(&redundant_softirqs_off); 2255 debug_atomic_inc(&redundant_softirqs_off);
2256} 2256}
2257 2257
2258void lockdep_trace_alloc(gfp_t gfp_mask) 2258static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2259{ 2259{
2260 struct task_struct *curr = current; 2260 struct task_struct *curr = current;
2261 2261
@@ -2274,12 +2274,29 @@ void lockdep_trace_alloc(gfp_t gfp_mask)
2274 if (!(gfp_mask & __GFP_FS)) 2274 if (!(gfp_mask & __GFP_FS))
2275 return; 2275 return;
2276 2276
2277 if (DEBUG_LOCKS_WARN_ON(irqs_disabled())) 2277 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2278 return; 2278 return;
2279 2279
2280 mark_held_locks(curr, RECLAIM_FS); 2280 mark_held_locks(curr, RECLAIM_FS);
2281} 2281}
2282 2282
2283static void check_flags(unsigned long flags);
2284
2285void lockdep_trace_alloc(gfp_t gfp_mask)
2286{
2287 unsigned long flags;
2288
2289 if (unlikely(current->lockdep_recursion))
2290 return;
2291
2292 raw_local_irq_save(flags);
2293 check_flags(flags);
2294 current->lockdep_recursion = 1;
2295 __lockdep_trace_alloc(gfp_mask, flags);
2296 current->lockdep_recursion = 0;
2297 raw_local_irq_restore(flags);
2298}
2299
2283static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) 2300static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2284{ 2301{
2285 /* 2302 /*
diff --git a/kernel/module.c b/kernel/module.c
index 7fa134e0cc24..41f50605eed0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -856,7 +856,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
856 mutex_lock(&module_mutex); 856 mutex_lock(&module_mutex);
857 /* Store the name of the last unloaded module for diagnostic purposes */ 857 /* Store the name of the last unloaded module for diagnostic purposes */
858 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); 858 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
859 unregister_dynamic_debug_module(mod->name); 859 ddebug_remove_module(mod->name);
860 free_module(mod); 860 free_module(mod);
861 861
862 out: 862 out:
@@ -1861,19 +1861,13 @@ static inline void add_kallsyms(struct module *mod,
1861} 1861}
1862#endif /* CONFIG_KALLSYMS */ 1862#endif /* CONFIG_KALLSYMS */
1863 1863
1864static void dynamic_printk_setup(struct mod_debug *debug, unsigned int num) 1864static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
1865{ 1865{
1866#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG 1866#ifdef CONFIG_DYNAMIC_DEBUG
1867 unsigned int i; 1867 if (ddebug_add_module(debug, num, debug->modname))
1868 1868 printk(KERN_ERR "dynamic debug error adding module: %s\n",
1869 for (i = 0; i < num; i++) { 1869 debug->modname);
1870 register_dynamic_debug_module(debug[i].modname, 1870#endif
1871 debug[i].type,
1872 debug[i].logical_modname,
1873 debug[i].flag_names,
1874 debug[i].hash, debug[i].hash2);
1875 }
1876#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
1877} 1871}
1878 1872
1879static void *module_alloc_update_bounds(unsigned long size) 1873static void *module_alloc_update_bounds(unsigned long size)
@@ -2247,12 +2241,13 @@ static noinline struct module *load_module(void __user *umod,
2247 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); 2241 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
2248 2242
2249 if (!mod->taints) { 2243 if (!mod->taints) {
2250 struct mod_debug *debug; 2244 struct _ddebug *debug;
2251 unsigned int num_debug; 2245 unsigned int num_debug;
2252 2246
2253 debug = section_objs(hdr, sechdrs, secstrings, "__verbose", 2247 debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
2254 sizeof(*debug), &num_debug); 2248 sizeof(*debug), &num_debug);
2255 dynamic_printk_setup(debug, num_debug); 2249 if (debug)
2250 dynamic_debug_setup(debug, num_debug);
2256 } 2251 }
2257 2252
2258 /* sechdrs[0].sh_size is always zero */ 2253 /* sechdrs[0].sh_size is always zero */
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e976e505648d..8e5d9a68b022 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1370,7 +1370,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
1370 if (task_cputime_expired(&group_sample, &sig->cputime_expires)) 1370 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1371 return 1; 1371 return 1;
1372 } 1372 }
1373 return 0; 1373
1374 return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
1374} 1375}
1375 1376
1376/* 1377/*
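
The extra return condition above makes the fast path fire whenever RLIMIT_CPU is finite. A small userspace sketch (illustrative, not from the patch) of setting such a limit so the check has something to act on:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        /* 1s soft limit (SIGXCPU), 2s hard limit (SIGKILL) */
        struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };

        if (setrlimit(RLIMIT_CPU, &rl)) {
                perror("setrlimit");
                return 1;
        }
        for (;;)
                ;       /* burn CPU time until the limit triggers */
}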
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 4a4a206b1979..f3db382c2b2d 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,6 +22,7 @@
22#include <linux/console.h> 22#include <linux/console.h>
23#include <linux/cpu.h> 23#include <linux/cpu.h>
24#include <linux/freezer.h> 24#include <linux/freezer.h>
25#include <asm/suspend.h>
25 26
26#include "power.h" 27#include "power.h"
27 28
@@ -214,7 +215,7 @@ static int create_image(int platform_mode)
214 return error; 215 return error;
215 216
216 device_pm_lock(); 217 device_pm_lock();
217 local_irq_disable(); 218
218 /* At this point, device_suspend() has been called, but *not* 219 /* At this point, device_suspend() has been called, but *not*
219 * device_power_down(). We *must* call device_power_down() now. 220 * device_power_down(). We *must* call device_power_down() now.
220 * Otherwise, drivers for some devices (e.g. interrupt controllers) 221 * Otherwise, drivers for some devices (e.g. interrupt controllers)
@@ -225,13 +226,25 @@ static int create_image(int platform_mode)
225 if (error) { 226 if (error) {
226 printk(KERN_ERR "PM: Some devices failed to power down, " 227 printk(KERN_ERR "PM: Some devices failed to power down, "
227 "aborting hibernation\n"); 228 "aborting hibernation\n");
228 goto Enable_irqs; 229 goto Unlock;
229 } 230 }
231
232 error = platform_pre_snapshot(platform_mode);
233 if (error || hibernation_test(TEST_PLATFORM))
234 goto Platform_finish;
235
236 error = disable_nonboot_cpus();
237 if (error || hibernation_test(TEST_CPUS)
238 || hibernation_testmode(HIBERNATION_TEST))
239 goto Enable_cpus;
240
241 local_irq_disable();
242
230 sysdev_suspend(PMSG_FREEZE); 243 sysdev_suspend(PMSG_FREEZE);
231 if (error) { 244 if (error) {
232 printk(KERN_ERR "PM: Some devices failed to power down, " 245 printk(KERN_ERR "PM: Some devices failed to power down, "
233 "aborting hibernation\n"); 246 "aborting hibernation\n");
234 goto Power_up_devices; 247 goto Enable_irqs;
235 } 248 }
236 249
237 if (hibernation_test(TEST_CORE)) 250 if (hibernation_test(TEST_CORE))
@@ -247,17 +260,28 @@ static int create_image(int platform_mode)
247 restore_processor_state(); 260 restore_processor_state();
248 if (!in_suspend) 261 if (!in_suspend)
249 platform_leave(platform_mode); 262 platform_leave(platform_mode);
263
250 Power_up: 264 Power_up:
251 sysdev_resume(); 265 sysdev_resume();
252 /* NOTE: device_power_up() is just a resume() for devices 266 /* NOTE: device_power_up() is just a resume() for devices
253 * that suspended with irqs off ... no overall powerup. 267 * that suspended with irqs off ... no overall powerup.
254 */ 268 */
255 Power_up_devices: 269
256 device_power_up(in_suspend ?
257 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
258 Enable_irqs: 270 Enable_irqs:
259 local_irq_enable(); 271 local_irq_enable();
272
273 Enable_cpus:
274 enable_nonboot_cpus();
275
276 Platform_finish:
277 platform_finish(platform_mode);
278
279 device_power_up(in_suspend ?
280 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
281
282 Unlock:
260 device_pm_unlock(); 283 device_pm_unlock();
284
261 return error; 285 return error;
262} 286}
263 287
@@ -291,25 +315,9 @@ int hibernation_snapshot(int platform_mode)
291 if (hibernation_test(TEST_DEVICES)) 315 if (hibernation_test(TEST_DEVICES))
292 goto Recover_platform; 316 goto Recover_platform;
293 317
294 error = platform_pre_snapshot(platform_mode); 318 error = create_image(platform_mode);
295 if (error || hibernation_test(TEST_PLATFORM)) 319 /* Control returns here after successful restore */
296 goto Finish;
297
298 error = disable_nonboot_cpus();
299 if (!error) {
300 if (hibernation_test(TEST_CPUS))
301 goto Enable_cpus;
302
303 if (hibernation_testmode(HIBERNATION_TEST))
304 goto Enable_cpus;
305 320
306 error = create_image(platform_mode);
307 /* Control returns here after successful restore */
308 }
309 Enable_cpus:
310 enable_nonboot_cpus();
311 Finish:
312 platform_finish(platform_mode);
313 Resume_devices: 321 Resume_devices:
314 device_resume(in_suspend ? 322 device_resume(in_suspend ?
315 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 323 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
@@ -331,19 +339,33 @@ int hibernation_snapshot(int platform_mode)
331 * kernel. 339 * kernel.
332 */ 340 */
333 341
334static int resume_target_kernel(void) 342static int resume_target_kernel(bool platform_mode)
335{ 343{
336 int error; 344 int error;
337 345
338 device_pm_lock(); 346 device_pm_lock();
339 local_irq_disable(); 347
340 error = device_power_down(PMSG_QUIESCE); 348 error = device_power_down(PMSG_QUIESCE);
341 if (error) { 349 if (error) {
342 printk(KERN_ERR "PM: Some devices failed to power down, " 350 printk(KERN_ERR "PM: Some devices failed to power down, "
343 "aborting resume\n"); 351 "aborting resume\n");
344 goto Enable_irqs; 352 goto Unlock;
345 } 353 }
346 sysdev_suspend(PMSG_QUIESCE); 354
355 error = platform_pre_restore(platform_mode);
356 if (error)
357 goto Cleanup;
358
359 error = disable_nonboot_cpus();
360 if (error)
361 goto Enable_cpus;
362
363 local_irq_disable();
364
365 error = sysdev_suspend(PMSG_QUIESCE);
366 if (error)
367 goto Enable_irqs;
368
347 /* We'll ignore saved state, but this gets preempt count (etc) right */ 369 /* We'll ignore saved state, but this gets preempt count (etc) right */
348 save_processor_state(); 370 save_processor_state();
349 error = restore_highmem(); 371 error = restore_highmem();
@@ -366,11 +388,23 @@ static int resume_target_kernel(void)
366 swsusp_free(); 388 swsusp_free();
367 restore_processor_state(); 389 restore_processor_state();
368 touch_softlockup_watchdog(); 390 touch_softlockup_watchdog();
391
369 sysdev_resume(); 392 sysdev_resume();
370 device_power_up(PMSG_RECOVER); 393
371 Enable_irqs: 394 Enable_irqs:
372 local_irq_enable(); 395 local_irq_enable();
396
397 Enable_cpus:
398 enable_nonboot_cpus();
399
400 Cleanup:
401 platform_restore_cleanup(platform_mode);
402
403 device_power_up(PMSG_RECOVER);
404
405 Unlock:
373 device_pm_unlock(); 406 device_pm_unlock();
407
374 return error; 408 return error;
375} 409}
376 410
@@ -390,19 +424,10 @@ int hibernation_restore(int platform_mode)
390 pm_prepare_console(); 424 pm_prepare_console();
391 suspend_console(); 425 suspend_console();
392 error = device_suspend(PMSG_QUIESCE); 426 error = device_suspend(PMSG_QUIESCE);
393 if (error)
394 goto Finish;
395
396 error = platform_pre_restore(platform_mode);
397 if (!error) { 427 if (!error) {
398 error = disable_nonboot_cpus(); 428 error = resume_target_kernel(platform_mode);
399 if (!error) 429 device_resume(PMSG_RECOVER);
400 error = resume_target_kernel();
401 enable_nonboot_cpus();
402 } 430 }
403 platform_restore_cleanup(platform_mode);
404 device_resume(PMSG_RECOVER);
405 Finish:
406 resume_console(); 431 resume_console();
407 pm_restore_console(); 432 pm_restore_console();
408 return error; 433 return error;
@@ -438,38 +463,46 @@ int hibernation_platform_enter(void)
438 goto Resume_devices; 463 goto Resume_devices;
439 } 464 }
440 465
466 device_pm_lock();
467
468 error = device_power_down(PMSG_HIBERNATE);
469 if (error)
470 goto Unlock;
471
441 error = hibernation_ops->prepare(); 472 error = hibernation_ops->prepare();
442 if (error) 473 if (error)
 443 goto Resume_devices; 474 goto Platform_finish;
444 475
445 error = disable_nonboot_cpus(); 476 error = disable_nonboot_cpus();
446 if (error) 477 if (error)
 447 goto Finish; 478 goto Platform_finish;
448 479
449 device_pm_lock();
450 local_irq_disable(); 480 local_irq_disable();
451 error = device_power_down(PMSG_HIBERNATE); 481 sysdev_suspend(PMSG_HIBERNATE);
452 if (!error) { 482 hibernation_ops->enter();
453 sysdev_suspend(PMSG_HIBERNATE); 483 /* We should never get here */
454 hibernation_ops->enter(); 484 while (1);
455 /* We should never get here */
456 while (1);
457 }
458 local_irq_enable();
459 device_pm_unlock();
460 485
461 /* 486 /*
462 * We don't need to reenable the nonboot CPUs or resume consoles, since 487 * We don't need to reenable the nonboot CPUs or resume consoles, since
463 * the system is going to be halted anyway. 488 * the system is going to be halted anyway.
464 */ 489 */
 465 Finish: 490 Platform_finish:
466 hibernation_ops->finish(); 491 hibernation_ops->finish();
492
493 device_power_up(PMSG_RESTORE);
494
495 Unlock:
496 device_pm_unlock();
497
467 Resume_devices: 498 Resume_devices:
468 entering_platform_hibernation = false; 499 entering_platform_hibernation = false;
469 device_resume(PMSG_RESTORE); 500 device_resume(PMSG_RESTORE);
470 resume_console(); 501 resume_console();
502
471 Close: 503 Close:
472 hibernation_ops->end(); 504 hibernation_ops->end();
505
473 return error; 506 return error;
474} 507}
475 508
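
Both create_image() and resume_target_kernel() above now follow the same staged shape: every step that succeeds adds one more unwind label, and the teardown labels run on success and failure alike. A compressed, illustrative sketch of that shape follows; the power_down()/disable_cpus() helpers are placeholders, not kernel APIs.

#include <stdio.h>

static int power_down(void)   { puts("devices: power down");   return 0; }
static void power_up(void)    { puts("devices: power up"); }
static int disable_cpus(void) { puts("nonboot CPUs: offline"); return 0; }
static void enable_cpus(void) { puts("nonboot CPUs: online"); }

static int do_transition(void)
{
        int error;

        error = power_down();
        if (error)
                goto Out;               /* nothing to undo yet */

        error = disable_cpus();
        if (error)
                goto Enable_cpus;       /* re-enabling is safe even after a failed step */

        puts("critical section with IRQs off");

 Enable_cpus:
        enable_cpus();
        power_up();                     /* runs on success and on error */
 Out:
        return error;
}

int main(void)
{
        return do_transition();
}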
diff --git a/kernel/power/main.c b/kernel/power/main.c
index c9632f841f64..f172f41858bb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -287,17 +287,32 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
287 */ 287 */
288static int suspend_enter(suspend_state_t state) 288static int suspend_enter(suspend_state_t state)
289{ 289{
290 int error = 0; 290 int error;
291 291
292 device_pm_lock(); 292 device_pm_lock();
293 arch_suspend_disable_irqs();
294 BUG_ON(!irqs_disabled());
295 293
296 if ((error = device_power_down(PMSG_SUSPEND))) { 294 error = device_power_down(PMSG_SUSPEND);
295 if (error) {
297 printk(KERN_ERR "PM: Some devices failed to power down\n"); 296 printk(KERN_ERR "PM: Some devices failed to power down\n");
298 goto Done; 297 goto Done;
299 } 298 }
300 299
300 if (suspend_ops->prepare) {
301 error = suspend_ops->prepare();
302 if (error)
303 goto Power_up_devices;
304 }
305
306 if (suspend_test(TEST_PLATFORM))
 307 goto Platform_finish;
308
309 error = disable_nonboot_cpus();
310 if (error || suspend_test(TEST_CPUS))
311 goto Enable_cpus;
312
313 arch_suspend_disable_irqs();
314 BUG_ON(!irqs_disabled());
315
301 error = sysdev_suspend(PMSG_SUSPEND); 316 error = sysdev_suspend(PMSG_SUSPEND);
302 if (!error) { 317 if (!error) {
303 if (!suspend_test(TEST_CORE)) 318 if (!suspend_test(TEST_CORE))
@@ -305,11 +320,22 @@ static int suspend_enter(suspend_state_t state)
305 sysdev_resume(); 320 sysdev_resume();
306 } 321 }
307 322
308 device_power_up(PMSG_RESUME);
309 Done:
310 arch_suspend_enable_irqs(); 323 arch_suspend_enable_irqs();
311 BUG_ON(irqs_disabled()); 324 BUG_ON(irqs_disabled());
325
326 Enable_cpus:
327 enable_nonboot_cpus();
328
 329 Platform_finish:
330 if (suspend_ops->finish)
331 suspend_ops->finish();
332
333 Power_up_devices:
334 device_power_up(PMSG_RESUME);
335
336 Done:
312 device_pm_unlock(); 337 device_pm_unlock();
338
313 return error; 339 return error;
314} 340}
315 341
@@ -341,23 +367,8 @@ int suspend_devices_and_enter(suspend_state_t state)
341 if (suspend_test(TEST_DEVICES)) 367 if (suspend_test(TEST_DEVICES))
342 goto Recover_platform; 368 goto Recover_platform;
343 369
344 if (suspend_ops->prepare) { 370 suspend_enter(state);
345 error = suspend_ops->prepare();
346 if (error)
347 goto Resume_devices;
348 }
349
350 if (suspend_test(TEST_PLATFORM))
351 goto Finish;
352
353 error = disable_nonboot_cpus();
354 if (!error && !suspend_test(TEST_CPUS))
355 suspend_enter(state);
356 371
357 enable_nonboot_cpus();
358 Finish:
359 if (suspend_ops->finish)
360 suspend_ops->finish();
361 Resume_devices: 372 Resume_devices:
362 suspend_test_start(); 373 suspend_test_start();
363 device_resume(PMSG_RESUME); 374 device_resume(PMSG_RESUME);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f5fc2d7680f2..33e2e4a819f9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -321,13 +321,10 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
321 321
322 INIT_LIST_HEAD(list); 322 INIT_LIST_HEAD(list);
323 323
324 for_each_zone(zone) { 324 for_each_populated_zone(zone) {
325 unsigned long zone_start, zone_end; 325 unsigned long zone_start, zone_end;
326 struct mem_extent *ext, *cur, *aux; 326 struct mem_extent *ext, *cur, *aux;
327 327
328 if (!populated_zone(zone))
329 continue;
330
331 zone_start = zone->zone_start_pfn; 328 zone_start = zone->zone_start_pfn;
332 zone_end = zone->zone_start_pfn + zone->spanned_pages; 329 zone_end = zone->zone_start_pfn + zone->spanned_pages;
333 330
@@ -804,8 +801,8 @@ static unsigned int count_free_highmem_pages(void)
804 struct zone *zone; 801 struct zone *zone;
805 unsigned int cnt = 0; 802 unsigned int cnt = 0;
806 803
807 for_each_zone(zone) 804 for_each_populated_zone(zone)
808 if (populated_zone(zone) && is_highmem(zone)) 805 if (is_highmem(zone))
809 cnt += zone_page_state(zone, NR_FREE_PAGES); 806 cnt += zone_page_state(zone, NR_FREE_PAGES);
810 807
811 return cnt; 808 return cnt;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index a92c91451559..78c35047586d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -51,6 +51,7 @@
51#include <linux/highmem.h> 51#include <linux/highmem.h>
52#include <linux/time.h> 52#include <linux/time.h>
53#include <linux/rbtree.h> 53#include <linux/rbtree.h>
54#include <linux/io.h>
54 55
55#include "power.h" 56#include "power.h"
56 57
@@ -229,17 +230,16 @@ int swsusp_shrink_memory(void)
229 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES; 230 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
230 tmp = size; 231 tmp = size;
231 size += highmem_size; 232 size += highmem_size;
232 for_each_zone (zone) 233 for_each_populated_zone(zone) {
233 if (populated_zone(zone)) { 234 tmp += snapshot_additional_pages(zone);
234 tmp += snapshot_additional_pages(zone); 235 if (is_highmem(zone)) {
235 if (is_highmem(zone)) { 236 highmem_size -=
236 highmem_size -=
237 zone_page_state(zone, NR_FREE_PAGES); 237 zone_page_state(zone, NR_FREE_PAGES);
238 } else { 238 } else {
239 tmp -= zone_page_state(zone, NR_FREE_PAGES); 239 tmp -= zone_page_state(zone, NR_FREE_PAGES);
240 tmp += zone->lowmem_reserve[ZONE_NORMAL]; 240 tmp += zone->lowmem_reserve[ZONE_NORMAL];
241 }
242 } 241 }
242 }
243 243
244 if (highmem_size < 0) 244 if (highmem_size < 0)
245 highmem_size = 0; 245 highmem_size = 0;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 7c4142a79f0a..9b4a975a4b4a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -126,6 +126,7 @@ static atomic_t n_rcu_torture_mberror;
126static atomic_t n_rcu_torture_error; 126static atomic_t n_rcu_torture_error;
127static long n_rcu_torture_timers = 0; 127static long n_rcu_torture_timers = 0;
128static struct list_head rcu_torture_removed; 128static struct list_head rcu_torture_removed;
129static cpumask_var_t shuffle_tmp_mask;
129 130
130static int stutter_pause_test = 0; 131static int stutter_pause_test = 0;
131 132
@@ -889,10 +890,9 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
889 */ 890 */
890static void rcu_torture_shuffle_tasks(void) 891static void rcu_torture_shuffle_tasks(void)
891{ 892{
892 cpumask_t tmp_mask;
893 int i; 893 int i;
894 894
895 cpus_setall(tmp_mask); 895 cpumask_setall(shuffle_tmp_mask);
896 get_online_cpus(); 896 get_online_cpus();
897 897
898 /* No point in shuffling if there is only one online CPU (ex: UP) */ 898 /* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -902,29 +902,29 @@ static void rcu_torture_shuffle_tasks(void)
902 } 902 }
903 903
904 if (rcu_idle_cpu != -1) 904 if (rcu_idle_cpu != -1)
905 cpu_clear(rcu_idle_cpu, tmp_mask); 905 cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
906 906
907 set_cpus_allowed_ptr(current, &tmp_mask); 907 set_cpus_allowed_ptr(current, shuffle_tmp_mask);
908 908
909 if (reader_tasks) { 909 if (reader_tasks) {
910 for (i = 0; i < nrealreaders; i++) 910 for (i = 0; i < nrealreaders; i++)
911 if (reader_tasks[i]) 911 if (reader_tasks[i])
912 set_cpus_allowed_ptr(reader_tasks[i], 912 set_cpus_allowed_ptr(reader_tasks[i],
913 &tmp_mask); 913 shuffle_tmp_mask);
914 } 914 }
915 915
916 if (fakewriter_tasks) { 916 if (fakewriter_tasks) {
917 for (i = 0; i < nfakewriters; i++) 917 for (i = 0; i < nfakewriters; i++)
918 if (fakewriter_tasks[i]) 918 if (fakewriter_tasks[i])
919 set_cpus_allowed_ptr(fakewriter_tasks[i], 919 set_cpus_allowed_ptr(fakewriter_tasks[i],
920 &tmp_mask); 920 shuffle_tmp_mask);
921 } 921 }
922 922
923 if (writer_task) 923 if (writer_task)
924 set_cpus_allowed_ptr(writer_task, &tmp_mask); 924 set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
925 925
926 if (stats_task) 926 if (stats_task)
927 set_cpus_allowed_ptr(stats_task, &tmp_mask); 927 set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
928 928
929 if (rcu_idle_cpu == -1) 929 if (rcu_idle_cpu == -1)
930 rcu_idle_cpu = num_online_cpus() - 1; 930 rcu_idle_cpu = num_online_cpus() - 1;
@@ -1012,6 +1012,7 @@ rcu_torture_cleanup(void)
1012 if (shuffler_task) { 1012 if (shuffler_task) {
1013 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); 1013 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
1014 kthread_stop(shuffler_task); 1014 kthread_stop(shuffler_task);
1015 free_cpumask_var(shuffle_tmp_mask);
1015 } 1016 }
1016 shuffler_task = NULL; 1017 shuffler_task = NULL;
1017 1018
@@ -1190,10 +1191,18 @@ rcu_torture_init(void)
1190 } 1191 }
1191 if (test_no_idle_hz) { 1192 if (test_no_idle_hz) {
1192 rcu_idle_cpu = num_online_cpus() - 1; 1193 rcu_idle_cpu = num_online_cpus() - 1;
1194
1195 if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
1196 firsterr = -ENOMEM;
1197 VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
1198 goto unwind;
1199 }
1200
1193 /* Create the shuffler thread */ 1201 /* Create the shuffler thread */
1194 shuffler_task = kthread_run(rcu_torture_shuffle, NULL, 1202 shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
1195 "rcu_torture_shuffle"); 1203 "rcu_torture_shuffle");
1196 if (IS_ERR(shuffler_task)) { 1204 if (IS_ERR(shuffler_task)) {
1205 free_cpumask_var(shuffle_tmp_mask);
1197 firsterr = PTR_ERR(shuffler_task); 1206 firsterr = PTR_ERR(shuffler_task);
1198 VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); 1207 VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
1199 shuffler_task = NULL; 1208 shuffler_task = NULL;
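
The hunks above replace an on-stack cpumask_t with a cpumask_var_t, which is heap-allocated when CONFIG_CPUMASK_OFFSTACK=y. A minimal sketch of that lifecycle, using the cpumask helpers the patch itself calls; the surrounding example_init()/example_exit() functions are illustrative only.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static cpumask_var_t tmp_mask;

static int example_init(void)
{
        /* allocation can fail when the mask lives off-stack */
        if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_setall(tmp_mask);               /* start from all CPUs */
        cpumask_clear_cpu(0, tmp_mask);         /* e.g. leave CPU 0 idle */
        set_cpus_allowed_ptr(current, tmp_mask);
        return 0;
}

static void example_exit(void)
{
        free_cpumask_var(tmp_mask);             /* needed on every exit path */
}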
diff --git a/kernel/relay.c b/kernel/relay.c
index edc0ba6d8160..824b91ac10f1 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -748,7 +748,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
748 * from the scheduler (trying to re-grab 748 * from the scheduler (trying to re-grab
749 * rq->lock), so defer it. 749 * rq->lock), so defer it.
750 */ 750 */
751 __mod_timer(&buf->timer, jiffies + 1); 751 mod_timer(&buf->timer, jiffies + 1);
752 } 752 }
753 753
754 old = buf->data; 754 old = buf->data;
diff --git a/kernel/sched.c b/kernel/sched.c
index 7299083e69e7..f01cb63d1356 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -331,6 +331,13 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
331 */ 331 */
332static DEFINE_SPINLOCK(task_group_lock); 332static DEFINE_SPINLOCK(task_group_lock);
333 333
334#ifdef CONFIG_SMP
335static int root_task_group_empty(void)
336{
337 return list_empty(&root_task_group.children);
338}
339#endif
340
334#ifdef CONFIG_FAIR_GROUP_SCHED 341#ifdef CONFIG_FAIR_GROUP_SCHED
335#ifdef CONFIG_USER_SCHED 342#ifdef CONFIG_USER_SCHED
336# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) 343# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@ -391,6 +398,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
391 398
392#else 399#else
393 400
401#ifdef CONFIG_SMP
402static int root_task_group_empty(void)
403{
404 return 1;
405}
406#endif
407
394static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 408static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
395static inline struct task_group *task_group(struct task_struct *p) 409static inline struct task_group *task_group(struct task_struct *p)
396{ 410{
@@ -467,11 +481,17 @@ struct rt_rq {
467 struct rt_prio_array active; 481 struct rt_prio_array active;
468 unsigned long rt_nr_running; 482 unsigned long rt_nr_running;
469#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 483#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
470 int highest_prio; /* highest queued rt task prio */ 484 struct {
485 int curr; /* highest queued rt task prio */
486#ifdef CONFIG_SMP
487 int next; /* next highest */
488#endif
489 } highest_prio;
471#endif 490#endif
472#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
473 unsigned long rt_nr_migratory; 492 unsigned long rt_nr_migratory;
474 int overloaded; 493 int overloaded;
494 struct plist_head pushable_tasks;
475#endif 495#endif
476 int rt_throttled; 496 int rt_throttled;
477 u64 rt_time; 497 u64 rt_time;
@@ -549,7 +569,6 @@ struct rq {
549 unsigned long nr_running; 569 unsigned long nr_running;
550 #define CPU_LOAD_IDX_MAX 5 570 #define CPU_LOAD_IDX_MAX 5
551 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 571 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
552 unsigned char idle_at_tick;
553#ifdef CONFIG_NO_HZ 572#ifdef CONFIG_NO_HZ
554 unsigned long last_tick_seen; 573 unsigned long last_tick_seen;
555 unsigned char in_nohz_recently; 574 unsigned char in_nohz_recently;
@@ -590,6 +609,7 @@ struct rq {
590 struct root_domain *rd; 609 struct root_domain *rd;
591 struct sched_domain *sd; 610 struct sched_domain *sd;
592 611
612 unsigned char idle_at_tick;
593 /* For active balancing */ 613 /* For active balancing */
594 int active_balance; 614 int active_balance;
595 int push_cpu; 615 int push_cpu;
@@ -618,9 +638,6 @@ struct rq {
618 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 638 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
619 639
620 /* sys_sched_yield() stats */ 640 /* sys_sched_yield() stats */
621 unsigned int yld_exp_empty;
622 unsigned int yld_act_empty;
623 unsigned int yld_both_empty;
624 unsigned int yld_count; 641 unsigned int yld_count;
625 642
626 /* schedule() stats */ 643 /* schedule() stats */
@@ -1183,10 +1200,10 @@ static void resched_task(struct task_struct *p)
1183 1200
1184 assert_spin_locked(&task_rq(p)->lock); 1201 assert_spin_locked(&task_rq(p)->lock);
1185 1202
1186 if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) 1203 if (test_tsk_need_resched(p))
1187 return; 1204 return;
1188 1205
1189 set_tsk_thread_flag(p, TIF_NEED_RESCHED); 1206 set_tsk_need_resched(p);
1190 1207
1191 cpu = task_cpu(p); 1208 cpu = task_cpu(p);
1192 if (cpu == smp_processor_id()) 1209 if (cpu == smp_processor_id())
@@ -1242,7 +1259,7 @@ void wake_up_idle_cpu(int cpu)
1242 * lockless. The worst case is that the other CPU runs the 1259 * lockless. The worst case is that the other CPU runs the
1243 * idle task through an additional NOOP schedule() 1260 * idle task through an additional NOOP schedule()
1244 */ 1261 */
1245 set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED); 1262 set_tsk_need_resched(rq->idle);
1246 1263
1247 /* NEED_RESCHED must be visible before we test polling */ 1264 /* NEED_RESCHED must be visible before we test polling */
1248 smp_mb(); 1265 smp_mb();
@@ -1610,21 +1627,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1610 1627
1611#endif 1628#endif
1612 1629
1630#ifdef CONFIG_PREEMPT
1631
1613/* 1632/*
1614 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1633 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1634 * way at the expense of forcing extra atomic operations in all
1635 * invocations. This assures that the double_lock is acquired using the
1636 * same underlying policy as the spinlock_t on this architecture, which
1637 * reduces latency compared to the unfair variant below. However, it
1638 * also adds more overhead and therefore may reduce throughput.
1615 */ 1639 */
1616static int double_lock_balance(struct rq *this_rq, struct rq *busiest) 1640static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(this_rq->lock)
1642 __acquires(busiest->lock)
1643 __acquires(this_rq->lock)
1644{
1645 spin_unlock(&this_rq->lock);
1646 double_rq_lock(this_rq, busiest);
1647
1648 return 1;
1649}
1650
1651#else
1652/*
1653 * Unfair double_lock_balance: Optimizes throughput at the expense of
1654 * latency by eliminating extra atomic operations when the locks are
1655 * already in proper order on entry. This favors lower cpu-ids and will
1656 * grant the double lock to lower cpus over higher ids under contention,
1657 * regardless of entry order into the function.
1658 */
1659static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1617 __releases(this_rq->lock) 1660 __releases(this_rq->lock)
1618 __acquires(busiest->lock) 1661 __acquires(busiest->lock)
1619 __acquires(this_rq->lock) 1662 __acquires(this_rq->lock)
1620{ 1663{
1621 int ret = 0; 1664 int ret = 0;
1622 1665
1623 if (unlikely(!irqs_disabled())) {
1624 /* printk() doesn't work good under rq->lock */
1625 spin_unlock(&this_rq->lock);
1626 BUG_ON(1);
1627 }
1628 if (unlikely(!spin_trylock(&busiest->lock))) { 1666 if (unlikely(!spin_trylock(&busiest->lock))) {
1629 if (busiest < this_rq) { 1667 if (busiest < this_rq) {
1630 spin_unlock(&this_rq->lock); 1668 spin_unlock(&this_rq->lock);
@@ -1637,6 +1675,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1637 return ret; 1675 return ret;
1638} 1676}
1639 1677
1678#endif /* CONFIG_PREEMPT */
1679
1680/*
1681 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1682 */
1683static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1684{
1685 if (unlikely(!irqs_disabled())) {
 1686 /* printk() doesn't work well under rq->lock */
1687 spin_unlock(&this_rq->lock);
1688 BUG_ON(1);
1689 }
1690
1691 return _double_lock_balance(this_rq, busiest);
1692}
1693
1640static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1694static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(busiest->lock) 1695 __releases(busiest->lock)
1642{ 1696{
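
For reference, a sketch of how a balance path uses the pair above; pull_one_task() is a made-up name, only the locking pattern is taken from the code.

static void pull_one_task(struct rq *this_rq, struct rq *busiest)
{
        /* caller already holds this_rq->lock with interrupts disabled */
        if (double_lock_balance(this_rq, busiest)) {
                /*
                 * this_rq->lock was dropped while busiest->lock was taken
                 * (always on CONFIG_PREEMPT), so any state sampled before
                 * the call must be revalidated here.
                 */
        }
        /* both rq->locks held: tasks may be moved between the runqueues */
        double_unlock_balance(this_rq, busiest);
}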
@@ -1705,6 +1759,9 @@ static void update_avg(u64 *avg, u64 sample)
1705 1759
1706static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) 1760static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1707{ 1761{
1762 if (wakeup)
1763 p->se.start_runtime = p->se.sum_exec_runtime;
1764
1708 sched_info_queued(p); 1765 sched_info_queued(p);
1709 p->sched_class->enqueue_task(rq, p, wakeup); 1766 p->sched_class->enqueue_task(rq, p, wakeup);
1710 p->se.on_rq = 1; 1767 p->se.on_rq = 1;
@@ -1712,10 +1769,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1712 1769
1713static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1770static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1714{ 1771{
1715 if (sleep && p->se.last_wakeup) { 1772 if (sleep) {
1716 update_avg(&p->se.avg_overlap, 1773 if (p->se.last_wakeup) {
1717 p->se.sum_exec_runtime - p->se.last_wakeup); 1774 update_avg(&p->se.avg_overlap,
1718 p->se.last_wakeup = 0; 1775 p->se.sum_exec_runtime - p->se.last_wakeup);
1776 p->se.last_wakeup = 0;
1777 } else {
1778 update_avg(&p->se.avg_wakeup,
1779 sysctl_sched_wakeup_granularity);
1780 }
1719 } 1781 }
1720 1782
1721 sched_info_dequeued(p); 1783 sched_info_dequeued(p);
@@ -2017,7 +2079,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2017 * it must be off the runqueue _entirely_, and not 2079 * it must be off the runqueue _entirely_, and not
2018 * preempted! 2080 * preempted!
2019 * 2081 *
2020 * So if it wa still runnable (but just not actively 2082 * So if it was still runnable (but just not actively
2021 * running right now), it's preempted, and we should 2083 * running right now), it's preempted, and we should
2022 * yield - it could be a while. 2084 * yield - it could be a while.
2023 */ 2085 */
@@ -2267,7 +2329,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2267 sync = 0; 2329 sync = 0;
2268 2330
2269#ifdef CONFIG_SMP 2331#ifdef CONFIG_SMP
2270 if (sched_feat(LB_WAKEUP_UPDATE)) { 2332 if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
2271 struct sched_domain *sd; 2333 struct sched_domain *sd;
2272 2334
2273 this_cpu = raw_smp_processor_id(); 2335 this_cpu = raw_smp_processor_id();
@@ -2345,6 +2407,22 @@ out_activate:
2345 activate_task(rq, p, 1); 2407 activate_task(rq, p, 1);
2346 success = 1; 2408 success = 1;
2347 2409
2410 /*
2411 * Only attribute actual wakeups done by this task.
2412 */
2413 if (!in_interrupt()) {
2414 struct sched_entity *se = &current->se;
2415 u64 sample = se->sum_exec_runtime;
2416
2417 if (se->last_wakeup)
2418 sample -= se->last_wakeup;
2419 else
2420 sample -= se->start_runtime;
2421 update_avg(&se->avg_wakeup, sample);
2422
2423 se->last_wakeup = se->sum_exec_runtime;
2424 }
2425
2348out_running: 2426out_running:
2349 trace_sched_wakeup(rq, p, success); 2427 trace_sched_wakeup(rq, p, success);
2350 check_preempt_curr(rq, p, sync); 2428 check_preempt_curr(rq, p, sync);
@@ -2355,8 +2433,6 @@ out_running:
2355 p->sched_class->task_wake_up(rq, p); 2433 p->sched_class->task_wake_up(rq, p);
2356#endif 2434#endif
2357out: 2435out:
2358 current->se.last_wakeup = current->se.sum_exec_runtime;
2359
2360 task_rq_unlock(rq, &flags); 2436 task_rq_unlock(rq, &flags);
2361 2437
2362 return success; 2438 return success;
@@ -2386,6 +2462,8 @@ static void __sched_fork(struct task_struct *p)
2386 p->se.prev_sum_exec_runtime = 0; 2462 p->se.prev_sum_exec_runtime = 0;
2387 p->se.last_wakeup = 0; 2463 p->se.last_wakeup = 0;
2388 p->se.avg_overlap = 0; 2464 p->se.avg_overlap = 0;
2465 p->se.start_runtime = 0;
2466 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2389 2467
2390#ifdef CONFIG_SCHEDSTATS 2468#ifdef CONFIG_SCHEDSTATS
2391 p->se.wait_start = 0; 2469 p->se.wait_start = 0;
@@ -2448,6 +2526,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
2448 /* Want to start with kernel preemption disabled. */ 2526 /* Want to start with kernel preemption disabled. */
2449 task_thread_info(p)->preempt_count = 1; 2527 task_thread_info(p)->preempt_count = 1;
2450#endif 2528#endif
2529 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2530
2451 put_cpu(); 2531 put_cpu();
2452} 2532}
2453 2533
@@ -2491,7 +2571,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2491#ifdef CONFIG_PREEMPT_NOTIFIERS 2571#ifdef CONFIG_PREEMPT_NOTIFIERS
2492 2572
2493/** 2573/**
2494 * preempt_notifier_register - tell me when current is being being preempted & rescheduled 2574 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2495 * @notifier: notifier struct to register 2575 * @notifier: notifier struct to register
2496 */ 2576 */
2497void preempt_notifier_register(struct preempt_notifier *notifier) 2577void preempt_notifier_register(struct preempt_notifier *notifier)
@@ -2588,6 +2668,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2588{ 2668{
2589 struct mm_struct *mm = rq->prev_mm; 2669 struct mm_struct *mm = rq->prev_mm;
2590 long prev_state; 2670 long prev_state;
2671#ifdef CONFIG_SMP
2672 int post_schedule = 0;
2673
2674 if (current->sched_class->needs_post_schedule)
2675 post_schedule = current->sched_class->needs_post_schedule(rq);
2676#endif
2591 2677
2592 rq->prev_mm = NULL; 2678 rq->prev_mm = NULL;
2593 2679
@@ -2606,7 +2692,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2606 finish_arch_switch(prev); 2692 finish_arch_switch(prev);
2607 finish_lock_switch(rq, prev); 2693 finish_lock_switch(rq, prev);
2608#ifdef CONFIG_SMP 2694#ifdef CONFIG_SMP
2609 if (current->sched_class->post_schedule) 2695 if (post_schedule)
2610 current->sched_class->post_schedule(rq); 2696 current->sched_class->post_schedule(rq);
2611#endif 2697#endif
2612 2698
@@ -2913,6 +2999,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2913 struct sched_domain *sd, enum cpu_idle_type idle, 2999 struct sched_domain *sd, enum cpu_idle_type idle,
2914 int *all_pinned) 3000 int *all_pinned)
2915{ 3001{
3002 int tsk_cache_hot = 0;
2916 /* 3003 /*
2917 * We do not migrate tasks that are: 3004 * We do not migrate tasks that are:
2918 * 1) running (obviously), or 3005 * 1) running (obviously), or
@@ -2936,10 +3023,11 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2936 * 2) too many balance attempts have failed. 3023 * 2) too many balance attempts have failed.
2937 */ 3024 */
2938 3025
2939 if (!task_hot(p, rq->clock, sd) || 3026 tsk_cache_hot = task_hot(p, rq->clock, sd);
2940 sd->nr_balance_failed > sd->cache_nice_tries) { 3027 if (!tsk_cache_hot ||
3028 sd->nr_balance_failed > sd->cache_nice_tries) {
2941#ifdef CONFIG_SCHEDSTATS 3029#ifdef CONFIG_SCHEDSTATS
2942 if (task_hot(p, rq->clock, sd)) { 3030 if (tsk_cache_hot) {
2943 schedstat_inc(sd, lb_hot_gained[idle]); 3031 schedstat_inc(sd, lb_hot_gained[idle]);
2944 schedstat_inc(p, se.nr_forced_migrations); 3032 schedstat_inc(p, se.nr_forced_migrations);
2945 } 3033 }
@@ -2947,7 +3035,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2947 return 1; 3035 return 1;
2948 } 3036 }
2949 3037
2950 if (task_hot(p, rq->clock, sd)) { 3038 if (tsk_cache_hot) {
2951 schedstat_inc(p, se.nr_failed_migrations_hot); 3039 schedstat_inc(p, se.nr_failed_migrations_hot);
2952 return 0; 3040 return 0;
2953 } 3041 }
@@ -2987,6 +3075,16 @@ next:
2987 pulled++; 3075 pulled++;
2988 rem_load_move -= p->se.load.weight; 3076 rem_load_move -= p->se.load.weight;
2989 3077
3078#ifdef CONFIG_PREEMPT
3079 /*
3080 * NEWIDLE balancing is a source of latency, so preemptible kernels
3081 * will stop after the first task is pulled to minimize the critical
3082 * section.
3083 */
3084 if (idle == CPU_NEWLY_IDLE)
3085 goto out;
3086#endif
3087
2990 /* 3088 /*
2991 * We only want to steal up to the prescribed amount of weighted load. 3089 * We only want to steal up to the prescribed amount of weighted load.
2992 */ 3090 */
@@ -3033,9 +3131,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3033 sd, idle, all_pinned, &this_best_prio); 3131 sd, idle, all_pinned, &this_best_prio);
3034 class = class->next; 3132 class = class->next;
3035 3133
3134#ifdef CONFIG_PREEMPT
3135 /*
3136 * NEWIDLE balancing is a source of latency, so preemptible
3137 * kernels will stop after the first task is pulled to minimize
3138 * the critical section.
3139 */
3036 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) 3140 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3037 break; 3141 break;
3038 3142#endif
3039 } while (class && max_load_move > total_load_moved); 3143 } while (class && max_load_move > total_load_moved);
3040 3144
3041 return total_load_moved > 0; 3145 return total_load_moved > 0;
@@ -3085,246 +3189,480 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3085 3189
3086 return 0; 3190 return 0;
3087} 3191}
3088 3192/********** Helpers for find_busiest_group ************************/
3089/* 3193/*
3090 * find_busiest_group finds and returns the busiest CPU group within the 3194 * sd_lb_stats - Structure to store the statistics of a sched_domain
3091 * domain. It calculates and returns the amount of weighted load which 3195 * during load balancing.
3092 * should be moved to restore balance via the imbalance parameter.
3093 */ 3196 */
3094static struct sched_group * 3197struct sd_lb_stats {
3095find_busiest_group(struct sched_domain *sd, int this_cpu, 3198 struct sched_group *busiest; /* Busiest group in this sd */
3096 unsigned long *imbalance, enum cpu_idle_type idle, 3199 struct sched_group *this; /* Local group in this sd */
3097 int *sd_idle, const struct cpumask *cpus, int *balance) 3200 unsigned long total_load; /* Total load of all groups in sd */
3098{ 3201 unsigned long total_pwr; /* Total power of all groups in sd */
3099 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; 3202 unsigned long avg_load; /* Average load across all groups in sd */
3100 unsigned long max_load, avg_load, total_load, this_load, total_pwr; 3203
3101 unsigned long max_pull; 3204 /** Statistics of this group */
3102 unsigned long busiest_load_per_task, busiest_nr_running; 3205 unsigned long this_load;
3103 unsigned long this_load_per_task, this_nr_running; 3206 unsigned long this_load_per_task;
3104 int load_idx, group_imb = 0; 3207 unsigned long this_nr_running;
3208
3209 /* Statistics of the busiest group */
3210 unsigned long max_load;
3211 unsigned long busiest_load_per_task;
3212 unsigned long busiest_nr_running;
3213
3214 int group_imb; /* Is there imbalance in this sd */
3105#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3215#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3106 int power_savings_balance = 1; 3216 int power_savings_balance; /* Is powersave balance needed for this sd */
3107 unsigned long leader_nr_running = 0, min_load_per_task = 0; 3217 struct sched_group *group_min; /* Least loaded group in sd */
3108 unsigned long min_nr_running = ULONG_MAX; 3218 struct sched_group *group_leader; /* Group which relieves group_min */
3109 struct sched_group *group_min = NULL, *group_leader = NULL; 3219 unsigned long min_load_per_task; /* load_per_task in group_min */
3220 unsigned long leader_nr_running; /* Nr running of group_leader */
3221 unsigned long min_nr_running; /* Nr running of group_min */
3110#endif 3222#endif
3223};
3224
3225/*
3226 * sg_lb_stats - stats of a sched_group required for load_balancing
3227 */
3228struct sg_lb_stats {
 3229 unsigned long avg_load; /* Avg load across the CPUs of the group */
3230 unsigned long group_load; /* Total load over the CPUs of the group */
3231 unsigned long sum_nr_running; /* Nr tasks running in the group */
3232 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3233 unsigned long group_capacity;
3234 int group_imb; /* Is there an imbalance in the group ? */
3235};
3236
3237/**
3238 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
3239 * @group: The group whose first cpu is to be returned.
3240 */
3241static inline unsigned int group_first_cpu(struct sched_group *group)
3242{
3243 return cpumask_first(sched_group_cpus(group));
3244}
3111 3245
3112 max_load = this_load = total_load = total_pwr = 0; 3246/**
3113 busiest_load_per_task = busiest_nr_running = 0; 3247 * get_sd_load_idx - Obtain the load index for a given sched domain.
3114 this_load_per_task = this_nr_running = 0; 3248 * @sd: The sched_domain whose load_idx is to be obtained.
3249 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
3250 */
3251static inline int get_sd_load_idx(struct sched_domain *sd,
3252 enum cpu_idle_type idle)
3253{
3254 int load_idx;
3115 3255
3116 if (idle == CPU_NOT_IDLE) 3256 switch (idle) {
3257 case CPU_NOT_IDLE:
3117 load_idx = sd->busy_idx; 3258 load_idx = sd->busy_idx;
3118 else if (idle == CPU_NEWLY_IDLE) 3259 break;
3260
3261 case CPU_NEWLY_IDLE:
3119 load_idx = sd->newidle_idx; 3262 load_idx = sd->newidle_idx;
3120 else 3263 break;
3264 default:
3121 load_idx = sd->idle_idx; 3265 load_idx = sd->idle_idx;
3266 break;
3267 }
3122 3268
3123 do { 3269 return load_idx;
3124 unsigned long load, group_capacity, max_cpu_load, min_cpu_load; 3270}
3125 int local_group;
3126 int i;
3127 int __group_imb = 0;
3128 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3129 unsigned long sum_nr_running, sum_weighted_load;
3130 unsigned long sum_avg_load_per_task;
3131 unsigned long avg_load_per_task;
3132 3271
3133 local_group = cpumask_test_cpu(this_cpu,
3134 sched_group_cpus(group));
3135 3272
3136 if (local_group) 3273#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3137 balance_cpu = cpumask_first(sched_group_cpus(group)); 3274/**
3275 * init_sd_power_savings_stats - Initialize power savings statistics for
3276 * the given sched_domain, during load balancing.
3277 *
3278 * @sd: Sched domain whose power-savings statistics are to be initialized.
3279 * @sds: Variable containing the statistics for sd.
3280 * @idle: Idle status of the CPU at which we're performing load-balancing.
3281 */
3282static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3283 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3284{
3285 /*
3286 * Busy processors will not participate in power savings
3287 * balance.
3288 */
3289 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3290 sds->power_savings_balance = 0;
3291 else {
3292 sds->power_savings_balance = 1;
3293 sds->min_nr_running = ULONG_MAX;
3294 sds->leader_nr_running = 0;
3295 }
3296}
3138 3297
3139 /* Tally up the load of all CPUs in the group */ 3298/**
3140 sum_weighted_load = sum_nr_running = avg_load = 0; 3299 * update_sd_power_savings_stats - Update the power saving stats for a
3141 sum_avg_load_per_task = avg_load_per_task = 0; 3300 * sched_domain while performing load balancing.
3301 *
3302 * @group: sched_group belonging to the sched_domain under consideration.
3303 * @sds: Variable containing the statistics of the sched_domain
3304 * @local_group: Does group contain the CPU for which we're performing
3305 * load balancing ?
3306 * @sgs: Variable containing the statistics of the group.
3307 */
3308static inline void update_sd_power_savings_stats(struct sched_group *group,
3309 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3310{
3142 3311
3143 max_cpu_load = 0; 3312 if (!sds->power_savings_balance)
3144 min_cpu_load = ~0UL; 3313 return;
3145 3314
3146 for_each_cpu_and(i, sched_group_cpus(group), cpus) { 3315 /*
3147 struct rq *rq = cpu_rq(i); 3316 * If the local group is idle or completely loaded
3317 * no need to do power savings balance at this domain
3318 */
3319 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3320 !sds->this_nr_running))
3321 sds->power_savings_balance = 0;
3148 3322
3149 if (*sd_idle && rq->nr_running) 3323 /*
3150 *sd_idle = 0; 3324 * If a group is already running at full capacity or idle,
3325 * don't include that group in power savings calculations
3326 */
3327 if (!sds->power_savings_balance ||
3328 sgs->sum_nr_running >= sgs->group_capacity ||
3329 !sgs->sum_nr_running)
3330 return;
3151 3331
3152 /* Bias balancing toward cpus of our domain */ 3332 /*
3153 if (local_group) { 3333 * Calculate the group which has the least non-idle load.
3154 if (idle_cpu(i) && !first_idle_cpu) { 3334 * This is the group from where we need to pick up the load
3155 first_idle_cpu = 1; 3335 * for saving power
3156 balance_cpu = i; 3336 */
3157 } 3337 if ((sgs->sum_nr_running < sds->min_nr_running) ||
3338 (sgs->sum_nr_running == sds->min_nr_running &&
3339 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3340 sds->group_min = group;
3341 sds->min_nr_running = sgs->sum_nr_running;
3342 sds->min_load_per_task = sgs->sum_weighted_load /
3343 sgs->sum_nr_running;
3344 }
3158 3345
3159 load = target_load(i, load_idx); 3346 /*
3160 } else { 3347 * Calculate the group which is almost near its
3161 load = source_load(i, load_idx); 3348 * capacity but still has some space to pick up some load
3162 if (load > max_cpu_load) 3349 * from other group and save more power
3163 max_cpu_load = load; 3350 */
3164 if (min_cpu_load > load) 3351 if (sgs->sum_nr_running > sgs->group_capacity - 1)
3165 min_cpu_load = load; 3352 return;
3166 }
3167 3353
3168 avg_load += load; 3354 if (sgs->sum_nr_running > sds->leader_nr_running ||
3169 sum_nr_running += rq->nr_running; 3355 (sgs->sum_nr_running == sds->leader_nr_running &&
3170 sum_weighted_load += weighted_cpuload(i); 3356 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3357 sds->group_leader = group;
3358 sds->leader_nr_running = sgs->sum_nr_running;
3359 }
3360}
3171 3361
3172 sum_avg_load_per_task += cpu_avg_load_per_task(i); 3362/**
3173 } 3363 * check_power_save_busiest_group - see if there is potential for some power-savings balance
3364 * @sds: Variable containing the statistics of the sched_domain
3365 * under consideration.
3366 * @this_cpu: Cpu at which we're currently performing load-balancing.
3367 * @imbalance: Variable to store the imbalance.
3368 *
3369 * Description:
3370 * Check if we have potential to perform some power-savings balance.
3371 * If yes, set the busiest group to be the least loaded group in the
 3372 * sched_domain, so that its CPUs can be put to idle.
3373 *
3374 * Returns 1 if there is potential to perform power-savings balance.
3375 * Else returns 0.
3376 */
3377static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3378 int this_cpu, unsigned long *imbalance)
3379{
3380 if (!sds->power_savings_balance)
3381 return 0;
3174 3382
3175 /* 3383 if (sds->this != sds->group_leader ||
3176 * First idle cpu or the first cpu(busiest) in this sched group 3384 sds->group_leader == sds->group_min)
3177 * is eligible for doing load balancing at this and above 3385 return 0;
3178 * domains. In the newly idle case, we will allow all the cpu's
3179 * to do the newly idle load balance.
3180 */
3181 if (idle != CPU_NEWLY_IDLE && local_group &&
3182 balance_cpu != this_cpu && balance) {
3183 *balance = 0;
3184 goto ret;
3185 }
3186 3386
3187 total_load += avg_load; 3387 *imbalance = sds->min_load_per_task;
3188 total_pwr += group->__cpu_power; 3388 sds->busiest = sds->group_min;
3189 3389
3190 /* Adjust by relative CPU power of the group */ 3390 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
3191 avg_load = sg_div_cpu_power(group, 3391 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
3192 avg_load * SCHED_LOAD_SCALE); 3392 group_first_cpu(sds->group_leader);
3393 }
3193 3394
3395 return 1;
3194 3396
3195 /* 3397}
3196 * Consider the group unbalanced when the imbalance is larger 3398#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3197 * than the average weight of two tasks. 3399static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3198 * 3400 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3199 * APZ: with cgroup the avg task weight can vary wildly and 3401{
3200 * might not be a suitable number - should we keep a 3402 return;
3201 * normalized nr_running number somewhere that negates 3403}
3202 * the hierarchy?
3203 */
3204 avg_load_per_task = sg_div_cpu_power(group,
3205 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3206 3404
3207 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) 3405static inline void update_sd_power_savings_stats(struct sched_group *group,
3208 __group_imb = 1; 3406 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3407{
3408 return;
3409}
3410
3411static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3412 int this_cpu, unsigned long *imbalance)
3413{
3414 return 0;
3415}
3416#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3417
3418
3419/**
3420 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3421 * @group: sched_group whose statistics are to be updated.
3422 * @this_cpu: Cpu for which load balance is currently performed.
3423 * @idle: Idle status of this_cpu
3424 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3425 * @sd_idle: Idle status of the sched_domain containing group.
3426 * @local_group: Does group contain this_cpu.
3427 * @cpus: Set of cpus considered for load balancing.
3428 * @balance: Should we balance.
3429 * @sgs: variable to hold the statistics for this group.
3430 */
3431static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
3432 enum cpu_idle_type idle, int load_idx, int *sd_idle,
3433 int local_group, const struct cpumask *cpus,
3434 int *balance, struct sg_lb_stats *sgs)
3435{
3436 unsigned long load, max_cpu_load, min_cpu_load;
3437 int i;
3438 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3439 unsigned long sum_avg_load_per_task;
3440 unsigned long avg_load_per_task;
3209 3441
3210 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; 3442 if (local_group)
3443 balance_cpu = group_first_cpu(group);
3211 3444
3445 /* Tally up the load of all CPUs in the group */
3446 sum_avg_load_per_task = avg_load_per_task = 0;
3447 max_cpu_load = 0;
3448 min_cpu_load = ~0UL;
3449
3450 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3451 struct rq *rq = cpu_rq(i);
3452
3453 if (*sd_idle && rq->nr_running)
3454 *sd_idle = 0;
3455
3456 /* Bias balancing toward cpus of our domain */
3212 if (local_group) { 3457 if (local_group) {
3213 this_load = avg_load; 3458 if (idle_cpu(i) && !first_idle_cpu) {
3214 this = group; 3459 first_idle_cpu = 1;
3215 this_nr_running = sum_nr_running; 3460 balance_cpu = i;
3216 this_load_per_task = sum_weighted_load; 3461 }
3217 } else if (avg_load > max_load && 3462
3218 (sum_nr_running > group_capacity || __group_imb)) { 3463 load = target_load(i, load_idx);
3219 max_load = avg_load; 3464 } else {
3220 busiest = group; 3465 load = source_load(i, load_idx);
3221 busiest_nr_running = sum_nr_running; 3466 if (load > max_cpu_load)
3222 busiest_load_per_task = sum_weighted_load; 3467 max_cpu_load = load;
3223 group_imb = __group_imb; 3468 if (min_cpu_load > load)
3469 min_cpu_load = load;
3224 } 3470 }
3225 3471
3226#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3472 sgs->group_load += load;
3227 /* 3473 sgs->sum_nr_running += rq->nr_running;
3228 * Busy processors will not participate in power savings 3474 sgs->sum_weighted_load += weighted_cpuload(i);
3229 * balance.
3230 */
3231 if (idle == CPU_NOT_IDLE ||
3232 !(sd->flags & SD_POWERSAVINGS_BALANCE))
3233 goto group_next;
3234 3475
3235 /* 3476 sum_avg_load_per_task += cpu_avg_load_per_task(i);
3236 * If the local group is idle or completely loaded 3477 }
3237 * no need to do power savings balance at this domain
3238 */
3239 if (local_group && (this_nr_running >= group_capacity ||
3240 !this_nr_running))
3241 power_savings_balance = 0;
3242 3478
3243 /* 3479 /*
3244 * If a group is already running at full capacity or idle, 3480 * First idle cpu or the first cpu(busiest) in this sched group
3245 * don't include that group in power savings calculations 3481 * is eligible for doing load balancing at this and above
3246 */ 3482 * domains. In the newly idle case, we will allow all the cpus
3247 if (!power_savings_balance || sum_nr_running >= group_capacity 3483 * to do the newly idle load balance.
3248 || !sum_nr_running) 3484 */
3249 goto group_next; 3485 if (idle != CPU_NEWLY_IDLE && local_group &&
3486 balance_cpu != this_cpu && balance) {
3487 *balance = 0;
3488 return;
3489 }
3250 3490
3251 /* 3491 /* Adjust by relative CPU power of the group */
3252 * Calculate the group which has the least non-idle load. 3492 sgs->avg_load = sg_div_cpu_power(group,
3253 * This is the group from where we need to pick up the load 3493 sgs->group_load * SCHED_LOAD_SCALE);
3254 * for saving power
3255 */
3256 if ((sum_nr_running < min_nr_running) ||
3257 (sum_nr_running == min_nr_running &&
3258 cpumask_first(sched_group_cpus(group)) >
3259 cpumask_first(sched_group_cpus(group_min)))) {
3260 group_min = group;
3261 min_nr_running = sum_nr_running;
3262 min_load_per_task = sum_weighted_load /
3263 sum_nr_running;
3264 }
3265 3494
3266 /* 3495
3267 * Calculate the group which is almost near its 3496 /*
3268 * capacity but still has some space to pick up some load 3497 * Consider the group unbalanced when the imbalance is larger
3269 * from other group and save more power 3498 * than the average weight of two tasks.
3270 */ 3499 *
3271 if (sum_nr_running <= group_capacity - 1) { 3500 * APZ: with cgroup the avg task weight can vary wildly and
3272 if (sum_nr_running > leader_nr_running || 3501 * might not be a suitable number - should we keep a
3273 (sum_nr_running == leader_nr_running && 3502 * normalized nr_running number somewhere that negates
3274 cpumask_first(sched_group_cpus(group)) < 3503 * the hierarchy?
3275 cpumask_first(sched_group_cpus(group_leader)))) { 3504 */
3276 group_leader = group; 3505 avg_load_per_task = sg_div_cpu_power(group,
3277 leader_nr_running = sum_nr_running; 3506 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3278 } 3507
3508 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3509 sgs->group_imb = 1;
3510
3511 sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
3512
3513}
3514
3515/**
3516 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
3517 * @sd: sched_domain whose statistics are to be updated.
3518 * @this_cpu: Cpu for which load balance is currently performed.
3519 * @idle: Idle status of this_cpu
3520 * @sd_idle: Idle status of the sched_domain containing group.
3521 * @cpus: Set of cpus considered for load balancing.
3522 * @balance: Should we balance.
3523 * @sds: variable to hold the statistics for this sched_domain.
3524 */
3525static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3526 enum cpu_idle_type idle, int *sd_idle,
3527 const struct cpumask *cpus, int *balance,
3528 struct sd_lb_stats *sds)
3529{
3530 struct sched_group *group = sd->groups;
3531 struct sg_lb_stats sgs;
3532 int load_idx;
3533
3534 init_sd_power_savings_stats(sd, sds, idle);
3535 load_idx = get_sd_load_idx(sd, idle);
3536
3537 do {
3538 int local_group;
3539
3540 local_group = cpumask_test_cpu(this_cpu,
3541 sched_group_cpus(group));
3542 memset(&sgs, 0, sizeof(sgs));
3543 update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
3544 local_group, cpus, balance, &sgs);
3545
3546 if (local_group && balance && !(*balance))
3547 return;
3548
3549 sds->total_load += sgs.group_load;
3550 sds->total_pwr += group->__cpu_power;
3551
3552 if (local_group) {
3553 sds->this_load = sgs.avg_load;
3554 sds->this = group;
3555 sds->this_nr_running = sgs.sum_nr_running;
3556 sds->this_load_per_task = sgs.sum_weighted_load;
3557 } else if (sgs.avg_load > sds->max_load &&
3558 (sgs.sum_nr_running > sgs.group_capacity ||
3559 sgs.group_imb)) {
3560 sds->max_load = sgs.avg_load;
3561 sds->busiest = group;
3562 sds->busiest_nr_running = sgs.sum_nr_running;
3563 sds->busiest_load_per_task = sgs.sum_weighted_load;
3564 sds->group_imb = sgs.group_imb;
3279 } 3565 }
3280group_next: 3566
3281#endif 3567 update_sd_power_savings_stats(group, sds, local_group, &sgs);
3282 group = group->next; 3568 group = group->next;
3283 } while (group != sd->groups); 3569 } while (group != sd->groups);
3284 3570
3285 if (!busiest || this_load >= max_load || busiest_nr_running == 0) 3571}
3286 goto out_balanced;
3287
3288 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
3289 3572
3290 if (this_load >= avg_load || 3573/**
3291 100*max_load <= sd->imbalance_pct*this_load) 3574 * fix_small_imbalance - Calculate the minor imbalance that exists
3292 goto out_balanced; 3575 * amongst the groups of a sched_domain, during
3576 * load balancing.
3577 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3578 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3579 * @imbalance: Variable to store the imbalance.
3580 */
3581static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3582 int this_cpu, unsigned long *imbalance)
3583{
3584 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3585 unsigned int imbn = 2;
3586
3587 if (sds->this_nr_running) {
3588 sds->this_load_per_task /= sds->this_nr_running;
3589 if (sds->busiest_load_per_task >
3590 sds->this_load_per_task)
3591 imbn = 1;
3592 } else
3593 sds->this_load_per_task =
3594 cpu_avg_load_per_task(this_cpu);
3293 3595
3294 busiest_load_per_task /= busiest_nr_running; 3596 if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3295 if (group_imb) 3597 sds->busiest_load_per_task * imbn) {
3296 busiest_load_per_task = min(busiest_load_per_task, avg_load); 3598 *imbalance = sds->busiest_load_per_task;
3599 return;
3600 }
3297 3601
3298 /* 3602 /*
3299 * We're trying to get all the cpus to the average_load, so we don't 3603 * OK, we don't have enough imbalance to justify moving tasks,
3300 * want to push ourselves above the average load, nor do we wish to 3604 * however we may be able to increase total CPU power used by
3301 * reduce the max loaded cpu below the average load, as either of these 3605 * moving them.
3302 * actions would just result in more rebalancing later, and ping-pong
3303 * tasks around. Thus we look for the minimum possible imbalance.
3304 * Negative imbalances (*we* are more loaded than anyone else) will
3305 * be counted as no imbalance for these purposes -- we can't fix that
3306 * by pulling tasks to us. Be careful of negative numbers as they'll
3307 * appear as very large values with unsigned longs.
3308 */ 3606 */
3309 if (max_load <= busiest_load_per_task)
3310 goto out_balanced;
3311 3607
3608 pwr_now += sds->busiest->__cpu_power *
3609 min(sds->busiest_load_per_task, sds->max_load);
3610 pwr_now += sds->this->__cpu_power *
3611 min(sds->this_load_per_task, sds->this_load);
3612 pwr_now /= SCHED_LOAD_SCALE;
3613
3614 /* Amount of load we'd subtract */
3615 tmp = sg_div_cpu_power(sds->busiest,
3616 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3617 if (sds->max_load > tmp)
3618 pwr_move += sds->busiest->__cpu_power *
3619 min(sds->busiest_load_per_task, sds->max_load - tmp);
3620
3621 /* Amount of load we'd add */
3622 if (sds->max_load * sds->busiest->__cpu_power <
3623 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
3624 tmp = sg_div_cpu_power(sds->this,
3625 sds->max_load * sds->busiest->__cpu_power);
3626 else
3627 tmp = sg_div_cpu_power(sds->this,
3628 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3629 pwr_move += sds->this->__cpu_power *
3630 min(sds->this_load_per_task, sds->this_load + tmp);
3631 pwr_move /= SCHED_LOAD_SCALE;
3632
3633 /* Move if we gain throughput */
3634 if (pwr_move > pwr_now)
3635 *imbalance = sds->busiest_load_per_task;
3636}
3637
3638/**
3639 * calculate_imbalance - Calculate the amount of imbalance present within the
3640 * groups of a given sched_domain during load balance.
3641 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3642 * @this_cpu: Cpu for which currently load balance is being performed.
3643 * @imbalance: The variable to store the imbalance.
3644 */
3645static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3646 unsigned long *imbalance)
3647{
3648 unsigned long max_pull;
3312 /* 3649 /*
3313 * In the presence of smp nice balancing, certain scenarios can have 3650 * In the presence of smp nice balancing, certain scenarios can have
3314 * max load less than avg load(as we skip the groups at or below 3651 * max load less than avg load(as we skip the groups at or below
3315 * its cpu_power, while calculating max_load..) 3652 * its cpu_power, while calculating max_load..)
3316 */ 3653 */
3317 if (max_load < avg_load) { 3654 if (sds->max_load < sds->avg_load) {
3318 *imbalance = 0; 3655 *imbalance = 0;
3319 goto small_imbalance; 3656 return fix_small_imbalance(sds, this_cpu, imbalance);
3320 } 3657 }
3321 3658
3322 /* Don't want to pull so many tasks that a group would go idle */ 3659 /* Don't want to pull so many tasks that a group would go idle */
3323 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task); 3660 max_pull = min(sds->max_load - sds->avg_load,
3661 sds->max_load - sds->busiest_load_per_task);
3324 3662
3325 /* How much load to actually move to equalise the imbalance */ 3663 /* How much load to actually move to equalise the imbalance */
3326 *imbalance = min(max_pull * busiest->__cpu_power, 3664 *imbalance = min(max_pull * sds->busiest->__cpu_power,
3327 (avg_load - this_load) * this->__cpu_power) 3665 (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
3328 / SCHED_LOAD_SCALE; 3666 / SCHED_LOAD_SCALE;
3329 3667
3330 /* 3668 /*
@@ -3333,78 +3671,110 @@ group_next:
3333 * a think about bumping its value to force at least one task to be 3671 * a think about bumping its value to force at least one task to be
3334 * moved 3672 * moved
3335 */ 3673 */
3336 if (*imbalance < busiest_load_per_task) { 3674 if (*imbalance < sds->busiest_load_per_task)
3337 unsigned long tmp, pwr_now, pwr_move; 3675 return fix_small_imbalance(sds, this_cpu, imbalance);
3338 unsigned int imbn;
3339
3340small_imbalance:
3341 pwr_move = pwr_now = 0;
3342 imbn = 2;
3343 if (this_nr_running) {
3344 this_load_per_task /= this_nr_running;
3345 if (busiest_load_per_task > this_load_per_task)
3346 imbn = 1;
3347 } else
3348 this_load_per_task = cpu_avg_load_per_task(this_cpu);
3349 3676
3350 if (max_load - this_load + busiest_load_per_task >= 3677}
3351 busiest_load_per_task * imbn) { 3678/******* find_busiest_group() helpers end here *********************/
3352 *imbalance = busiest_load_per_task;
3353 return busiest;
3354 }
3355 3679
3356 /* 3680/**
3357 * OK, we don't have enough imbalance to justify moving tasks, 3681 * find_busiest_group - Returns the busiest group within the sched_domain
3358 * however we may be able to increase total CPU power used by 3682 * if there is an imbalance. If there isn't an imbalance, and
3359 * moving them. 3683 * the user has opted for power-savings, it returns a group whose
3360 */ 3684 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3685 * such a group exists.
3686 *
3687 * Also calculates the amount of weighted load which should be moved
3688 * to restore balance.
3689 *
3690 * @sd: The sched_domain whose busiest group is to be returned.
3691 * @this_cpu: The cpu for which load balancing is currently being performed.
3692 * @imbalance: Variable which stores amount of weighted load which should
3693 * be moved to restore balance/put a group to idle.
3694 * @idle: The idle status of this_cpu.
3695 * @sd_idle: The idleness of sd
3696 * @cpus: The set of CPUs under consideration for load-balancing.
3697 * @balance: Pointer to a variable indicating if this_cpu
3698 * is the appropriate cpu to perform load balancing at this_level.
3699 *
3700 * Returns: - the busiest group if imbalance exists.
3701 * - If no imbalance and user has opted for power-savings balance,
3702 * return the least loaded group whose CPUs can be
3703 * put to idle by rebalancing its tasks onto our group.
3704 */
3705static struct sched_group *
3706find_busiest_group(struct sched_domain *sd, int this_cpu,
3707 unsigned long *imbalance, enum cpu_idle_type idle,
3708 int *sd_idle, const struct cpumask *cpus, int *balance)
3709{
3710 struct sd_lb_stats sds;
3361 3711
3362 pwr_now += busiest->__cpu_power * 3712 memset(&sds, 0, sizeof(sds));
3363 min(busiest_load_per_task, max_load);
3364 pwr_now += this->__cpu_power *
3365 min(this_load_per_task, this_load);
3366 pwr_now /= SCHED_LOAD_SCALE;
3367
3368 /* Amount of load we'd subtract */
3369 tmp = sg_div_cpu_power(busiest,
3370 busiest_load_per_task * SCHED_LOAD_SCALE);
3371 if (max_load > tmp)
3372 pwr_move += busiest->__cpu_power *
3373 min(busiest_load_per_task, max_load - tmp);
3374
3375 /* Amount of load we'd add */
3376 if (max_load * busiest->__cpu_power <
3377 busiest_load_per_task * SCHED_LOAD_SCALE)
3378 tmp = sg_div_cpu_power(this,
3379 max_load * busiest->__cpu_power);
3380 else
3381 tmp = sg_div_cpu_power(this,
3382 busiest_load_per_task * SCHED_LOAD_SCALE);
3383 pwr_move += this->__cpu_power *
3384 min(this_load_per_task, this_load + tmp);
3385 pwr_move /= SCHED_LOAD_SCALE;
3386 3713
3387 /* Move if we gain throughput */ 3714 /*
3388 if (pwr_move > pwr_now) 3715 * Compute the various statistics relavent for load balancing at
3389 *imbalance = busiest_load_per_task; 3716 * this level.
3390 } 3717 */
3718 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
3719 balance, &sds);
3720
3721 /* Cases where imbalance does not exist from POV of this_cpu */
3722 /* 1) this_cpu is not the appropriate cpu to perform load balancing
3723 * at this level.
3724 * 2) There is no busy sibling group to pull from.
3725 * 3) This group is the busiest group.
3726 * 4) This group is more busy than the avg busieness at this
3727 * sched_domain.
3728 * 5) The imbalance is within the specified limit.
3729 * 6) Any rebalance would lead to ping-pong
3730 */
3731 if (balance && !(*balance))
3732 goto ret;
3391 3733
3392 return busiest; 3734 if (!sds.busiest || sds.busiest_nr_running == 0)
3735 goto out_balanced;
3393 3736
3394out_balanced: 3737 if (sds.this_load >= sds.max_load)
3395#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3738 goto out_balanced;
3396 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3397 goto ret;
3398 3739
3399 if (this == group_leader && group_leader != group_min) { 3740 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
3400 *imbalance = min_load_per_task; 3741
3401 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { 3742 if (sds.this_load >= sds.avg_load)
3402 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = 3743 goto out_balanced;
3403 cpumask_first(sched_group_cpus(group_leader)); 3744
3404 } 3745 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3405 return group_min; 3746 goto out_balanced;
3406 } 3747
3407#endif 3748 sds.busiest_load_per_task /= sds.busiest_nr_running;
3749 if (sds.group_imb)
3750 sds.busiest_load_per_task =
3751 min(sds.busiest_load_per_task, sds.avg_load);
3752
3753 /*
3754 * We're trying to get all the cpus to the average_load, so we don't
3755 * want to push ourselves above the average load, nor do we wish to
3756 * reduce the max loaded cpu below the average load, as either of these
3757 * actions would just result in more rebalancing later, and ping-pong
3758 * tasks around. Thus we look for the minimum possible imbalance.
3759 * Negative imbalances (*we* are more loaded than anyone else) will
3760 * be counted as no imbalance for these purposes -- we can't fix that
3761 * by pulling tasks to us. Be careful of negative numbers as they'll
3762 * appear as very large values with unsigned longs.
3763 */
3764 if (sds.max_load <= sds.busiest_load_per_task)
3765 goto out_balanced;
3766
3767 /* Looks like there is an imbalance. Compute it */
3768 calculate_imbalance(&sds, this_cpu, imbalance);
3769 return sds.busiest;
3770
3771out_balanced:
3772 /*
3773 * There is no obvious imbalance. But check if we can do some balancing
3774 * to save power.
3775 */
3776 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3777 return sds.busiest;
3408ret: 3778ret:
3409 *imbalance = 0; 3779 *imbalance = 0;
3410 return NULL; 3780 return NULL;
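
The hunk above splits the old monolithic find_busiest_group() into per-group (sg_lb_stats) and per-domain (sd_lb_stats) statistics helpers. A minimal userspace sketch of the accumulation pattern follows; the structures are simplified stand-ins that keep only a handful of the kernel's fields, and a plain array replaces the circular sched_group list:

#include <stdio.h>

/* Simplified stand-ins for the kernel's sg_lb_stats / sd_lb_stats;
 * only the fields needed for the busiest-group decision are kept. */
struct sg_lb_stats {
        unsigned long group_load;       /* total weighted load of the group */
        unsigned long sum_nr_running;   /* runnable tasks in the group */
        unsigned long group_capacity;   /* tasks the group can hold */
        int group_imb;                  /* intra-group imbalance flag */
};

struct sd_lb_stats {
        unsigned long total_load;
        unsigned long max_load;
        struct sg_lb_stats busiest;
        int has_busiest;
};

/* Fold one group's statistics into the domain summary, using the same rule
 * as update_sd_lb_stats(): the busiest group is the one with the highest
 * average load that is over capacity or internally imbalanced. */
static void update_sd_stats(struct sd_lb_stats *sds,
                            const struct sg_lb_stats *sgs,
                            unsigned long avg_load)
{
        sds->total_load += sgs->group_load;
        if (avg_load > sds->max_load &&
            (sgs->sum_nr_running > sgs->group_capacity || sgs->group_imb)) {
                sds->max_load = avg_load;
                sds->busiest = *sgs;
                sds->has_busiest = 1;
        }
}

int main(void)
{
        struct sg_lb_stats groups[2] = {
                { .group_load = 3072, .sum_nr_running = 3, .group_capacity = 2 },
                { .group_load = 1024, .sum_nr_running = 1, .group_capacity = 2 },
        };
        struct sd_lb_stats sds = { 0 };
        int i;

        for (i = 0; i < 2; i++)
                update_sd_stats(&sds, &groups[i], groups[i].group_load);

        printf("busiest: load=%lu nr_running=%lu\n",
               sds.busiest.group_load, sds.busiest.sum_nr_running);
        return 0;
}

The kernel version also folds each group's numbers into the power-savings bookkeeping through update_sd_power_savings_stats(), which the refactoring reduces to the no-op stub shown earlier when neither SCHED_MC nor SCHED_SMT is configured.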
@@ -4057,6 +4427,11 @@ static void run_rebalance_domains(struct softirq_action *h)
4057#endif 4427#endif
4058} 4428}
4059 4429
4430static inline int on_null_domain(int cpu)
4431{
4432 return !rcu_dereference(cpu_rq(cpu)->sd);
4433}
4434
4060/* 4435/*
4061 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 4436 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4062 * 4437 *
@@ -4114,7 +4489,9 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
4114 cpumask_test_cpu(cpu, nohz.cpu_mask)) 4489 cpumask_test_cpu(cpu, nohz.cpu_mask))
4115 return; 4490 return;
4116#endif 4491#endif
4117 if (time_after_eq(jiffies, rq->next_balance)) 4492 /* Don't need to rebalance while attached to NULL domain */
4493 if (time_after_eq(jiffies, rq->next_balance) &&
4494 likely(!on_null_domain(cpu)))
4118 raise_softirq(SCHED_SOFTIRQ); 4495 raise_softirq(SCHED_SOFTIRQ);
4119} 4496}
4120 4497
@@ -4508,11 +4885,33 @@ static inline void schedule_debug(struct task_struct *prev)
4508#endif 4885#endif
4509} 4886}
4510 4887
4888static void put_prev_task(struct rq *rq, struct task_struct *prev)
4889{
4890 if (prev->state == TASK_RUNNING) {
4891 u64 runtime = prev->se.sum_exec_runtime;
4892
4893 runtime -= prev->se.prev_sum_exec_runtime;
4894 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
4895
4896 /*
4897 * In order to avoid avg_overlap growing stale when we are
4898 * indeed overlapping and hence not getting put to sleep, grow
4899 * the avg_overlap on preemption.
4900 *
4901 * We use the average preemption runtime because that
4902 * correlates to the amount of cache footprint a task can
4903 * build up.
4904 */
4905 update_avg(&prev->se.avg_overlap, runtime);
4906 }
4907 prev->sched_class->put_prev_task(rq, prev);
4908}
4909
4511/* 4910/*
4512 * Pick up the highest-prio task: 4911 * Pick up the highest-prio task:
4513 */ 4912 */
4514static inline struct task_struct * 4913static inline struct task_struct *
4515pick_next_task(struct rq *rq, struct task_struct *prev) 4914pick_next_task(struct rq *rq)
4516{ 4915{
4517 const struct sched_class *class; 4916 const struct sched_class *class;
4518 struct task_struct *p; 4917 struct task_struct *p;
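
The put_prev_task() wrapper added in the hunk above feeds the time a still-runnable task ran before being preempted into its avg_overlap, clamped to twice sysctl_sched_migration_cost. A small sketch of that step; update_avg() is assumed to be the 1/8 exponential average sched.c uses elsewhere, and 0.5 ms is only the usual migration-cost default:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Assumed to match sched.c's update_avg(): a 1/8 exponential average. */
static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = (s64)(sample - *avg);

        *avg += diff >> 3;
}

int main(void)
{
        const u64 migration_cost = 500000;      /* 0.5 ms, the usual default */
        u64 avg_overlap = 0;
        u64 runtime = 3000000;                  /* ran 3 ms before preemption */

        /* Clamp as put_prev_task() does, so one long slice cannot blow
         * the average up and defeat the sync-wakeup heuristic. */
        if (runtime > 2 * migration_cost)
                runtime = 2 * migration_cost;

        update_avg(&avg_overlap, runtime);
        printf("avg_overlap after one preemption: %llu ns\n",
               (unsigned long long)avg_overlap);
        return 0;
}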
@@ -4584,8 +4983,8 @@ need_resched_nonpreemptible:
4584 if (unlikely(!rq->nr_running)) 4983 if (unlikely(!rq->nr_running))
4585 idle_balance(cpu, rq); 4984 idle_balance(cpu, rq);
4586 4985
4587 prev->sched_class->put_prev_task(rq, prev); 4986 put_prev_task(rq, prev);
4588 next = pick_next_task(rq, prev); 4987 next = pick_next_task(rq);
4589 4988
4590 if (likely(prev != next)) { 4989 if (likely(prev != next)) {
4591 sched_info_switch(prev, next); 4990 sched_info_switch(prev, next);
@@ -4707,7 +5106,7 @@ asmlinkage void __sched preempt_schedule(void)
4707 * between schedule and now. 5106 * between schedule and now.
4708 */ 5107 */
4709 barrier(); 5108 barrier();
4710 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); 5109 } while (need_resched());
4711} 5110}
4712EXPORT_SYMBOL(preempt_schedule); 5111EXPORT_SYMBOL(preempt_schedule);
4713 5112
@@ -4736,7 +5135,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
4736 * between schedule and now. 5135 * between schedule and now.
4737 */ 5136 */
4738 barrier(); 5137 barrier();
4739 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); 5138 } while (need_resched());
4740} 5139}
4741 5140
4742#endif /* CONFIG_PREEMPT */ 5141#endif /* CONFIG_PREEMPT */
@@ -4797,11 +5196,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4797 __wake_up_common(q, mode, 1, 0, NULL); 5196 __wake_up_common(q, mode, 1, 0, NULL);
4798} 5197}
4799 5198
5199void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
5200{
5201 __wake_up_common(q, mode, 1, 0, key);
5202}
5203
4800/** 5204/**
4801 * __wake_up_sync - wake up threads blocked on a waitqueue. 5205 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
4802 * @q: the waitqueue 5206 * @q: the waitqueue
4803 * @mode: which threads 5207 * @mode: which threads
4804 * @nr_exclusive: how many wake-one or wake-many threads to wake up 5208 * @nr_exclusive: how many wake-one or wake-many threads to wake up
5209 * @key: opaque value to be passed to wakeup targets
4805 * 5210 *
4806 * The sync wakeup differs that the waker knows that it will schedule 5211 * The sync wakeup differs that the waker knows that it will schedule
4807 * away soon, so while the target thread will be woken up, it will not 5212 * away soon, so while the target thread will be woken up, it will not
@@ -4810,8 +5215,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4810 * 5215 *
4811 * On UP it can prevent extra preemption. 5216 * On UP it can prevent extra preemption.
4812 */ 5217 */
4813void 5218void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4814__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) 5219 int nr_exclusive, void *key)
4815{ 5220{
4816 unsigned long flags; 5221 unsigned long flags;
4817 int sync = 1; 5222 int sync = 1;
@@ -4823,9 +5228,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4823 sync = 0; 5228 sync = 0;
4824 5229
4825 spin_lock_irqsave(&q->lock, flags); 5230 spin_lock_irqsave(&q->lock, flags);
4826 __wake_up_common(q, mode, nr_exclusive, sync, NULL); 5231 __wake_up_common(q, mode, nr_exclusive, sync, key);
4827 spin_unlock_irqrestore(&q->lock, flags); 5232 spin_unlock_irqrestore(&q->lock, flags);
4828} 5233}
5234EXPORT_SYMBOL_GPL(__wake_up_sync_key);
5235
5236/*
5237 * __wake_up_sync - see __wake_up_sync_key()
5238 */
5239void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
5240{
5241 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
5242}
4829EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ 5243EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4830 5244
4831/** 5245/**
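
The new __wake_up_locked_key()/__wake_up_sync_key() entry points above only thread an opaque key pointer down to each wait-queue entry's wakeup callback, which may use it to decline wakeups it does not care about (poll-style event masks are the natural consumer). A toy callback sketch of the idea, independent of the kernel's waitqueue types:

#include <stdio.h>

/* Userspace sketch: each waiter supplies a wake function that may inspect
 * the opaque key and decline the wakeup, as kernel waitqueue entries can. */
struct waiter {
        const char *name;
        unsigned int interested_mask;
        int (*wake)(struct waiter *w, void *key);
};

static int wake_if_interested(struct waiter *w, void *key)
{
        unsigned int events = key ? *(unsigned int *)key : ~0u;

        if (!(events & w->interested_mask))
                return 0;       /* key carries nothing we care about */
        printf("%s woken\n", w->name);
        return 1;
}

/* Shape of __wake_up_common() with a key: pass it through to every waiter. */
static void wake_up_key(struct waiter *waiters, int n, void *key)
{
        int i;

        for (i = 0; i < n; i++)
                waiters[i].wake(&waiters[i], key);
}

int main(void)
{
        struct waiter w[] = {
                { "reader", 0x1, wake_if_interested },
                { "writer", 0x4, wake_if_interested },
        };
        unsigned int readable = 0x1;

        wake_up_key(w, 2, &readable);   /* only "reader" prints */
        return 0;
}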
@@ -5210,7 +5624,7 @@ SYSCALL_DEFINE1(nice, int, increment)
5210 if (increment > 40) 5624 if (increment > 40)
5211 increment = 40; 5625 increment = 40;
5212 5626
5213 nice = PRIO_TO_NICE(current->static_prio) + increment; 5627 nice = TASK_NICE(current) + increment;
5214 if (nice < -20) 5628 if (nice < -20)
5215 nice = -20; 5629 nice = -20;
5216 if (nice > 19) 5630 if (nice > 19)
@@ -6483,7 +6897,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
6483 if (!rq->nr_running) 6897 if (!rq->nr_running)
6484 break; 6898 break;
6485 update_rq_clock(rq); 6899 update_rq_clock(rq);
6486 next = pick_next_task(rq, rq->curr); 6900 next = pick_next_task(rq);
6487 if (!next) 6901 if (!next)
6488 break; 6902 break;
6489 next->sched_class->put_prev_task(rq, next); 6903 next->sched_class->put_prev_task(rq, next);
@@ -8278,11 +8692,15 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8278 __set_bit(MAX_RT_PRIO, array->bitmap); 8692 __set_bit(MAX_RT_PRIO, array->bitmap);
8279 8693
8280#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 8694#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
8281 rt_rq->highest_prio = MAX_RT_PRIO; 8695 rt_rq->highest_prio.curr = MAX_RT_PRIO;
8696#ifdef CONFIG_SMP
8697 rt_rq->highest_prio.next = MAX_RT_PRIO;
8698#endif
8282#endif 8699#endif
8283#ifdef CONFIG_SMP 8700#ifdef CONFIG_SMP
8284 rt_rq->rt_nr_migratory = 0; 8701 rt_rq->rt_nr_migratory = 0;
8285 rt_rq->overloaded = 0; 8702 rt_rq->overloaded = 0;
8703 plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
8286#endif 8704#endif
8287 8705
8288 rt_rq->rt_time = 0; 8706 rt_rq->rt_time = 0;
@@ -9658,7 +10076,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9658 struct cpuacct *ca; 10076 struct cpuacct *ca;
9659 int cpu; 10077 int cpu;
9660 10078
9661 if (!cpuacct_subsys.active) 10079 if (unlikely(!cpuacct_subsys.active))
9662 return; 10080 return;
9663 10081
9664 cpu = task_cpu(tsk); 10082 cpu = task_cpu(tsk);
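
With put_prev_task() hoisted into schedule(), pick_next_task() now takes only the runqueue and simply walks the scheduling classes in priority order, returning the first runnable task. A simplified sketch of that dispatch, using a plain table instead of the kernel's linked list of sched_class objects:

#include <stdio.h>
#include <stddef.h>

struct rq;                              /* opaque in this sketch */
struct task { const char *comm; };

/* Only the hook pick_next_task() needs from each scheduling class. */
struct sched_class {
        const char *name;
        struct task *(*pick_next_task)(struct rq *rq);
};

static struct task an_rt_task = { "rt-worker" };

static struct task *pick_rt(struct rq *rq)   { (void)rq; return &an_rt_task; }
static struct task *pick_fair(struct rq *rq) { (void)rq; return NULL; }
static struct task *pick_idle(struct rq *rq) { (void)rq; return NULL; }

/* Highest priority class first, as in the kernel: rt, fair, idle. */
static const struct sched_class classes[] = {
        { "rt",   pick_rt },
        { "fair", pick_fair },
        { "idle", pick_idle },
};

static struct task *pick_next_task(struct rq *rq)
{
        size_t i;

        for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
                struct task *p = classes[i].pick_next_task(rq);

                if (p)
                        return p;
        }
        return NULL;    /* unreachable in the kernel: idle always has a task */
}

int main(void)
{
        struct task *next = pick_next_task(NULL);

        printf("next task: %s\n", next ? next->comm : "(none)");
        return 0;
}

The kernel version additionally short-circuits straight to the fair class when every runnable task on the runqueue is a CFS task.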
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 7ec82c1c61c5..819f17ac796e 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -45,9 +45,6 @@ static __read_mostly int sched_clock_running;
45 45
46#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 46#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
47__read_mostly int sched_clock_stable; 47__read_mostly int sched_clock_stable;
48#else
49static const int sched_clock_stable = 1;
50#endif
51 48
52struct sched_clock_data { 49struct sched_clock_data {
53 /* 50 /*
@@ -116,14 +113,9 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
116 s64 delta = now - scd->tick_raw; 113 s64 delta = now - scd->tick_raw;
117 u64 clock, min_clock, max_clock; 114 u64 clock, min_clock, max_clock;
118 115
119 WARN_ON_ONCE(!irqs_disabled());
120
121 if (unlikely(delta < 0)) 116 if (unlikely(delta < 0))
122 delta = 0; 117 delta = 0;
123 118
124 if (unlikely(!sched_clock_running))
125 return 0ull;
126
127 /* 119 /*
128 * scd->clock = clamp(scd->tick_gtod + delta, 120 * scd->clock = clamp(scd->tick_gtod + delta,
129 * max(scd->tick_gtod, scd->clock), 121 * max(scd->tick_gtod, scd->clock),
@@ -213,18 +205,20 @@ u64 sched_clock_cpu(int cpu)
213 return clock; 205 return clock;
214} 206}
215 207
216#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
217
218void sched_clock_tick(void) 208void sched_clock_tick(void)
219{ 209{
220 struct sched_clock_data *scd = this_scd(); 210 struct sched_clock_data *scd;
221 u64 now, now_gtod; 211 u64 now, now_gtod;
222 212
213 if (sched_clock_stable)
214 return;
215
223 if (unlikely(!sched_clock_running)) 216 if (unlikely(!sched_clock_running))
224 return; 217 return;
225 218
226 WARN_ON_ONCE(!irqs_disabled()); 219 WARN_ON_ONCE(!irqs_disabled());
227 220
221 scd = this_scd();
228 now_gtod = ktime_to_ns(ktime_get()); 222 now_gtod = ktime_to_ns(ktime_get());
229 now = sched_clock(); 223 now = sched_clock();
230 224
@@ -257,6 +251,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
257} 251}
258EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); 252EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
259 253
254#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
255
256void sched_clock_init(void)
257{
258 sched_clock_running = 1;
259}
260
261u64 sched_clock_cpu(int cpu)
262{
263 if (unlikely(!sched_clock_running))
264 return 0;
265
266 return sched_clock();
267}
268
260#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 269#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
261 270
262unsigned long long cpu_clock(int cpu) 271unsigned long long cpu_clock(int cpu)
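
For configurations without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the new fallback above simply returns sched_clock(); the unstable case keeps the per-cpu filtering whose window the __update_sched_clock() comment describes. A sketch of that windowing; the upper bound of one tick (tick_gtod + TICK_NSEC) is inferred from the comment, which the hunk truncates, and HZ=1000 is assumed for the numbers:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define TICK_NSEC 1000000ULL            /* one tick at HZ=1000 (assumed) */

static u64 clamp_u64(u64 v, u64 lo, u64 hi)
{
        if (v < lo)
                return lo;
        if (v > hi)
                return hi;
        return v;
}

/* Windowing from the __update_sched_clock() comment: the per-cpu clock may
 * only move forward and may not run more than one tick ahead of the GTOD
 * stamp taken at the last tick. */
static u64 update_clock(u64 tick_gtod, u64 clock, u64 delta)
{
        u64 min_clock = tick_gtod > clock ? tick_gtod : clock;
        u64 max_clock = tick_gtod + TICK_NSEC;

        return clamp_u64(tick_gtod + delta, min_clock, max_clock);
}

int main(void)
{
        /* The raw clock claims 3 ms elapsed since the last tick; the
         * result is clamped to the one-tick window: 11000000 ns. */
        printf("%llu\n", (unsigned long long)
               update_clock(10000000, 10200000, 3000000));
        return 0;
}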
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 642a94ef8a0a..9a7e859b8fbf 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -25,7 +25,7 @@ struct cpupri {
25 25
26#ifdef CONFIG_SMP 26#ifdef CONFIG_SMP
27int cpupri_find(struct cpupri *cp, 27int cpupri_find(struct cpupri *cp,
28 struct task_struct *p, cpumask_t *lowest_mask); 28 struct task_struct *p, struct cpumask *lowest_mask);
29void cpupri_set(struct cpupri *cp, int cpu, int pri); 29void cpupri_set(struct cpupri *cp, int cpu, int pri);
30int cpupri_init(struct cpupri *cp, bool bootmem); 30int cpupri_init(struct cpupri *cp, bool bootmem);
31void cpupri_cleanup(struct cpupri *cp); 31void cpupri_cleanup(struct cpupri *cp);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e4169..467ca72f1657 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -272,7 +272,6 @@ static void print_cpu(struct seq_file *m, int cpu)
272 P(nr_switches); 272 P(nr_switches);
273 P(nr_load_updates); 273 P(nr_load_updates);
274 P(nr_uninterruptible); 274 P(nr_uninterruptible);
275 SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
276 PN(next_balance); 275 PN(next_balance);
277 P(curr->pid); 276 P(curr->pid);
278 PN(clock); 277 PN(clock);
@@ -287,9 +286,6 @@ static void print_cpu(struct seq_file *m, int cpu)
287#ifdef CONFIG_SCHEDSTATS 286#ifdef CONFIG_SCHEDSTATS
288#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); 287#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
289 288
290 P(yld_exp_empty);
291 P(yld_act_empty);
292 P(yld_both_empty);
293 P(yld_count); 289 P(yld_count);
294 290
295 P(sched_switch); 291 P(sched_switch);
@@ -314,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
314 u64 now = ktime_to_ns(ktime_get()); 310 u64 now = ktime_to_ns(ktime_get());
315 int cpu; 311 int cpu;
316 312
317 SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n", 313 SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
318 init_utsname()->release, 314 init_utsname()->release,
319 (int)strcspn(init_utsname()->version, " "), 315 (int)strcspn(init_utsname()->version, " "),
320 init_utsname()->version); 316 init_utsname()->version);
@@ -325,6 +321,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
325 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x)) 321 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
326#define PN(x) \ 322#define PN(x) \
327 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) 323 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
324 P(jiffies);
328 PN(sysctl_sched_latency); 325 PN(sysctl_sched_latency);
329 PN(sysctl_sched_min_granularity); 326 PN(sysctl_sched_min_granularity);
330 PN(sysctl_sched_wakeup_granularity); 327 PN(sysctl_sched_wakeup_granularity);
@@ -397,6 +394,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
397 PN(se.vruntime); 394 PN(se.vruntime);
398 PN(se.sum_exec_runtime); 395 PN(se.sum_exec_runtime);
399 PN(se.avg_overlap); 396 PN(se.avg_overlap);
397 PN(se.avg_wakeup);
400 398
401 nr_switches = p->nvcsw + p->nivcsw; 399 nr_switches = p->nvcsw + p->nivcsw;
402 400
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0566f2a03c42..3816f217f119 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1314,16 +1314,63 @@ out:
1314} 1314}
1315#endif /* CONFIG_SMP */ 1315#endif /* CONFIG_SMP */
1316 1316
1317static unsigned long wakeup_gran(struct sched_entity *se) 1317/*
1318 * Adaptive granularity
1319 *
1320 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1321 * with the limit of wakeup_gran -- when it never does a wakeup.
1322 *
1323 * So the smaller avg_wakeup is, the faster we want this task to preempt,
1324 * but we don't want to treat the preemptee unfairly and therefore allow it
1325 * to run for at least the amount of time we'd like to run.
1326 *
1327 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1328 *
1329 * NOTE: we use *nr_running to scale with load, this nicely matches the
1330 * degrading latency on load.
1331 */
1332static unsigned long
1333adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1334{
1335 u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1336 u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1337 u64 gran = 0;
1338
1339 if (this_run < expected_wakeup)
1340 gran = expected_wakeup - this_run;
1341
1342 return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1343}
1344
1345static unsigned long
1346wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1318{ 1347{
1319 unsigned long gran = sysctl_sched_wakeup_granularity; 1348 unsigned long gran = sysctl_sched_wakeup_granularity;
1320 1349
1350 if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1351 gran = adaptive_gran(curr, se);
1352
1321 /* 1353 /*
1322 * More easily preempt - nice tasks, while not making it harder for 1354 * Since its curr running now, convert the gran from real-time
1323 * + nice tasks. 1355 * to virtual-time in his units.
1324 */ 1356 */
1325 if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD) 1357 if (sched_feat(ASYM_GRAN)) {
1326 gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se); 1358 /*
1359 * By using 'se' instead of 'curr' we penalize light tasks, so
1360 * they get preempted easier. That is, if 'se' < 'curr' then
1361 * the resulting gran will be larger, therefore penalizing the
1362 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1363 * be smaller, again penalizing the lighter task.
1364 *
1365 * This is especially important for buddies when the leftmost
1366 * task is higher priority than the buddy.
1367 */
1368 if (unlikely(se->load.weight != NICE_0_LOAD))
1369 gran = calc_delta_fair(gran, se);
1370 } else {
1371 if (unlikely(curr->load.weight != NICE_0_LOAD))
1372 gran = calc_delta_fair(gran, curr);
1373 }
1327 1374
1328 return gran; 1375 return gran;
1329} 1376}
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1350 if (vdiff <= 0) 1397 if (vdiff <= 0)
1351 return -1; 1398 return -1;
1352 1399
1353 gran = wakeup_gran(curr); 1400 gran = wakeup_gran(curr, se);
1354 if (vdiff > gran) 1401 if (vdiff > gran)
1355 return 1; 1402 return 1;
1356 1403
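
The ADAPTIVE_GRAN path above lowers the effective wakeup granularity for wakees whose avg_wakeup says they run only briefly before waking someone else, while letting the current task keep at least the time it has already built up. A standalone sketch with concrete numbers; the 5 ms value for sysctl_sched_wakeup_granularity is only the assumed default:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Assumed default for sysctl_sched_wakeup_granularity: 5 ms. */
static const u64 wakeup_granularity = 5000000ULL;

/* Mirror of adaptive_gran(): a wakee with a small avg_wakeup gets a small
 * granularity and so preempts sooner; the longer curr has already run
 * (this_run), the smaller the remaining granularity.  The result is scaled
 * by nr_running and never exceeds the static wakeup granularity. */
static u64 adaptive_gran(u64 curr_this_run, u64 se_avg_wakeup,
                         unsigned int nr_running)
{
        u64 expected_wakeup = 2 * se_avg_wakeup * nr_running;
        u64 gran = 0;

        if (curr_this_run < expected_wakeup)
                gran = expected_wakeup - curr_this_run;

        if (gran > wakeup_granularity)
                gran = wakeup_granularity;
        return gran;
}

int main(void)
{
        /* curr has run 1 ms since it was picked; the wakee usually wakes
         * someone after 0.5 ms; 3 tasks runnable: expected = 3 ms, so
         * gran = 2 ms, below the 5 ms cap. */
        printf("gran = %llu ns\n", (unsigned long long)
               adaptive_gran(1000000, 500000, 3));
        return 0;
}

wakeup_gran() then converts this real-time value into virtual time with calc_delta_fair(), penalizing lighter wakees exactly as the comment in the hunk explains.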
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 07bc02e99ab1..4569bfa7df9b 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,5 +1,6 @@
1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) 1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
2SCHED_FEAT(NORMALIZED_SLEEPER, 1) 2SCHED_FEAT(NORMALIZED_SLEEPER, 0)
3SCHED_FEAT(ADAPTIVE_GRAN, 1)
3SCHED_FEAT(WAKEUP_PREEMPT, 1) 4SCHED_FEAT(WAKEUP_PREEMPT, 1)
4SCHED_FEAT(START_DEBIT, 1) 5SCHED_FEAT(START_DEBIT, 1)
5SCHED_FEAT(AFFINE_WAKEUPS, 1) 6SCHED_FEAT(AFFINE_WAKEUPS, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index da932f4c8524..299d012b4394 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@
3 * policies) 3 * policies)
4 */ 4 */
5 5
6static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
7{
8 return container_of(rt_se, struct task_struct, rt);
9}
10
11#ifdef CONFIG_RT_GROUP_SCHED
12
13static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
14{
15 return rt_rq->rq;
16}
17
18static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
19{
20 return rt_se->rt_rq;
21}
22
23#else /* CONFIG_RT_GROUP_SCHED */
24
25static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
26{
27 return container_of(rt_rq, struct rq, rt);
28}
29
30static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
31{
32 struct task_struct *p = rt_task_of(rt_se);
33 struct rq *rq = task_rq(p);
34
35 return &rq->rt;
36}
37
38#endif /* CONFIG_RT_GROUP_SCHED */
39
6#ifdef CONFIG_SMP 40#ifdef CONFIG_SMP
7 41
8static inline int rt_overloaded(struct rq *rq) 42static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
37 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 71 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
38} 72}
39 73
40static void update_rt_migration(struct rq *rq) 74static void update_rt_migration(struct rt_rq *rt_rq)
41{ 75{
42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) { 76 if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
43 if (!rq->rt.overloaded) { 77 if (!rt_rq->overloaded) {
44 rt_set_overload(rq); 78 rt_set_overload(rq_of_rt_rq(rt_rq));
45 rq->rt.overloaded = 1; 79 rt_rq->overloaded = 1;
46 } 80 }
47 } else if (rq->rt.overloaded) { 81 } else if (rt_rq->overloaded) {
48 rt_clear_overload(rq); 82 rt_clear_overload(rq_of_rt_rq(rt_rq));
49 rq->rt.overloaded = 0; 83 rt_rq->overloaded = 0;
50 } 84 }
51} 85}
52#endif /* CONFIG_SMP */
53 86
54static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 87static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
88{
89 if (rt_se->nr_cpus_allowed > 1)
90 rt_rq->rt_nr_migratory++;
91
92 update_rt_migration(rt_rq);
93}
94
95static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
96{
97 if (rt_se->nr_cpus_allowed > 1)
98 rt_rq->rt_nr_migratory--;
99
100 update_rt_migration(rt_rq);
101}
102
103static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
104{
105 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
106 plist_node_init(&p->pushable_tasks, p->prio);
107 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
108}
109
110static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
111{
112 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
113}
114
115#else
116
117static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
55{ 118{
56 return container_of(rt_se, struct task_struct, rt);
57} 119}
58 120
121static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
122{
123}
124
125static inline
126void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
127{
128}
129
130static inline
131void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
132{
133}
134
135#endif /* CONFIG_SMP */
136
59static inline int on_rt_rq(struct sched_rt_entity *rt_se) 137static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60{ 138{
61 return !list_empty(&rt_se->run_list); 139 return !list_empty(&rt_se->run_list);
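
The enqueue_pushable_task()/dequeue_pushable_task() helpers above keep every migratable RT task on a plist sorted by priority, so the push logic can always take the best candidate from the head. A userspace sketch of the same idea with a hand-rolled sorted list (the kernel's plist API is only imitated here, not used):

#include <stdio.h>

/* Userspace stand-in for the pushable-tasks plist: a list kept sorted by
 * prio, lowest numeric value (= highest RT priority) first. */
struct pushable {
        int prio;
        const char *comm;
        struct pushable *next;
};

/* Like enqueue_pushable_task(): insert in priority order. */
static void enqueue_pushable(struct pushable **head, struct pushable *p)
{
        while (*head && (*head)->prio <= p->prio)
                head = &(*head)->next;
        p->next = *head;
        *head = p;
}

/* Like pick_next_pushable_task(): the head is the best push candidate. */
static struct pushable *pick_next_pushable(struct pushable *head)
{
        return head;
}

int main(void)
{
        struct pushable a = { 30, "rt-a", NULL };
        struct pushable b = { 10, "rt-b", NULL };
        struct pushable *head = NULL;

        enqueue_pushable(&head, &a);
        enqueue_pushable(&head, &b);
        printf("push candidate: %s (prio %d)\n",
               pick_next_pushable(head)->comm, pick_next_pushable(head)->prio);
        return 0;
}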
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
79#define for_each_leaf_rt_rq(rt_rq, rq) \ 157#define for_each_leaf_rt_rq(rt_rq, rq) \
80 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) 158 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81 159
82static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83{
84 return rt_rq->rq;
85}
86
87static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88{
89 return rt_se->rt_rq;
90}
91
92#define for_each_sched_rt_entity(rt_se) \ 160#define for_each_sched_rt_entity(rt_se) \
93 for (; rt_se; rt_se = rt_se->parent) 161 for (; rt_se; rt_se = rt_se->parent)
94 162
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
108 if (rt_rq->rt_nr_running) { 176 if (rt_rq->rt_nr_running) {
109 if (rt_se && !on_rt_rq(rt_se)) 177 if (rt_se && !on_rt_rq(rt_se))
110 enqueue_rt_entity(rt_se); 178 enqueue_rt_entity(rt_se);
111 if (rt_rq->highest_prio < curr->prio) 179 if (rt_rq->highest_prio.curr < curr->prio)
112 resched_task(curr); 180 resched_task(curr);
113 } 181 }
114} 182}
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
176#define for_each_leaf_rt_rq(rt_rq, rq) \ 244#define for_each_leaf_rt_rq(rt_rq, rq) \
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 245 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178 246
179static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180{
181 return container_of(rt_rq, struct rq, rt);
182}
183
184static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185{
186 struct task_struct *p = rt_task_of(rt_se);
187 struct rq *rq = task_rq(p);
188
189 return &rq->rt;
190}
191
192#define for_each_sched_rt_entity(rt_se) \ 247#define for_each_sched_rt_entity(rt_se) \
193 for (; rt_se; rt_se = NULL) 248 for (; rt_se; rt_se = NULL)
194 249
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
473 struct rt_rq *rt_rq = group_rt_rq(rt_se); 528 struct rt_rq *rt_rq = group_rt_rq(rt_se);
474 529
475 if (rt_rq) 530 if (rt_rq)
476 return rt_rq->highest_prio; 531 return rt_rq->highest_prio.curr;
477#endif 532#endif
478 533
479 return rt_task_of(rt_se)->prio; 534 return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
547 } 602 }
548} 603}
549 604
550static inline 605#if defined CONFIG_SMP
551void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 606
607static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
608
609static inline int next_prio(struct rq *rq)
552{ 610{
553 WARN_ON(!rt_prio(rt_se_prio(rt_se))); 611 struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
554 rt_rq->rt_nr_running++; 612
555#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 613 if (next && rt_prio(next->prio))
556 if (rt_se_prio(rt_se) < rt_rq->highest_prio) { 614 return next->prio;
557#ifdef CONFIG_SMP 615 else
558 struct rq *rq = rq_of_rt_rq(rt_rq); 616 return MAX_RT_PRIO;
559#endif 617}
618
619static void
620inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
621{
622 struct rq *rq = rq_of_rt_rq(rt_rq);
623
624 if (prio < prev_prio) {
625
626 /*
627 * If the new task is higher in priority than anything on the
628 * run-queue, we know that the previous high becomes our
629 * next-highest.
630 */
631 rt_rq->highest_prio.next = prev_prio;
560 632
561 rt_rq->highest_prio = rt_se_prio(rt_se);
562#ifdef CONFIG_SMP
563 if (rq->online) 633 if (rq->online)
564 cpupri_set(&rq->rd->cpupri, rq->cpu, 634 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
565 rt_se_prio(rt_se));
566#endif
567 }
568#endif
569#ifdef CONFIG_SMP
570 if (rt_se->nr_cpus_allowed > 1) {
571 struct rq *rq = rq_of_rt_rq(rt_rq);
572 635
573 rq->rt.rt_nr_migratory++; 636 } else if (prio == rt_rq->highest_prio.curr)
574 } 637 /*
638 * If the next task is equal in priority to the highest on
639 * the run-queue, then we implicitly know that the next highest
640 * task cannot be any lower than current
641 */
642 rt_rq->highest_prio.next = prio;
643 else if (prio < rt_rq->highest_prio.next)
644 /*
645 * Otherwise, we need to recompute next-highest
646 */
647 rt_rq->highest_prio.next = next_prio(rq);
648}
575 649
576 update_rt_migration(rq_of_rt_rq(rt_rq)); 650static void
577#endif 651dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
578#ifdef CONFIG_RT_GROUP_SCHED 652{
579 if (rt_se_boosted(rt_se)) 653 struct rq *rq = rq_of_rt_rq(rt_rq);
580 rt_rq->rt_nr_boosted++;
581 654
582 if (rt_rq->tg) 655 if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
583 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); 656 rt_rq->highest_prio.next = next_prio(rq);
584#else 657
585 start_rt_bandwidth(&def_rt_bandwidth); 658 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
586#endif 659 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
587} 660}
588 661
662#else /* CONFIG_SMP */
663
589static inline 664static inline
590void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 665void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
591{ 666static inline
592#ifdef CONFIG_SMP 667void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
593 int highest_prio = rt_rq->highest_prio; 668
594#endif 669#endif /* CONFIG_SMP */
595 670
596 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
597 WARN_ON(!rt_rq->rt_nr_running);
598 rt_rq->rt_nr_running--;
599#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 671#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
672static void
673inc_rt_prio(struct rt_rq *rt_rq, int prio)
674{
675 int prev_prio = rt_rq->highest_prio.curr;
676
677 if (prio < prev_prio)
678 rt_rq->highest_prio.curr = prio;
679
680 inc_rt_prio_smp(rt_rq, prio, prev_prio);
681}
682
683static void
684dec_rt_prio(struct rt_rq *rt_rq, int prio)
685{
686 int prev_prio = rt_rq->highest_prio.curr;
687
600 if (rt_rq->rt_nr_running) { 688 if (rt_rq->rt_nr_running) {
601 struct rt_prio_array *array;
602 689
603 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio); 690 WARN_ON(prio < prev_prio);
604 if (rt_se_prio(rt_se) == rt_rq->highest_prio) { 691
605 /* recalculate */ 692 /*
606 array = &rt_rq->active; 693 * This may have been our highest task, and therefore
607 rt_rq->highest_prio = 694 * we may have some recomputation to do
695 */
696 if (prio == prev_prio) {
697 struct rt_prio_array *array = &rt_rq->active;
698
699 rt_rq->highest_prio.curr =
608 sched_find_first_bit(array->bitmap); 700 sched_find_first_bit(array->bitmap);
609 } /* otherwise leave rq->highest prio alone */ 701 }
702
610 } else 703 } else
611 rt_rq->highest_prio = MAX_RT_PRIO; 704 rt_rq->highest_prio.curr = MAX_RT_PRIO;
612#endif
613#ifdef CONFIG_SMP
614 if (rt_se->nr_cpus_allowed > 1) {
615 struct rq *rq = rq_of_rt_rq(rt_rq);
616 rq->rt.rt_nr_migratory--;
617 }
618 705
619 if (rt_rq->highest_prio != highest_prio) { 706 dec_rt_prio_smp(rt_rq, prio, prev_prio);
620 struct rq *rq = rq_of_rt_rq(rt_rq); 707}
621 708
622 if (rq->online) 709#else
623 cpupri_set(&rq->rd->cpupri, rq->cpu, 710
624 rt_rq->highest_prio); 711static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
625 } 712static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
713
714#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
626 715
627 update_rt_migration(rq_of_rt_rq(rt_rq));
628#endif /* CONFIG_SMP */
629#ifdef CONFIG_RT_GROUP_SCHED 716#ifdef CONFIG_RT_GROUP_SCHED
717
718static void
719inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
720{
721 if (rt_se_boosted(rt_se))
722 rt_rq->rt_nr_boosted++;
723
724 if (rt_rq->tg)
725 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
726}
727
728static void
729dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
730{
630 if (rt_se_boosted(rt_se)) 731 if (rt_se_boosted(rt_se))
631 rt_rq->rt_nr_boosted--; 732 rt_rq->rt_nr_boosted--;
632 733
633 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); 734 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
634#endif 735}
736
737#else /* CONFIG_RT_GROUP_SCHED */
738
739static void
740inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
741{
742 start_rt_bandwidth(&def_rt_bandwidth);
743}
744
745static inline
746void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
747
748#endif /* CONFIG_RT_GROUP_SCHED */
749
750static inline
751void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
752{
753 int prio = rt_se_prio(rt_se);
754
755 WARN_ON(!rt_prio(prio));
756 rt_rq->rt_nr_running++;
757
758 inc_rt_prio(rt_rq, prio);
759 inc_rt_migration(rt_se, rt_rq);
760 inc_rt_group(rt_se, rt_rq);
761}
762
763static inline
764void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
765{
766 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
767 WARN_ON(!rt_rq->rt_nr_running);
768 rt_rq->rt_nr_running--;
769
770 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
771 dec_rt_migration(rt_se, rt_rq);
772 dec_rt_group(rt_se, rt_rq);
635} 773}
636 774
637static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) 775static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
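
The inc_rt_prio()/dec_rt_prio() pair above maintains both the highest queued priority and a cached next-highest, which later lets pull_rt_task() skip a remote runqueue without taking its lock. A compact sketch of the enqueue-side update; the dequeue-side recomputation through the priority bitmap is elided:

#include <stdio.h>

#define MAX_RT_PRIO 100

/* Stand-in for rt_rq->highest_prio: lower value = higher priority. */
struct rt_prio_cache {
        int curr;       /* highest-priority queued task */
        int next;       /* next-highest, consulted by pull_rt_task() */
};

/* Mirror the enqueue-side logic of inc_rt_prio()/inc_rt_prio_smp(). */
static void inc_rt_prio(struct rt_prio_cache *c, int prio)
{
        if (prio < c->curr) {
                /* New task beats everything: old best becomes next-highest. */
                c->next = c->curr;
                c->curr = prio;
        } else if (prio < c->next) {
                /* Ties with curr or sits between curr and next. */
                c->next = prio;
        }
}

int main(void)
{
        struct rt_prio_cache c = { MAX_RT_PRIO, MAX_RT_PRIO };

        inc_rt_prio(&c, 50);    /* curr=50, next=100 */
        inc_rt_prio(&c, 30);    /* curr=30, next=50  */
        inc_rt_prio(&c, 40);    /* curr=30, next=40  */
        printf("curr=%d next=%d\n", c.curr, c.next);
        return 0;
}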
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
718 856
719 enqueue_rt_entity(rt_se); 857 enqueue_rt_entity(rt_se);
720 858
859 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
860 enqueue_pushable_task(rq, p);
861
721 inc_cpu_load(rq, p->se.load.weight); 862 inc_cpu_load(rq, p->se.load.weight);
722} 863}
723 864
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
728 update_curr_rt(rq); 869 update_curr_rt(rq);
729 dequeue_rt_entity(rt_se); 870 dequeue_rt_entity(rt_se);
730 871
872 dequeue_pushable_task(rq, p);
873
731 dec_cpu_load(rq, p->se.load.weight); 874 dec_cpu_load(rq, p->se.load.weight);
732} 875}
733 876
@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
878 return next; 1021 return next;
879} 1022}
880 1023
881static struct task_struct *pick_next_task_rt(struct rq *rq) 1024static struct task_struct *_pick_next_task_rt(struct rq *rq)
882{ 1025{
883 struct sched_rt_entity *rt_se; 1026 struct sched_rt_entity *rt_se;
884 struct task_struct *p; 1027 struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
900 1043
901 p = rt_task_of(rt_se); 1044 p = rt_task_of(rt_se);
902 p->se.exec_start = rq->clock; 1045 p->se.exec_start = rq->clock;
1046
1047 return p;
1048}
1049
1050static struct task_struct *pick_next_task_rt(struct rq *rq)
1051{
1052 struct task_struct *p = _pick_next_task_rt(rq);
1053
1054 /* The running task is never eligible for pushing */
1055 if (p)
1056 dequeue_pushable_task(rq, p);
1057
903 return p; 1058 return p;
904} 1059}
905 1060
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
907{ 1062{
908 update_curr_rt(rq); 1063 update_curr_rt(rq);
909 p->se.exec_start = 0; 1064 p->se.exec_start = 0;
1065
1066 /*
1067 * The previous task needs to be made eligible for pushing
1068 * if it is still active
1069 */
1070 if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1071 enqueue_pushable_task(rq, p);
910} 1072}
911 1073
912#ifdef CONFIG_SMP 1074#ifdef CONFIG_SMP
@@ -1080,7 +1242,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1080 } 1242 }
1081 1243
1082 /* If this rq is still suitable use it. */ 1244 /* If this rq is still suitable use it. */
1083 if (lowest_rq->rt.highest_prio > task->prio) 1245 if (lowest_rq->rt.highest_prio.curr > task->prio)
1084 break; 1246 break;
1085 1247
1086 /* try again */ 1248 /* try again */
@@ -1091,6 +1253,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1091 return lowest_rq; 1253 return lowest_rq;
1092} 1254}
1093 1255
1256static inline int has_pushable_tasks(struct rq *rq)
1257{
1258 return !plist_head_empty(&rq->rt.pushable_tasks);
1259}
1260
1261static struct task_struct *pick_next_pushable_task(struct rq *rq)
1262{
1263 struct task_struct *p;
1264
1265 if (!has_pushable_tasks(rq))
1266 return NULL;
1267
1268 p = plist_first_entry(&rq->rt.pushable_tasks,
1269 struct task_struct, pushable_tasks);
1270
1271 BUG_ON(rq->cpu != task_cpu(p));
1272 BUG_ON(task_current(rq, p));
1273 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1274
1275 BUG_ON(!p->se.on_rq);
1276 BUG_ON(!rt_task(p));
1277
1278 return p;
1279}
1280
1094/* 1281/*
1095 * If the current CPU has more than one RT task, see if the non 1282 * If the current CPU has more than one RT task, see if the non
1096 * running task can migrate over to a CPU that is running a task 1283 * running task can migrate over to a CPU that is running a task
@@ -1100,13 +1287,11 @@ static int push_rt_task(struct rq *rq)
1100{ 1287{
1101 struct task_struct *next_task; 1288 struct task_struct *next_task;
1102 struct rq *lowest_rq; 1289 struct rq *lowest_rq;
1103 int ret = 0;
1104 int paranoid = RT_MAX_TRIES;
1105 1290
1106 if (!rq->rt.overloaded) 1291 if (!rq->rt.overloaded)
1107 return 0; 1292 return 0;
1108 1293
1109 next_task = pick_next_highest_task_rt(rq, -1); 1294 next_task = pick_next_pushable_task(rq);
1110 if (!next_task) 1295 if (!next_task)
1111 return 0; 1296 return 0;
1112 1297
@@ -1135,16 +1320,34 @@ static int push_rt_task(struct rq *rq)
1135 struct task_struct *task; 1320 struct task_struct *task;
1136 /* 1321 /*
1137 * find lock_lowest_rq releases rq->lock 1322 * find lock_lowest_rq releases rq->lock
1138 * so it is possible that next_task has changed. 1323 * so it is possible that next_task has migrated.
1139 * If it has, then try again. 1324 *
1325 * We need to make sure that the task is still on the same
1326 * run-queue and is also still the next task eligible for
1327 * pushing.
1140 */ 1328 */
1141 task = pick_next_highest_task_rt(rq, -1); 1329 task = pick_next_pushable_task(rq);
1142 if (unlikely(task != next_task) && task && paranoid--) { 1330 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1143 put_task_struct(next_task); 1331 /*
1144 next_task = task; 1332 * If we get here, the task hasn't moved at all, but
1145 goto retry; 1333 * it has failed to push. We will not try again,
1334 * since the other cpus will pull from us when they
1335 * are ready.
1336 */
1337 dequeue_pushable_task(rq, next_task);
1338 goto out;
1146 } 1339 }
1147 goto out; 1340
1341 if (!task)
1342 /* No more tasks, just exit */
1343 goto out;
1344
1345 /*
1346 * Something has shifted, try again.
1347 */
1348 put_task_struct(next_task);
1349 next_task = task;
1350 goto retry;
1148 } 1351 }
1149 1352
1150 deactivate_task(rq, next_task, 0); 1353 deactivate_task(rq, next_task, 0);
@@ -1155,23 +1358,12 @@ static int push_rt_task(struct rq *rq)
1155 1358
1156 double_unlock_balance(rq, lowest_rq); 1359 double_unlock_balance(rq, lowest_rq);
1157 1360
1158 ret = 1;
1159out: 1361out:
1160 put_task_struct(next_task); 1362 put_task_struct(next_task);
1161 1363
1162 return ret; 1364 return 1;
1163} 1365}
1164 1366
1165/*
1166 * TODO: Currently we just use the second highest prio task on
1167 * the queue, and stop when it can't migrate (or there's
1168 * no more RT tasks). There may be a case where a lower
1169 * priority RT task has a different affinity than the
1170 * higher RT task. In this case the lower RT task could
1171 * possibly be able to migrate where as the higher priority
1172 * RT task could not. We currently ignore this issue.
1173 * Enhancements are welcome!
1174 */
1175static void push_rt_tasks(struct rq *rq) 1367static void push_rt_tasks(struct rq *rq)
1176{ 1368{
1177 /* push_rt_task will return true if it moved an RT */ 1369 /* push_rt_task will return true if it moved an RT */
@@ -1182,33 +1374,35 @@ static void push_rt_tasks(struct rq *rq)
1182static int pull_rt_task(struct rq *this_rq) 1374static int pull_rt_task(struct rq *this_rq)
1183{ 1375{
1184 int this_cpu = this_rq->cpu, ret = 0, cpu; 1376 int this_cpu = this_rq->cpu, ret = 0, cpu;
1185 struct task_struct *p, *next; 1377 struct task_struct *p;
1186 struct rq *src_rq; 1378 struct rq *src_rq;
1187 1379
1188 if (likely(!rt_overloaded(this_rq))) 1380 if (likely(!rt_overloaded(this_rq)))
1189 return 0; 1381 return 0;
1190 1382
1191 next = pick_next_task_rt(this_rq);
1192
1193 for_each_cpu(cpu, this_rq->rd->rto_mask) { 1383 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1194 if (this_cpu == cpu) 1384 if (this_cpu == cpu)
1195 continue; 1385 continue;
1196 1386
1197 src_rq = cpu_rq(cpu); 1387 src_rq = cpu_rq(cpu);
1388
1389 /*
1390 * Don't bother taking the src_rq->lock if the next highest
1391 * task is known to be lower-priority than our current task.
1392 * This may look racy, but if this value is about to go
1393 * logically higher, the src_rq will push this task away.
1395 * And if it's going logically lower, we do not care.
1395 */
1396 if (src_rq->rt.highest_prio.next >=
1397 this_rq->rt.highest_prio.curr)
1398 continue;
1399
1198 /* 1400 /*
1199 * We can potentially drop this_rq's lock in 1401 * We can potentially drop this_rq's lock in
1200 * double_lock_balance, and another CPU could 1402 * double_lock_balance, and another CPU could
1201 * steal our next task - hence we must cause 1403 * alter this_rq
1202 * the caller to recalculate the next task
1203 * in that case:
1204 */ 1404 */
1205 if (double_lock_balance(this_rq, src_rq)) { 1405 double_lock_balance(this_rq, src_rq);
1206 struct task_struct *old_next = next;
1207
1208 next = pick_next_task_rt(this_rq);
1209 if (next != old_next)
1210 ret = 1;
1211 }
1212 1406
1213 /* 1407 /*
1214 * Are there still pullable RT tasks? 1408 * Are there still pullable RT tasks?
@@ -1222,7 +1416,7 @@ static int pull_rt_task(struct rq *this_rq)
1222 * Do we have an RT task that preempts 1416 * Do we have an RT task that preempts
1223 * the to-be-scheduled task? 1417 * the to-be-scheduled task?
1224 */ 1418 */
1225 if (p && (!next || (p->prio < next->prio))) { 1419 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1226 WARN_ON(p == src_rq->curr); 1420 WARN_ON(p == src_rq->curr);
1227 WARN_ON(!p->se.on_rq); 1421 WARN_ON(!p->se.on_rq);
1228 1422
@@ -1232,12 +1426,9 @@ static int pull_rt_task(struct rq *this_rq)
1232 * This is just that p is waking up and hasn't 1426 * This is just that p is waking up and hasn't
1233 * had a chance to schedule. We only pull 1427 * had a chance to schedule. We only pull
1234 * p if it is lower in priority than the 1428 * p if it is lower in priority than the
1235 * current task on the run queue or 1429 * current task on the run queue
1236 * this_rq next task is lower in prio than
1237 * the current task on that rq.
1238 */ 1430 */
1239 if (p->prio < src_rq->curr->prio || 1431 if (p->prio < src_rq->curr->prio)
1240 (next && next->prio < src_rq->curr->prio))
1241 goto skip; 1432 goto skip;
1242 1433
1243 ret = 1; 1434 ret = 1;
@@ -1250,13 +1441,7 @@ static int pull_rt_task(struct rq *this_rq)
1250 * case there's an even higher prio task 1441 * in another runqueue. (low likelihood
1251 * in another runqueue. (low likelihood 1442 * but possible)
1252 * but possible) 1443 * but possible)
1253 *
1254 * Update next so that we won't pick a task
1255 * on another cpu with a priority lower (or equal)
1256 * than the one we just picked.
1257 */ 1444 */
1258 next = p;
1259
1260 } 1445 }
1261 skip: 1446 skip:
1262 double_unlock_balance(this_rq, src_rq); 1447 double_unlock_balance(this_rq, src_rq);
@@ -1268,24 +1453,27 @@ static int pull_rt_task(struct rq *this_rq)
1268static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) 1453static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1269{ 1454{
1270 /* Try to pull RT tasks here if we lower this rq's prio */ 1455 /* Try to pull RT tasks here if we lower this rq's prio */
1271 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) 1456 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1272 pull_rt_task(rq); 1457 pull_rt_task(rq);
1273} 1458}
1274 1459
1460/*
1461 * assumes rq->lock is held
1462 */
1463static int needs_post_schedule_rt(struct rq *rq)
1464{
1465 return has_pushable_tasks(rq);
1466}
1467
1275static void post_schedule_rt(struct rq *rq) 1468static void post_schedule_rt(struct rq *rq)
1276{ 1469{
1277 /* 1470 /*
1278 * If we have more than one rt_task queued, then 1471 * This is only called if needs_post_schedule_rt() indicates that
1279 * see if we can push the other rt_tasks off to other CPUS. 1472 * we need to push tasks away
1280 * Note we may release the rq lock, and since
1281 * the lock was owned by prev, we need to release it
1282 * first via finish_lock_switch and then reaquire it here.
1283 */ 1473 */
1284 if (unlikely(rq->rt.overloaded)) { 1474 spin_lock_irq(&rq->lock);
1285 spin_lock_irq(&rq->lock); 1475 push_rt_tasks(rq);
1286 push_rt_tasks(rq); 1476 spin_unlock_irq(&rq->lock);
1287 spin_unlock_irq(&rq->lock);
1288 }
1289} 1477}
1290 1478
1291/* 1479/*
@@ -1296,7 +1484,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1296{ 1484{
1297 if (!task_running(rq, p) && 1485 if (!task_running(rq, p) &&
1298 !test_tsk_need_resched(rq->curr) && 1486 !test_tsk_need_resched(rq->curr) &&
1299 rq->rt.overloaded) 1487 has_pushable_tasks(rq) &&
1488 p->rt.nr_cpus_allowed > 1)
1300 push_rt_tasks(rq); 1489 push_rt_tasks(rq);
1301} 1490}
1302 1491
@@ -1332,6 +1521,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1332 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { 1521 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1333 struct rq *rq = task_rq(p); 1522 struct rq *rq = task_rq(p);
1334 1523
1524 if (!task_current(rq, p)) {
1525 /*
1526 * Make sure we dequeue this task from the pushable list
1527 * before going further. It will either remain off of
1528 * the list because we are no longer pushable, or it
1529 * will be requeued.
1530 */
1531 if (p->rt.nr_cpus_allowed > 1)
1532 dequeue_pushable_task(rq, p);
1533
1534 /*
1535 * Requeue if our weight is changing and still > 1
1536 */
1537 if (weight > 1)
1538 enqueue_pushable_task(rq, p);
1539
1540 }
1541
1335 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { 1542 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1336 rq->rt.rt_nr_migratory++; 1543 rq->rt.rt_nr_migratory++;
1337 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { 1544 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1339,7 +1546,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1339 rq->rt.rt_nr_migratory--; 1546 rq->rt.rt_nr_migratory--;
1340 } 1547 }
1341 1548
1342 update_rt_migration(rq); 1549 update_rt_migration(&rq->rt);
1343 } 1550 }
1344 1551
1345 cpumask_copy(&p->cpus_allowed, new_mask); 1552 cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1354,7 +1561,7 @@ static void rq_online_rt(struct rq *rq)
1354 1561
1355 __enable_runtime(rq); 1562 __enable_runtime(rq);
1356 1563
1357 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio); 1564 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1358} 1565}
1359 1566
1360/* Assumes rq->lock is held */ 1567/* Assumes rq->lock is held */
@@ -1446,7 +1653,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1446 * can release the rq lock and p could migrate. 1653 * can release the rq lock and p could migrate.
1447 * Only reschedule if p is still on the same runqueue. 1654 * Only reschedule if p is still on the same runqueue.
1448 */ 1655 */
1449 if (p->prio > rq->rt.highest_prio && rq->curr == p) 1656 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1450 resched_task(p); 1657 resched_task(p);
1451#else 1658#else
1452 /* For UP simply resched on drop of prio */ 1659 /* For UP simply resched on drop of prio */
@@ -1517,6 +1724,9 @@ static void set_curr_task_rt(struct rq *rq)
1517 struct task_struct *p = rq->curr; 1724 struct task_struct *p = rq->curr;
1518 1725
1519 p->se.exec_start = rq->clock; 1726 p->se.exec_start = rq->clock;
1727
1728 /* The running task is never eligible for pushing */
1729 dequeue_pushable_task(rq, p);
1520} 1730}
1521 1731
1522static const struct sched_class rt_sched_class = { 1732static const struct sched_class rt_sched_class = {
@@ -1539,6 +1749,7 @@ static const struct sched_class rt_sched_class = {
1539 .rq_online = rq_online_rt, 1749 .rq_online = rq_online_rt,
1540 .rq_offline = rq_offline_rt, 1750 .rq_offline = rq_offline_rt,
1541 .pre_schedule = pre_schedule_rt, 1751 .pre_schedule = pre_schedule_rt,
1752 .needs_post_schedule = needs_post_schedule_rt,
1542 .post_schedule = post_schedule_rt, 1753 .post_schedule = post_schedule_rt,
1543 .task_wake_up = task_wake_up_rt, 1754 .task_wake_up = task_wake_up_rt,
1544 .switched_from = switched_from_rt, 1755 .switched_from = switched_from_rt,
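The pull_rt_task() rework above gates the expensive src_rq->lock on a cheap priority comparison: a remote runqueue is only worth locking (and pulling from) if its next-highest task outranks this runqueue's current highest. A minimal user-space sketch of that gate follows; the struct and helper names are illustrative stand-ins rather than the kernel's types, and, as in the kernel, a lower numeric value means higher RT priority.

#include <stdio.h>

/* Illustrative stand-in for rq->rt.highest_prio: lower value = higher prio. */
struct rt_prio_snapshot {
    int curr;   /* prio of the highest-prio task currently queued */
    int next;   /* prio of the next-highest queued task           */
};

/* Worth taking src_rq->lock only if src's next task outranks our current one. */
static int worth_pulling(const struct rt_prio_snapshot *src,
                         const struct rt_prio_snapshot *self)
{
    return src->next < self->curr;
}

int main(void)
{
    struct rt_prio_snapshot self = { .curr = 15, .next = 30 };
    struct rt_prio_snapshot idle = { .curr = 10, .next = 20 };  /* next loses  */
    struct rt_prio_snapshot busy = { .curr = 3,  .next = 5  };  /* next wins   */

    printf("pull from idle rq? %s\n", worth_pulling(&idle, &self) ? "yes" : "no");
    printf("pull from busy rq? %s\n", worth_pulling(&busy, &self) ? "yes" : "no");
    return 0;
}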
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a8f93dd374e1..32d2bd4061b0 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -4,7 +4,7 @@
4 * bump this up when changing the output format or the meaning of an existing 4 * bump this up when changing the output format or the meaning of an existing
5 * format, so that tools can adapt (or abort) 5 * format, so that tools can adapt (or abort)
6 */ 6 */
7#define SCHEDSTAT_VERSION 14 7#define SCHEDSTAT_VERSION 15
8 8
9static int show_schedstat(struct seq_file *seq, void *v) 9static int show_schedstat(struct seq_file *seq, void *v)
10{ 10{
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
26 26
27 /* runqueue-specific stats */ 27 /* runqueue-specific stats */
28 seq_printf(seq, 28 seq_printf(seq,
29 "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu", 29 "cpu%d %u %u %u %u %u %u %llu %llu %lu",
30 cpu, rq->yld_both_empty, 30 cpu, rq->yld_count,
31 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
32 rq->sched_switch, rq->sched_count, rq->sched_goidle, 31 rq->sched_switch, rq->sched_count, rq->sched_goidle,
33 rq->ttwu_count, rq->ttwu_local, 32 rq->ttwu_count, rq->ttwu_local,
34 rq->rq_cpu_time, 33 rq->rq_cpu_time,
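Because SCHEDSTAT_VERSION moves to 15 and the per-cpu line drops the yld_*_empty counters, userspace parsers should key off the version header rather than assuming a fixed field layout. A hedged sketch of such a check is below; it assumes /proc/schedstat still begins with a "version <n>" line, which is not shown in this hunk.

#include <stdio.h>

/* Hedged sketch: abort on an unknown schedstat version, as the header
 * comment above asks tools to do. Assumes the file starts with a
 * "version <n>" line. */
int main(void)
{
    FILE *f = fopen("/proc/schedstat", "r");
    int version = -1;

    if (f && fscanf(f, "version %d", &version) == 1 && version == 15)
        printf("parsing schedstat v15 cpu lines\n");
    else
        fprintf(stderr, "unsupported schedstat version: %d\n", version);

    if (f)
        fclose(f);
    return version == 15 ? 0 : 1;
}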
diff --git a/kernel/signal.c b/kernel/signal.c
index 2a74fe87c0dd..1c8814481a11 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1575,7 +1575,15 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1575 read_lock(&tasklist_lock); 1575 read_lock(&tasklist_lock);
1576 if (may_ptrace_stop()) { 1576 if (may_ptrace_stop()) {
1577 do_notify_parent_cldstop(current, CLD_TRAPPED); 1577 do_notify_parent_cldstop(current, CLD_TRAPPED);
1578 /*
1579 * Don't want to allow preemption here, because
1580 * sys_ptrace() needs this task to be inactive.
1581 *
1582 * XXX: implement read_unlock_no_resched().
1583 */
1584 preempt_disable();
1578 read_unlock(&tasklist_lock); 1585 read_unlock(&tasklist_lock);
1586 preempt_enable_no_resched();
1579 schedule(); 1587 schedule();
1580 } else { 1588 } else {
1581 /* 1589 /*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 74541ca49536..912823e2a11b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -44,7 +44,7 @@ static DEFINE_MUTEX(setup_lock);
44static int refcount; 44static int refcount;
45static struct workqueue_struct *stop_machine_wq; 45static struct workqueue_struct *stop_machine_wq;
46static struct stop_machine_data active, idle; 46static struct stop_machine_data active, idle;
47static const cpumask_t *active_cpus; 47static const struct cpumask *active_cpus;
48static void *stop_machine_work; 48static void *stop_machine_work;
49 49
50static void set_state(enum stopmachine_state newstate) 50static void set_state(enum stopmachine_state newstate)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c5ef44ff850f..2e490a389dd2 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1010,7 +1010,7 @@ static struct ctl_table vm_table[] = {
1010 .data = &dirty_expire_interval, 1010 .data = &dirty_expire_interval,
1011 .maxlen = sizeof(dirty_expire_interval), 1011 .maxlen = sizeof(dirty_expire_interval),
1012 .mode = 0644, 1012 .mode = 0644,
1013 .proc_handler = &proc_dointvec_userhz_jiffies, 1013 .proc_handler = &proc_dointvec,
1014 }, 1014 },
1015 { 1015 {
1016 .ctl_name = VM_NR_PDFLUSH_THREADS, 1016 .ctl_name = VM_NR_PDFLUSH_THREADS,
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index fafeb48f27c0..b38423ca711a 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -219,6 +219,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, 219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, 220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, 221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
222 { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
222 {} 223 {}
223}; 224};
224 225
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 905b0b50792d..0b0a6366c9d4 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,4 @@
1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o 1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
2 2
3obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o 3obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o 4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index ea2f48af83cf..d13be216a790 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -68,6 +68,17 @@ void clockevents_set_mode(struct clock_event_device *dev,
68 if (dev->mode != mode) { 68 if (dev->mode != mode) {
69 dev->set_mode(mode, dev); 69 dev->set_mode(mode, dev);
70 dev->mode = mode; 70 dev->mode = mode;
71
72 /*
73 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
74 * on it, so fix it up and emit a warning:
75 */
76 if (mode == CLOCK_EVT_MODE_ONESHOT) {
77 if (unlikely(!dev->mult)) {
78 dev->mult = 1;
79 WARN_ON(1);
80 }
81 }
71 } 82 }
72} 83}
73 84
@@ -168,15 +179,6 @@ void clockevents_register_device(struct clock_event_device *dev)
168 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 179 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
169 BUG_ON(!dev->cpumask); 180 BUG_ON(!dev->cpumask);
170 181
171 /*
172 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
173 * on it, so fix it up and emit a warning:
174 */
175 if (unlikely(!dev->mult)) {
176 dev->mult = 1;
177 WARN_ON(1);
178 }
179
180 spin_lock(&clockevents_lock); 182 spin_lock(&clockevents_lock);
181 183
182 list_add(&dev->list, &clockevent_devices); 184 list_add(&dev->list, &clockevent_devices);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ca89e1593f08..c46c931a7fe7 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -31,6 +31,82 @@
31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ 31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
32#include <linux/tick.h> 32#include <linux/tick.h>
33 33
34void timecounter_init(struct timecounter *tc,
35 const struct cyclecounter *cc,
36 u64 start_tstamp)
37{
38 tc->cc = cc;
39 tc->cycle_last = cc->read(cc);
40 tc->nsec = start_tstamp;
41}
42EXPORT_SYMBOL(timecounter_init);
43
44/**
45 * timecounter_read_delta - get nanoseconds since last call of this function
46 * @tc: Pointer to time counter
47 *
48 * When the underlying cycle counter runs over, this will be handled
49 * correctly as long as it does not run over more than once between
50 * calls.
51 *
52 * The first call to this function for a new time counter initializes
53 * the time tracking and returns an undefined result.
54 */
55static u64 timecounter_read_delta(struct timecounter *tc)
56{
57 cycle_t cycle_now, cycle_delta;
58 u64 ns_offset;
59
60 /* read cycle counter: */
61 cycle_now = tc->cc->read(tc->cc);
62
63 /* calculate the delta since the last timecounter_read_delta(): */
64 cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
65
66 /* convert to nanoseconds: */
67 ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
68
69 /* update time stamp of timecounter_read_delta() call: */
70 tc->cycle_last = cycle_now;
71
72 return ns_offset;
73}
74
75u64 timecounter_read(struct timecounter *tc)
76{
77 u64 nsec;
78
79 /* increment time by nanoseconds since last call */
80 nsec = timecounter_read_delta(tc);
81 nsec += tc->nsec;
82 tc->nsec = nsec;
83
84 return nsec;
85}
86EXPORT_SYMBOL(timecounter_read);
87
88u64 timecounter_cyc2time(struct timecounter *tc,
89 cycle_t cycle_tstamp)
90{
91 u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
92 u64 nsec;
93
94 /*
95 * Instead of always treating cycle_tstamp as more recent
96 * than tc->cycle_last, detect when it is too far in the
97 * future and treat it as old time stamp instead.
98 */
99 if (cycle_delta > tc->cc->mask / 2) {
100 cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
101 nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
102 } else {
103 nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
104 }
105
106 return nsec;
107}
108EXPORT_SYMBOL(timecounter_cyc2time);
109
34/* XXX - Would like a better way for initializing curr_clocksource */ 110/* XXX - Would like a better way for initializing curr_clocksource */
35extern struct clocksource clocksource_jiffies; 111extern struct clocksource clocksource_jiffies;
36 112
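The new timecounter helpers above rely on one property: as long as the cycle counter wraps at most once between reads, (now - last) & mask recovers the true number of elapsed cycles. A stand-alone demonstration with an assumed 32-bit counter:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone illustration of the mask-based delta used by
 * timecounter_read_delta(): correct across a single counter wrap. */
int main(void)
{
    uint64_t mask = (1ULL << 32) - 1;           /* 32-bit cycle counter */
    uint64_t last = 0xFFFFFF00ULL;              /* just before the wrap */
    uint64_t now  = 0x00000080ULL;              /* just after the wrap  */

    uint64_t delta = (now - last) & mask;       /* 384 cycles, not a huge value */
    printf("cycle delta across wrap: %llu\n", (unsigned long long)delta);
    return 0;
}

cyclecounter_cyc2ns() then scales that cycle delta to nanoseconds using the counter's mult/shift pair.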
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f5f793d92415..7fc64375ff43 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -1,71 +1,129 @@
1/* 1/*
2 * linux/kernel/time/ntp.c
3 *
4 * NTP state machine interfaces and logic. 2 * NTP state machine interfaces and logic.
5 * 3 *
6 * This code was mainly moved from kernel/timer.c and kernel/time.c 4 * This code was mainly moved from kernel/timer.c and kernel/time.c
7 * Please see those files for relevant copyright info and historical 5 * Please see those files for relevant copyright info and historical
8 * changelogs. 6 * changelogs.
9 */ 7 */
10
11#include <linux/mm.h>
12#include <linux/time.h>
13#include <linux/timex.h>
14#include <linux/jiffies.h>
15#include <linux/hrtimer.h>
16#include <linux/capability.h> 8#include <linux/capability.h>
17#include <linux/math64.h>
18#include <linux/clocksource.h> 9#include <linux/clocksource.h>
19#include <linux/workqueue.h> 10#include <linux/workqueue.h>
20#include <asm/timex.h> 11#include <linux/hrtimer.h>
12#include <linux/jiffies.h>
13#include <linux/math64.h>
14#include <linux/timex.h>
15#include <linux/time.h>
16#include <linux/mm.h>
21 17
22/* 18/*
23 * Timekeeping variables 19 * NTP timekeeping variables:
24 */ 20 */
25unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */
26unsigned long tick_nsec; /* ACTHZ period (nsec) */
27u64 tick_length;
28static u64 tick_length_base;
29 21
30static struct hrtimer leap_timer; 22/* USER_HZ period (usecs): */
23unsigned long tick_usec = TICK_USEC;
31 24
32#define MAX_TICKADJ 500 /* microsecs */ 25/* ACTHZ period (nsecs): */
33#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \ 26unsigned long tick_nsec;
34 NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ) 27
28u64 tick_length;
29static u64 tick_length_base;
30
31static struct hrtimer leap_timer;
32
33#define MAX_TICKADJ 500LL /* usecs */
34#define MAX_TICKADJ_SCALED \
35 (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
35 36
36/* 37/*
37 * phase-lock loop variables 38 * phase-lock loop variables
38 */ 39 */
39/* TIME_ERROR prevents overwriting the CMOS clock */
40static int time_state = TIME_OK; /* clock synchronization status */
41int time_status = STA_UNSYNC; /* clock status bits */
42static long time_tai; /* TAI offset (s) */
43static s64 time_offset; /* time adjustment (ns) */
44static long time_constant = 2; /* pll time constant */
45long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
46long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
47static s64 time_freq; /* frequency offset (scaled ns/s)*/
48static long time_reftime; /* time at last adjustment (s) */
49long time_adjust;
50static long ntp_tick_adj;
51 40
41/*
42 * clock synchronization status
43 *
44 * (TIME_ERROR prevents overwriting the CMOS clock)
45 */
46static int time_state = TIME_OK;
47
48/* clock status bits: */
49int time_status = STA_UNSYNC;
50
51/* TAI offset (secs): */
52static long time_tai;
53
54/* time adjustment (nsecs): */
55static s64 time_offset;
56
57/* pll time constant: */
58static long time_constant = 2;
59
60/* maximum error (usecs): */
61long time_maxerror = NTP_PHASE_LIMIT;
62
63/* estimated error (usecs): */
64long time_esterror = NTP_PHASE_LIMIT;
65
66/* frequency offset (scaled nsecs/secs): */
67static s64 time_freq;
68
69/* time at last adjustment (secs): */
70static long time_reftime;
71
72long time_adjust;
73
74/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
75static s64 ntp_tick_adj;
76
77/*
78 * NTP methods:
79 */
80
81/*
82 * Update (tick_length, tick_length_base, tick_nsec), based
83 * on (tick_usec, ntp_tick_adj, time_freq):
84 */
52static void ntp_update_frequency(void) 85static void ntp_update_frequency(void)
53{ 86{
54 u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) 87 u64 second_length;
55 << NTP_SCALE_SHIFT; 88 u64 new_base;
56 second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT; 89
57 second_length += time_freq; 90 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
91 << NTP_SCALE_SHIFT;
92
93 second_length += ntp_tick_adj;
94 second_length += time_freq;
58 95
59 tick_length_base = second_length; 96 tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
97 new_base = div_u64(second_length, NTP_INTERVAL_FREQ);
60 98
61 tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT; 99 /*
62 tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ); 100 * Don't wait for the next second_overflow, apply
101 * the change to the tick length immediately:
102 */
103 tick_length += new_base - tick_length_base;
104 tick_length_base = new_base;
105}
106
107static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
108{
109 time_status &= ~STA_MODE;
110
111 if (secs < MINSEC)
112 return 0;
113
114 if (!(time_status & STA_FLL) && (secs <= MAXSEC))
115 return 0;
116
117 time_status |= STA_MODE;
118
119 return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
63} 120}
64 121
65static void ntp_update_offset(long offset) 122static void ntp_update_offset(long offset)
66{ 123{
67 long mtemp;
68 s64 freq_adj; 124 s64 freq_adj;
125 s64 offset64;
126 long secs;
69 127
70 if (!(time_status & STA_PLL)) 128 if (!(time_status & STA_PLL))
71 return; 129 return;
@@ -84,24 +142,23 @@ static void ntp_update_offset(long offset)
84 * Select how the frequency is to be controlled 142 * Select how the frequency is to be controlled
85 * and in which mode (PLL or FLL). 143 * and in which mode (PLL or FLL).
86 */ 144 */
87 if (time_status & STA_FREQHOLD || time_reftime == 0) 145 secs = xtime.tv_sec - time_reftime;
88 time_reftime = xtime.tv_sec; 146 if (unlikely(time_status & STA_FREQHOLD))
89 mtemp = xtime.tv_sec - time_reftime; 147 secs = 0;
148
90 time_reftime = xtime.tv_sec; 149 time_reftime = xtime.tv_sec;
91 150
92 freq_adj = (s64)offset * mtemp; 151 offset64 = offset;
93 freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant); 152 freq_adj = (offset64 * secs) <<
94 time_status &= ~STA_MODE; 153 (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
95 if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
96 freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
97 mtemp);
98 time_status |= STA_MODE;
99 }
100 freq_adj += time_freq;
101 freq_adj = min(freq_adj, MAXFREQ_SCALED);
102 time_freq = max(freq_adj, -MAXFREQ_SCALED);
103 154
104 time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ); 155 freq_adj += ntp_update_offset_fll(offset64, secs);
156
157 freq_adj = min(freq_adj + time_freq, MAXFREQ_SCALED);
158
159 time_freq = max(freq_adj, -MAXFREQ_SCALED);
160
161 time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
105} 162}
106 163
107/** 164/**
@@ -111,15 +168,15 @@ static void ntp_update_offset(long offset)
111 */ 168 */
112void ntp_clear(void) 169void ntp_clear(void)
113{ 170{
114 time_adjust = 0; /* stop active adjtime() */ 171 time_adjust = 0; /* stop active adjtime() */
115 time_status |= STA_UNSYNC; 172 time_status |= STA_UNSYNC;
116 time_maxerror = NTP_PHASE_LIMIT; 173 time_maxerror = NTP_PHASE_LIMIT;
117 time_esterror = NTP_PHASE_LIMIT; 174 time_esterror = NTP_PHASE_LIMIT;
118 175
119 ntp_update_frequency(); 176 ntp_update_frequency();
120 177
121 tick_length = tick_length_base; 178 tick_length = tick_length_base;
122 time_offset = 0; 179 time_offset = 0;
123} 180}
124 181
125/* 182/*
@@ -140,8 +197,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
140 xtime.tv_sec--; 197 xtime.tv_sec--;
141 wall_to_monotonic.tv_sec++; 198 wall_to_monotonic.tv_sec++;
142 time_state = TIME_OOP; 199 time_state = TIME_OOP;
143 printk(KERN_NOTICE "Clock: " 200 printk(KERN_NOTICE
144 "inserting leap second 23:59:60 UTC\n"); 201 "Clock: inserting leap second 23:59:60 UTC\n");
145 hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC); 202 hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
146 res = HRTIMER_RESTART; 203 res = HRTIMER_RESTART;
147 break; 204 break;
@@ -150,8 +207,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
150 time_tai--; 207 time_tai--;
151 wall_to_monotonic.tv_sec--; 208 wall_to_monotonic.tv_sec--;
152 time_state = TIME_WAIT; 209 time_state = TIME_WAIT;
153 printk(KERN_NOTICE "Clock: " 210 printk(KERN_NOTICE
154 "deleting leap second 23:59:59 UTC\n"); 211 "Clock: deleting leap second 23:59:59 UTC\n");
155 break; 212 break;
156 case TIME_OOP: 213 case TIME_OOP:
157 time_tai++; 214 time_tai++;
@@ -179,7 +236,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
179 */ 236 */
180void second_overflow(void) 237void second_overflow(void)
181{ 238{
182 s64 time_adj; 239 s64 delta;
183 240
184 /* Bump the maxerror field */ 241 /* Bump the maxerror field */
185 time_maxerror += MAXFREQ / NSEC_PER_USEC; 242 time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -192,24 +249,30 @@ void second_overflow(void)
192 * Compute the phase adjustment for the next second. The offset is 249 * Compute the phase adjustment for the next second. The offset is
193 * reduced by a fixed factor times the time constant. 250 * reduced by a fixed factor times the time constant.
194 */ 251 */
195 tick_length = tick_length_base; 252 tick_length = tick_length_base;
196 time_adj = shift_right(time_offset, SHIFT_PLL + time_constant); 253
197 time_offset -= time_adj; 254 delta = shift_right(time_offset, SHIFT_PLL + time_constant);
198 tick_length += time_adj; 255 time_offset -= delta;
199 256 tick_length += delta;
200 if (unlikely(time_adjust)) { 257
201 if (time_adjust > MAX_TICKADJ) { 258 if (!time_adjust)
202 time_adjust -= MAX_TICKADJ; 259 return;
203 tick_length += MAX_TICKADJ_SCALED; 260
204 } else if (time_adjust < -MAX_TICKADJ) { 261 if (time_adjust > MAX_TICKADJ) {
205 time_adjust += MAX_TICKADJ; 262 time_adjust -= MAX_TICKADJ;
206 tick_length -= MAX_TICKADJ_SCALED; 263 tick_length += MAX_TICKADJ_SCALED;
207 } else { 264 return;
208 tick_length += (s64)(time_adjust * NSEC_PER_USEC /
209 NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
210 time_adjust = 0;
211 }
212 } 265 }
266
267 if (time_adjust < -MAX_TICKADJ) {
268 time_adjust += MAX_TICKADJ;
269 tick_length -= MAX_TICKADJ_SCALED;
270 return;
271 }
272
273 tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
274 << NTP_SCALE_SHIFT;
275 time_adjust = 0;
213} 276}
214 277
215#ifdef CONFIG_GENERIC_CMOS_UPDATE 278#ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -233,12 +296,13 @@ static void sync_cmos_clock(struct work_struct *work)
233 * This code is run on a timer. If the clock is set, that timer 296 * This code is run on a timer. If the clock is set, that timer
234 * may not expire at the correct time. Thus, we adjust... 297 * may not expire at the correct time. Thus, we adjust...
235 */ 298 */
236 if (!ntp_synced()) 299 if (!ntp_synced()) {
237 /* 300 /*
238 * Not synced, exit, do not restart a timer (if one is 301 * Not synced, exit, do not restart a timer (if one is
239 * running, let it run out). 302 * running, let it run out).
240 */ 303 */
241 return; 304 return;
305 }
242 306
243 getnstimeofday(&now); 307 getnstimeofday(&now);
244 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) 308 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
@@ -270,7 +334,116 @@ static void notify_cmos_timer(void)
270static inline void notify_cmos_timer(void) { } 334static inline void notify_cmos_timer(void) { }
271#endif 335#endif
272 336
273/* adjtimex mainly allows reading (and writing, if superuser) of 337/*
338 * Start the leap seconds timer:
339 */
340static inline void ntp_start_leap_timer(struct timespec *ts)
341{
342 long now = ts->tv_sec;
343
344 if (time_status & STA_INS) {
345 time_state = TIME_INS;
346 now += 86400 - now % 86400;
347 hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
348
349 return;
350 }
351
352 if (time_status & STA_DEL) {
353 time_state = TIME_DEL;
354 now += 86400 - (now + 1) % 86400;
355 hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
356 }
357}
358
359/*
360 * Propagate a new txc->status value into the NTP state:
361 */
362static inline void process_adj_status(struct timex *txc, struct timespec *ts)
363{
364 if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
365 time_state = TIME_OK;
366 time_status = STA_UNSYNC;
367 }
368
369 /*
370 * If we turn on PLL adjustments then reset the
371 * reference time to current time.
372 */
373 if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
374 time_reftime = xtime.tv_sec;
375
376 /* only set allowed bits */
377 time_status &= STA_RONLY;
378 time_status |= txc->status & ~STA_RONLY;
379
380 switch (time_state) {
381 case TIME_OK:
382 ntp_start_leap_timer(ts);
383 break;
384 case TIME_INS:
385 case TIME_DEL:
386 time_state = TIME_OK;
387 ntp_start_leap_timer(ts);
388 case TIME_WAIT:
389 if (!(time_status & (STA_INS | STA_DEL)))
390 time_state = TIME_OK;
391 break;
392 case TIME_OOP:
393 hrtimer_restart(&leap_timer);
394 break;
395 }
396}
397/*
398 * Called with the xtime lock held, so we can access and modify
399 * all the global NTP state:
400 */
401static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
402{
403 if (txc->modes & ADJ_STATUS)
404 process_adj_status(txc, ts);
405
406 if (txc->modes & ADJ_NANO)
407 time_status |= STA_NANO;
408
409 if (txc->modes & ADJ_MICRO)
410 time_status &= ~STA_NANO;
411
412 if (txc->modes & ADJ_FREQUENCY) {
413 time_freq = txc->freq * PPM_SCALE;
414 time_freq = min(time_freq, MAXFREQ_SCALED);
415 time_freq = max(time_freq, -MAXFREQ_SCALED);
416 }
417
418 if (txc->modes & ADJ_MAXERROR)
419 time_maxerror = txc->maxerror;
420
421 if (txc->modes & ADJ_ESTERROR)
422 time_esterror = txc->esterror;
423
424 if (txc->modes & ADJ_TIMECONST) {
425 time_constant = txc->constant;
426 if (!(time_status & STA_NANO))
427 time_constant += 4;
428 time_constant = min(time_constant, (long)MAXTC);
429 time_constant = max(time_constant, 0l);
430 }
431
432 if (txc->modes & ADJ_TAI && txc->constant > 0)
433 time_tai = txc->constant;
434
435 if (txc->modes & ADJ_OFFSET)
436 ntp_update_offset(txc->offset);
437
438 if (txc->modes & ADJ_TICK)
439 tick_usec = txc->tick;
440
441 if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
442 ntp_update_frequency();
443}
444
445/*
446 * adjtimex mainly allows reading (and writing, if superuser) of
274 * kernel time-keeping variables. used by xntpd. 447 * kernel time-keeping variables. used by xntpd.
275 */ 448 */
276int do_adjtimex(struct timex *txc) 449int do_adjtimex(struct timex *txc)
@@ -291,11 +464,14 @@ int do_adjtimex(struct timex *txc)
291 if (txc->modes && !capable(CAP_SYS_TIME)) 464 if (txc->modes && !capable(CAP_SYS_TIME))
292 return -EPERM; 465 return -EPERM;
293 466
294 /* if the quartz is off by more than 10% something is VERY wrong! */ 467 /*
468 * if the quartz is off by more than 10% then
469 * something is VERY wrong!
470 */
295 if (txc->modes & ADJ_TICK && 471 if (txc->modes & ADJ_TICK &&
296 (txc->tick < 900000/USER_HZ || 472 (txc->tick < 900000/USER_HZ ||
297 txc->tick > 1100000/USER_HZ)) 473 txc->tick > 1100000/USER_HZ))
298 return -EINVAL; 474 return -EINVAL;
299 475
300 if (txc->modes & ADJ_STATUS && time_state != TIME_OK) 476 if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
301 hrtimer_cancel(&leap_timer); 477 hrtimer_cancel(&leap_timer);
@@ -305,7 +481,6 @@ int do_adjtimex(struct timex *txc)
305 481
306 write_seqlock_irq(&xtime_lock); 482 write_seqlock_irq(&xtime_lock);
307 483
308 /* If there are input parameters, then process them */
309 if (txc->modes & ADJ_ADJTIME) { 484 if (txc->modes & ADJ_ADJTIME) {
310 long save_adjust = time_adjust; 485 long save_adjust = time_adjust;
311 486
@@ -315,98 +490,24 @@ int do_adjtimex(struct timex *txc)
315 ntp_update_frequency(); 490 ntp_update_frequency();
316 } 491 }
317 txc->offset = save_adjust; 492 txc->offset = save_adjust;
318 goto adj_done; 493 } else {
319 }
320 if (txc->modes) {
321 long sec;
322
323 if (txc->modes & ADJ_STATUS) {
324 if ((time_status & STA_PLL) &&
325 !(txc->status & STA_PLL)) {
326 time_state = TIME_OK;
327 time_status = STA_UNSYNC;
328 }
329 /* only set allowed bits */
330 time_status &= STA_RONLY;
331 time_status |= txc->status & ~STA_RONLY;
332
333 switch (time_state) {
334 case TIME_OK:
335 start_timer:
336 sec = ts.tv_sec;
337 if (time_status & STA_INS) {
338 time_state = TIME_INS;
339 sec += 86400 - sec % 86400;
340 hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
341 } else if (time_status & STA_DEL) {
342 time_state = TIME_DEL;
343 sec += 86400 - (sec + 1) % 86400;
344 hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
345 }
346 break;
347 case TIME_INS:
348 case TIME_DEL:
349 time_state = TIME_OK;
350 goto start_timer;
351 break;
352 case TIME_WAIT:
353 if (!(time_status & (STA_INS | STA_DEL)))
354 time_state = TIME_OK;
355 break;
356 case TIME_OOP:
357 hrtimer_restart(&leap_timer);
358 break;
359 }
360 }
361
362 if (txc->modes & ADJ_NANO)
363 time_status |= STA_NANO;
364 if (txc->modes & ADJ_MICRO)
365 time_status &= ~STA_NANO;
366
367 if (txc->modes & ADJ_FREQUENCY) {
368 time_freq = (s64)txc->freq * PPM_SCALE;
369 time_freq = min(time_freq, MAXFREQ_SCALED);
370 time_freq = max(time_freq, -MAXFREQ_SCALED);
371 }
372
373 if (txc->modes & ADJ_MAXERROR)
374 time_maxerror = txc->maxerror;
375 if (txc->modes & ADJ_ESTERROR)
376 time_esterror = txc->esterror;
377
378 if (txc->modes & ADJ_TIMECONST) {
379 time_constant = txc->constant;
380 if (!(time_status & STA_NANO))
381 time_constant += 4;
382 time_constant = min(time_constant, (long)MAXTC);
383 time_constant = max(time_constant, 0l);
384 }
385
386 if (txc->modes & ADJ_TAI && txc->constant > 0)
387 time_tai = txc->constant;
388
389 if (txc->modes & ADJ_OFFSET)
390 ntp_update_offset(txc->offset);
391 if (txc->modes & ADJ_TICK)
392 tick_usec = txc->tick;
393 494
394 if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET)) 495 /* If there are input parameters, then process them: */
395 ntp_update_frequency(); 496 if (txc->modes)
396 } 497 process_adjtimex_modes(txc, &ts);
397 498
398 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, 499 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
399 NTP_SCALE_SHIFT); 500 NTP_SCALE_SHIFT);
400 if (!(time_status & STA_NANO)) 501 if (!(time_status & STA_NANO))
401 txc->offset /= NSEC_PER_USEC; 502 txc->offset /= NSEC_PER_USEC;
503 }
402 504
403adj_done:
404 result = time_state; /* mostly `TIME_OK' */ 505 result = time_state; /* mostly `TIME_OK' */
405 if (time_status & (STA_UNSYNC|STA_CLOCKERR)) 506 if (time_status & (STA_UNSYNC|STA_CLOCKERR))
406 result = TIME_ERROR; 507 result = TIME_ERROR;
407 508
408 txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) * 509 txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
409 (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT); 510 PPM_SCALE_INV, NTP_SCALE_SHIFT);
410 txc->maxerror = time_maxerror; 511 txc->maxerror = time_maxerror;
411 txc->esterror = time_esterror; 512 txc->esterror = time_esterror;
412 txc->status = time_status; 513 txc->status = time_status;
@@ -425,6 +526,7 @@ adj_done:
425 txc->calcnt = 0; 526 txc->calcnt = 0;
426 txc->errcnt = 0; 527 txc->errcnt = 0;
427 txc->stbcnt = 0; 528 txc->stbcnt = 0;
529
428 write_sequnlock_irq(&xtime_lock); 530 write_sequnlock_irq(&xtime_lock);
429 531
430 txc->time.tv_sec = ts.tv_sec; 532 txc->time.tv_sec = ts.tv_sec;
@@ -440,6 +542,8 @@ adj_done:
440static int __init ntp_tick_adj_setup(char *str) 542static int __init ntp_tick_adj_setup(char *str)
441{ 543{
442 ntp_tick_adj = simple_strtol(str, NULL, 0); 544 ntp_tick_adj = simple_strtol(str, NULL, 0);
545 ntp_tick_adj <<= NTP_SCALE_SHIFT;
546
443 return 1; 547 return 1;
444} 548}
445 549
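The ntp_tick_adj_setup() change above pre-scales the boot parameter once, so ntp_update_frequency() can add it directly to the already-upscaled second length without a per-call shift. A small sketch of that bookkeeping, assuming the usual NTP_SCALE_SHIFT of 32 (the constant itself is not shown in this diff):

#include <stdio.h>
#include <stdint.h>

#define NTP_SCALE_SHIFT 32   /* assumption for illustration */

int main(void)
{
    long boot_param = 500;                                   /* "ntp_tick_adj=500" */
    int64_t ntp_tick_adj = (int64_t)boot_param << NTP_SCALE_SHIFT;

    int64_t second_length = (int64_t)1000000000 << NTP_SCALE_SHIFT; /* 1 s, scaled */
    second_length += ntp_tick_adj;                           /* no extra shift needed */

    printf("scaled second length: %lld\n", (long long)second_length);
    return 0;
}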
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
new file mode 100644
index 000000000000..71e7f1a19156
--- /dev/null
+++ b/kernel/time/timecompare.c
@@ -0,0 +1,191 @@
1/*
2 * Copyright (C) 2009 Intel Corporation.
3 * Author: Patrick Ohly <patrick.ohly@intel.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/timecompare.h>
21#include <linux/module.h>
22#include <linux/math64.h>
23
24/*
25 * fixed point arithmetic scale factor for skew
26 *
27 * Usually one would measure skew in ppb (parts per billion, 1e9), but
28 * using a power-of-2 scale factor simplifies the math.
29 */
30#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
31
32ktime_t timecompare_transform(struct timecompare *sync,
33 u64 source_tstamp)
34{
35 u64 nsec;
36
37 nsec = source_tstamp + sync->offset;
38 nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
39 TIMECOMPARE_SKEW_RESOLUTION;
40
41 return ns_to_ktime(nsec);
42}
43EXPORT_SYMBOL(timecompare_transform);
44
45int timecompare_offset(struct timecompare *sync,
46 s64 *offset,
47 u64 *source_tstamp)
48{
49 u64 start_source = 0, end_source = 0;
50 struct {
51 s64 offset;
52 s64 duration_target;
53 } buffer[10], sample, *samples;
54 int counter = 0, i;
55 int used;
56 int index;
57 int num_samples = sync->num_samples;
58
59 if (num_samples > sizeof(buffer)/sizeof(buffer[0])) {
60 samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
61 if (!samples) {
62 samples = buffer;
63 num_samples = sizeof(buffer)/sizeof(buffer[0]);
64 }
65 } else {
66 samples = buffer;
67 }
68
69 /* run until we have enough valid samples, but do not try forever */
70 i = 0;
71 counter = 0;
72 while (1) {
73 u64 ts;
74 ktime_t start, end;
75
76 start = sync->target();
77 ts = timecounter_read(sync->source);
78 end = sync->target();
79
80 if (!i)
81 start_source = ts;
82
83 /* ignore negative durations */
84 sample.duration_target = ktime_to_ns(ktime_sub(end, start));
85 if (sample.duration_target >= 0) {
86 /*
87 * assume symmetric delay to and from source:
88 * average target time corresponds to measured
89 * source time
90 */
91 sample.offset =
92 ktime_to_ns(ktime_add(end, start)) / 2 -
93 ts;
94
95 /* simple insertion sort based on duration */
96 index = counter - 1;
97 while (index >= 0) {
98 if (samples[index].duration_target <
99 sample.duration_target)
100 break;
101 samples[index + 1] = samples[index];
102 index--;
103 }
104 samples[index + 1] = sample;
105 counter++;
106 }
107
108 i++;
109 if (counter >= num_samples || i >= 100000) {
110 end_source = ts;
111 break;
112 }
113 }
114
115 *source_tstamp = (end_source + start_source) / 2;
116
117 /* remove outliers by only using 75% of the samples */
118 used = counter * 3 / 4;
119 if (!used)
120 used = counter;
121 if (used) {
122 /* calculate average */
123 s64 off = 0;
124 for (index = 0; index < used; index++)
125 off += samples[index].offset;
126 *offset = div_s64(off, used);
127 }
128
129 if (samples && samples != buffer)
130 kfree(samples);
131
132 return used;
133}
134EXPORT_SYMBOL(timecompare_offset);
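timecompare_offset() above estimates the clock offset by sorting samples by their target-clock round-trip duration and averaging only the fastest 75%, which discards readings inflated by interrupts or scheduling noise. A stand-alone illustration of that filtering (qsort stands in for the in-kernel insertion sort, and the sample values are made up):

#include <stdio.h>
#include <stdlib.h>

struct sample { long long offset, duration; };

static int by_duration(const void *a, const void *b)
{
    const struct sample *x = a, *y = b;
    return (x->duration > y->duration) - (x->duration < y->duration);
}

int main(void)
{
    struct sample s[] = {
        { 100, 900 }, { 104, 950 }, { 98, 910 }, { 400, 5000 }, /* slow outlier */
    };
    int n = sizeof(s) / sizeof(s[0]);
    int used = n * 3 / 4;                 /* keep the fastest 75% */
    long long sum = 0;

    qsort(s, n, sizeof(s[0]), by_duration);
    for (int i = 0; i < used; i++)
        sum += s[i].offset;
    printf("trimmed offset estimate: %lld\n", sum / used);
    return 0;
}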
135
136void __timecompare_update(struct timecompare *sync,
137 u64 source_tstamp)
138{
139 s64 offset;
140 u64 average_time;
141
142 if (!timecompare_offset(sync, &offset, &average_time))
143 return;
144
145 if (!sync->last_update) {
146 sync->last_update = average_time;
147 sync->offset = offset;
148 sync->skew = 0;
149 } else {
150 s64 delta_nsec = average_time - sync->last_update;
151
152 /* avoid division by negative or small deltas */
153 if (delta_nsec >= 10000) {
154 s64 delta_offset_nsec = offset - sync->offset;
155 s64 skew; /* delta_offset_nsec *
156 TIMECOMPARE_SKEW_RESOLUTION /
157 delta_nsec */
158 u64 divisor;
159
160 /* div_s64() is limited to 32 bit divisor */
161 skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
162 divisor = delta_nsec;
163 while (unlikely(divisor >= ((s64)1) << 32)) {
164 /* divide both by 2; beware, right shift
165 of negative value has undefined
166 behavior and can only be used for
167 the positive divisor */
168 skew = div_s64(skew, 2);
169 divisor >>= 1;
170 }
171 skew = div_s64(skew, divisor);
172
173 /*
174 * Calculate new overall skew as 4/16 the
175 * old value and 12/16 the new one. This is
176 * a rather arbitrary tradeoff between
177 * only using the latest measurement (0/16 and
178 * 16/16) and even more weight on past measurements.
179 */
180#define TIMECOMPARE_NEW_SKEW_PER_16 12
181 sync->skew =
182 div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
183 sync->skew +
184 TIMECOMPARE_NEW_SKEW_PER_16 * skew,
185 16);
186 sync->last_update = average_time;
187 sync->offset = offset;
188 }
189 }
190}
191EXPORT_SYMBOL(__timecompare_update);
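__timecompare_update() above keeps the skew in 2^30 fixed point, works around the 32-bit divisor limit of div_s64() by halving both operands until the divisor fits, and then blends the new estimate 12/16 against the old 4/16. A user-space rendering of the same arithmetic with made-up sample numbers:

#include <stdio.h>
#include <stdint.h>

#define SKEW_RESOLUTION (((int64_t)1) << 30)
#define NEW_SKEW_PER_16 12

int main(void)
{
    int64_t delta_offset_nsec = 2500;      /* offset drift since last update */
    int64_t delta_nsec = 5000000000LL;     /* 5 s between updates            */
    int64_t old_skew = 400;

    int64_t skew = delta_offset_nsec * SKEW_RESOLUTION;
    uint64_t divisor = delta_nsec;

    while (divisor >= ((int64_t)1) << 32) { /* keep the divisor 32-bit safe */
        skew /= 2;
        divisor >>= 1;
    }
    skew /= (int64_t)divisor;

    /* weighted average: 4/16 old, 12/16 new */
    skew = ((16 - NEW_SKEW_PER_16) * old_skew + NEW_SKEW_PER_16 * skew) / 16;
    printf("blended skew: %lld (scaled by 2^30)\n", (long long)skew);
    return 0;
}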
diff --git a/kernel/timer.c b/kernel/timer.c
index ef1c385bc572..b4555568b4e4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -600,11 +600,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
600 } 600 }
601} 601}
602 602
603int __mod_timer(struct timer_list *timer, unsigned long expires) 603static inline int
604__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
604{ 605{
605 struct tvec_base *base, *new_base; 606 struct tvec_base *base, *new_base;
606 unsigned long flags; 607 unsigned long flags;
607 int ret = 0; 608 int ret;
609
610 ret = 0;
608 611
609 timer_stats_timer_set_start_info(timer); 612 timer_stats_timer_set_start_info(timer);
610 BUG_ON(!timer->function); 613 BUG_ON(!timer->function);
@@ -614,6 +617,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
614 if (timer_pending(timer)) { 617 if (timer_pending(timer)) {
615 detach_timer(timer, 0); 618 detach_timer(timer, 0);
616 ret = 1; 619 ret = 1;
620 } else {
621 if (pending_only)
622 goto out_unlock;
617 } 623 }
618 624
619 debug_timer_activate(timer); 625 debug_timer_activate(timer);
@@ -640,42 +646,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
640 646
641 timer->expires = expires; 647 timer->expires = expires;
642 internal_add_timer(base, timer); 648 internal_add_timer(base, timer);
649
650out_unlock:
643 spin_unlock_irqrestore(&base->lock, flags); 651 spin_unlock_irqrestore(&base->lock, flags);
644 652
645 return ret; 653 return ret;
646} 654}
647 655
648EXPORT_SYMBOL(__mod_timer);
649
650/** 656/**
651 * add_timer_on - start a timer on a particular CPU 657 * mod_timer_pending - modify a pending timer's timeout
652 * @timer: the timer to be added 658 * @timer: the pending timer to be modified
653 * @cpu: the CPU to start it on 659 * @expires: new timeout in jiffies
654 * 660 *
655 * This is not very scalable on SMP. Double adds are not possible. 661 * mod_timer_pending() is the same for pending timers as mod_timer(),
662 * but will not re-activate and modify already deleted timers.
663 *
664 * It is useful for unserialized use of timers.
656 */ 665 */
657void add_timer_on(struct timer_list *timer, int cpu) 666int mod_timer_pending(struct timer_list *timer, unsigned long expires)
658{ 667{
659 struct tvec_base *base = per_cpu(tvec_bases, cpu); 668 return __mod_timer(timer, expires, true);
660 unsigned long flags;
661
662 timer_stats_timer_set_start_info(timer);
663 BUG_ON(timer_pending(timer) || !timer->function);
664 spin_lock_irqsave(&base->lock, flags);
665 timer_set_base(timer, base);
666 debug_timer_activate(timer);
667 internal_add_timer(base, timer);
668 /*
669 * Check whether the other CPU is idle and needs to be
670 * triggered to reevaluate the timer wheel when nohz is
671 * active. We are protected against the other CPU fiddling
672 * with the timer by holding the timer base lock. This also
673 * makes sure that a CPU on the way to idle can not evaluate
674 * the timer wheel.
675 */
676 wake_up_idle_cpu(cpu);
677 spin_unlock_irqrestore(&base->lock, flags);
678} 669}
670EXPORT_SYMBOL(mod_timer_pending);
679 671
680/** 672/**
681 * mod_timer - modify a timer's timeout 673 * mod_timer - modify a timer's timeout
@@ -699,9 +691,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
699 */ 691 */
700int mod_timer(struct timer_list *timer, unsigned long expires) 692int mod_timer(struct timer_list *timer, unsigned long expires)
701{ 693{
702 BUG_ON(!timer->function);
703
704 timer_stats_timer_set_start_info(timer);
705 /* 694 /*
706 * This is a common optimization triggered by the 695 * This is a common optimization triggered by the
707 * networking code - if the timer is re-modified 696 * networking code - if the timer is re-modified
@@ -710,12 +699,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
710 if (timer->expires == expires && timer_pending(timer)) 699 if (timer->expires == expires && timer_pending(timer))
711 return 1; 700 return 1;
712 701
713 return __mod_timer(timer, expires); 702 return __mod_timer(timer, expires, false);
714} 703}
715
716EXPORT_SYMBOL(mod_timer); 704EXPORT_SYMBOL(mod_timer);
717 705
718/** 706/**
707 * add_timer - start a timer
708 * @timer: the timer to be added
709 *
710 * The kernel will do a ->function(->data) callback from the
711 * timer interrupt at the ->expires point in the future. The
712 * current time is 'jiffies'.
713 *
714 * The timer's ->expires, ->function (and if the handler uses it, ->data)
715 * fields must be set prior to calling this function.
716 *
717 * Timers with an ->expires field in the past will be executed in the next
718 * timer tick.
719 */
720void add_timer(struct timer_list *timer)
721{
722 BUG_ON(timer_pending(timer));
723 mod_timer(timer, timer->expires);
724}
725EXPORT_SYMBOL(add_timer);
726
727/**
728 * add_timer_on - start a timer on a particular CPU
729 * @timer: the timer to be added
730 * @cpu: the CPU to start it on
731 *
732 * This is not very scalable on SMP. Double adds are not possible.
733 */
734void add_timer_on(struct timer_list *timer, int cpu)
735{
736 struct tvec_base *base = per_cpu(tvec_bases, cpu);
737 unsigned long flags;
738
739 timer_stats_timer_set_start_info(timer);
740 BUG_ON(timer_pending(timer) || !timer->function);
741 spin_lock_irqsave(&base->lock, flags);
742 timer_set_base(timer, base);
743 debug_timer_activate(timer);
744 internal_add_timer(base, timer);
745 /*
746 * Check whether the other CPU is idle and needs to be
747 * triggered to reevaluate the timer wheel when nohz is
748 * active. We are protected against the other CPU fiddling
749 * with the timer by holding the timer base lock. This also
750 * makes sure that a CPU on the way to idle can not evaluate
751 * the timer wheel.
752 */
753 wake_up_idle_cpu(cpu);
754 spin_unlock_irqrestore(&base->lock, flags);
755}
756
757/**
719 * del_timer - deactivate a timer. 758 * del_timer - deactivate a timer.
720 * @timer: the timer to be deactivated 759 * @timer: the timer to be deactivated
721 * 760 *
@@ -744,7 +783,6 @@ int del_timer(struct timer_list *timer)
744 783
745 return ret; 784 return ret;
746} 785}
747
748EXPORT_SYMBOL(del_timer); 786EXPORT_SYMBOL(del_timer);
749 787
750#ifdef CONFIG_SMP 788#ifdef CONFIG_SMP
@@ -778,7 +816,6 @@ out:
778 816
779 return ret; 817 return ret;
780} 818}
781
782EXPORT_SYMBOL(try_to_del_timer_sync); 819EXPORT_SYMBOL(try_to_del_timer_sync);
783 820
784/** 821/**
@@ -816,7 +853,6 @@ int del_timer_sync(struct timer_list *timer)
816 cpu_relax(); 853 cpu_relax();
817 } 854 }
818} 855}
819
820EXPORT_SYMBOL(del_timer_sync); 856EXPORT_SYMBOL(del_timer_sync);
821#endif 857#endif
822 858
@@ -1314,7 +1350,7 @@ signed long __sched schedule_timeout(signed long timeout)
1314 expire = timeout + jiffies; 1350 expire = timeout + jiffies;
1315 1351
1316 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); 1352 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1317 __mod_timer(&timer, expire); 1353 __mod_timer(&timer, expire, false);
1318 schedule(); 1354 schedule();
1319 del_singleshot_timer_sync(&timer); 1355 del_singleshot_timer_sync(&timer);
1320 1356
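With __mod_timer() gaining a pending_only flag, the exported surface above splits into add_timer() for the first arming, mod_timer() which (re)activates unconditionally, and mod_timer_pending() which only refreshes a timer that is still queued. A hypothetical module-style sketch of that split; setup_timer() and the unsigned-long callback signature are assumed from the timer API of this era and are not part of this diff:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timeout(unsigned long data)
{
	pr_info("demo timer fired\n");
}

static int __init demo_init(void)
{
	setup_timer(&demo_timer, demo_timeout, 0);
	demo_timer.expires = jiffies + HZ;
	add_timer(&demo_timer);                       /* first arming            */

	mod_timer(&demo_timer, jiffies + 2 * HZ);     /* always (re)activates    */
	mod_timer_pending(&demo_timer, jiffies + HZ); /* no-op once deleted      */
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");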
diff --git a/kernel/user.c b/kernel/user.c
index fbb300e6191f..850e0ba41c1e 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -20,7 +20,7 @@
20 20
21struct user_namespace init_user_ns = { 21struct user_namespace init_user_ns = {
22 .kref = { 22 .kref = {
23 .refcount = ATOMIC_INIT(1), 23 .refcount = ATOMIC_INIT(2),
24 }, 24 },
25 .creator = &root_user, 25 .creator = &root_user,
26}; 26};
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e53ee18ef431..3003ecad08f4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -423,7 +423,7 @@ void flush_workqueue(struct workqueue_struct *wq)
423 might_sleep(); 423 might_sleep();
424 lock_map_acquire(&wq->lockdep_map); 424 lock_map_acquire(&wq->lockdep_map);
425 lock_map_release(&wq->lockdep_map); 425 lock_map_release(&wq->lockdep_map);
426 for_each_cpu_mask_nr(cpu, *cpu_map) 426 for_each_cpu(cpu, cpu_map)
427 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 427 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
428} 428}
429EXPORT_SYMBOL_GPL(flush_workqueue); 429EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -554,7 +554,7 @@ static void wait_on_work(struct work_struct *work)
554 wq = cwq->wq; 554 wq = cwq->wq;
555 cpu_map = wq_cpu_map(wq); 555 cpu_map = wq_cpu_map(wq);
556 556
557 for_each_cpu_mask_nr(cpu, *cpu_map) 557 for_each_cpu(cpu, cpu_map)
558 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 558 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
559} 559}
560 560
@@ -925,7 +925,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
925 list_del(&wq->list); 925 list_del(&wq->list);
926 spin_unlock(&workqueue_lock); 926 spin_unlock(&workqueue_lock);
927 927
928 for_each_cpu_mask_nr(cpu, *cpu_map) 928 for_each_cpu(cpu, cpu_map)
929 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); 929 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
930 cpu_maps_update_done(); 930 cpu_maps_update_done();
931 931