Diffstat (limited to 'kernel')
 kernel/async.c            |  13
 kernel/fork.c             |   2
 kernel/futex.c            |  24
 kernel/irq/Makefile       |   2
 kernel/irq/chip.c         |  12
 kernel/irq/handle.c       |  58
 kernel/irq/internals.h    |   5
 kernel/irq/manage.c       |  17
 kernel/irq/migration.c    |  14
 kernel/irq/numa_migrate.c |  38
 kernel/kexec.c            |   2
 kernel/kmod.c             |   4
 kernel/mutex.c            |   4
 kernel/power/disk.c       |  21
 kernel/power/main.c       |   7
 kernel/ptrace.c           |   2
 kernel/sched.c            | 302
 kernel/sched_cpupri.c     |   2
 kernel/sched_fair.c       |  13
 kernel/sched_idletask.c   |   3
 kernel/sched_rt.c         |   2
 kernel/smp.c              |   2
 kernel/softirq.c          |   2
 kernel/sysctl.c           |   8
 kernel/time/timekeeping.c |   2
 kernel/timer.c            |  86
 kernel/wait.c             |   2
 27 files changed, 395 insertions(+), 254 deletions(-)
diff --git a/kernel/async.c b/kernel/async.c
index 968ef9457d4e..27235f5de198 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -92,19 +92,18 @@ extern int initcall_debug;
 static async_cookie_t __lowest_in_progress(struct list_head *running)
 {
         struct async_entry *entry;
+
         if (!list_empty(running)) {
                 entry = list_first_entry(running,
                         struct async_entry, list);
                 return entry->cookie;
-        } else if (!list_empty(&async_pending)) {
-                entry = list_first_entry(&async_pending,
-                        struct async_entry, list);
-                return entry->cookie;
-        } else {
-                /* nothing in progress... next_cookie is "infinity" */
-                return next_cookie;
         }
 
+        list_for_each_entry(entry, &async_pending, list)
+                if (entry->running == running)
+                        return entry->cookie;
+
+        return next_cookie;     /* "infinity" value */
 }
 
 static async_cookie_t lowest_in_progress(struct list_head *running)
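
Note on the async.c hunk: the old __lowest_in_progress() assumed any entry on the
global async_pending list was destined for the running list being queried, so it
could report a cookie belonging to an unrelated queue. The new code scans
async_pending and only counts entries whose ->running pointer targets the queried
list, falling back to next_cookie ("infinity") when none match. A rough userspace
sketch of the corrected lookup, with a plain singly linked list and illustrative
names standing in for the kernel's struct list_head machinery:

    #include <stddef.h>

    typedef unsigned long long async_cookie_t;

    struct async_entry {
        async_cookie_t cookie;
        void *running;                  /* the queue this entry targets */
        struct async_entry *next;
    };

    static async_cookie_t next_cookie = ~0ULL;  /* "infinity" stand-in */

    static async_cookie_t lowest_pending_for(struct async_entry *pending,
                                             void *running)
    {
        struct async_entry *entry;

        /* cookies are issued in order, so the first match is the lowest */
        for (entry = pending; entry; entry = entry->next)
            if (entry->running == running)
                return entry->cookie;

        return next_cookie;
    }
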
diff --git a/kernel/fork.c b/kernel/fork.c
index b9e2edd00726..875ffbdd96d0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1409,7 +1409,7 @@ long do_fork(unsigned long clone_flags,
         }
 
         audit_finish_fork(p);
-        tracehook_report_clone(trace, regs, clone_flags, nr, p);
+        tracehook_report_clone(regs, clone_flags, nr, p);
 
         /*
          * We set PF_STARTING at creation in case tracing wants to
diff --git a/kernel/futex.c b/kernel/futex.c
index eef8cd26b5e5..d546b2d53a62 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -193,6 +193,7 @@ static void drop_futex_key_refs(union futex_key *key)
  * @uaddr: virtual address of the futex
  * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
  * @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -203,7 +204,8 @@ static void drop_futex_key_refs(union futex_key *key)
  *
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
-static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+static int
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
         unsigned long address = (unsigned long)uaddr;
         struct mm_struct *mm = current->mm;
@@ -226,7 +228,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
          * but access_ok() should be faster than find_vma()
          */
         if (!fshared) {
-                if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+                if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
                         return -EFAULT;
                 key->private.mm = mm;
                 key->private.address = address;
@@ -235,7 +237,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
         }
 
 again:
-        err = get_user_pages_fast(address, 1, 0, &page);
+        err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
         if (err < 0)
                 return err;
 
@@ -677,7 +679,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
         if (!bitset)
                 return -EINVAL;
 
-        ret = get_futex_key(uaddr, fshared, &key);
+        ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out;
 
@@ -723,10 +725,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
         int ret, op_ret;
 
 retry:
-        ret = get_futex_key(uaddr1, fshared, &key1);
+        ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out;
-        ret = get_futex_key(uaddr2, fshared, &key2);
+        ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
         if (unlikely(ret != 0))
                 goto out_put_key1;
 
@@ -814,10 +816,10 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
         int ret, drop_count = 0;
 
 retry:
-        ret = get_futex_key(uaddr1, fshared, &key1);
+        ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out;
-        ret = get_futex_key(uaddr2, fshared, &key2);
+        ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out_put_key1;
 
@@ -1140,7 +1142,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
         q.bitset = bitset;
 retry:
         q.key = FUTEX_KEY_INIT;
-        ret = get_futex_key(uaddr, fshared, &q.key);
+        ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
         if (unlikely(ret != 0))
                 goto out;
 
@@ -1330,7 +1332,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
         q.pi_state = NULL;
 retry:
         q.key = FUTEX_KEY_INIT;
-        ret = get_futex_key(uaddr, fshared, &q.key);
+        ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
         if (unlikely(ret != 0))
                 goto out;
 
@@ -1594,7 +1596,7 @@ retry:
         if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
                 return -EPERM;
 
-        ret = get_futex_key(uaddr, fshared, &key);
+        ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
         if (unlikely(ret != 0))
                 goto out;
 
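
Note on the futex.c hunks: the new rw argument lets read-only operations (plain
wait, wake, requeue) validate the user mapping with VERIFY_READ, while operations
that must store to the futex word (the wake-op destination, PI lock and unlock)
keep VERIFY_WRITE; the same flag decides whether get_user_pages_fast() faults the
page in writably. A minimal userspace demo of the distinction the kernel is now
drawing — FUTEX_WAIT only needs the kernel to read the word, the write happens in
user space before the wake (illustrative; compile with -lpthread on Linux):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <pthread.h>
    #include <stdio.h>

    static int futex_word;      /* the 32-bit word both sides agree on */

    static long sys_futex(int *uaddr, int op, int val)
    {
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    static void *waiter(void *arg)
    {
        /* kernel compares *uaddr with 0 and sleeps: a pure read */
        sys_futex(&futex_word, FUTEX_WAIT, 0);
        printf("waiter woken\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        sleep(1);                               /* crude ordering for the demo */
        __sync_fetch_and_add(&futex_word, 1);   /* user space writes the word */
        sys_futex(&futex_word, FUTEX_WAKE, 1);  /* wake one waiter */
        pthread_join(&t, NULL);
        return 0;
    }
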
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 3394f8f52964..7d047808419d 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,5 +3,5 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
-obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c687ba4363f2..13c68e71b726 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -359,7 +359,6 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
         spin_lock(&desc->lock);
         mask_ack_irq(desc, irq);
-        desc = irq_remap_to_desc(irq, desc);
 
         if (unlikely(desc->status & IRQ_INPROGRESS))
                 goto out_unlock;
@@ -438,7 +437,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
         desc->status &= ~IRQ_INPROGRESS;
 out:
         desc->chip->eoi(irq);
-        desc = irq_remap_to_desc(irq, desc);
 
         spin_unlock(&desc->lock);
 }
@@ -475,7 +473,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                      !desc->action)) {
                 desc->status |= (IRQ_PENDING | IRQ_MASKED);
                 mask_ack_irq(desc, irq);
-                desc = irq_remap_to_desc(irq, desc);
                 goto out_unlock;
         }
         kstat_incr_irqs_this_cpu(irq, desc);
@@ -483,7 +480,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
         /* Start handling the irq */
         if (desc->chip->ack)
                 desc->chip->ack(irq);
-        desc = irq_remap_to_desc(irq, desc);
 
         /* Mark the IRQ currently in progress.*/
         desc->status |= IRQ_INPROGRESS;
@@ -544,10 +540,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
         if (!noirqdebug)
                 note_interrupt(irq, desc, action_ret);
 
-        if (desc->chip->eoi) {
+        if (desc->chip->eoi)
                 desc->chip->eoi(irq);
-                desc = irq_remap_to_desc(irq, desc);
-        }
 }
 
 void
@@ -582,10 +576,8 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 
         /* Uninstall? */
         if (handle == handle_bad_irq) {
-                if (desc->chip != &no_irq_chip) {
+                if (desc->chip != &no_irq_chip)
                         mask_ack_irq(desc, irq);
-                        desc = irq_remap_to_desc(irq, desc);
-                }
                 desc->status |= IRQ_DISABLED;
                 desc->depth = 1;
         }
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 26e08754744f..18041a254d32 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
@@ -81,45 +82,48 @@ static struct irq_desc irq_desc_init = {
         .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 };
 
-void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
-        int node;
         void *ptr;
 
-        node = cpu_to_node(cpu);
-        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
+        if (slab_is_available())
+                ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+                                   GFP_ATOMIC, node);
+        else
+                ptr = alloc_bootmem_node(NODE_DATA(node),
+                                nr * sizeof(*desc->kstat_irqs));
 
         /*
          * don't overwite if can not get new one
          * init_copy_kstat_irqs() could still use old one
          */
         if (ptr) {
-                printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
-                        cpu, node);
+                printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
                 desc->kstat_irqs = ptr;
         }
 }
 
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 {
         memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
 
         spin_lock_init(&desc->lock);
         desc->irq = irq;
 #ifdef CONFIG_SMP
-        desc->cpu = cpu;
+        desc->node = node;
 #endif
         lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-        init_kstat_irqs(desc, cpu, nr_cpu_ids);
+        init_kstat_irqs(desc, node, nr_cpu_ids);
         if (!desc->kstat_irqs) {
                 printk(KERN_ERR "can not alloc kstat_irqs\n");
                 BUG_ON(1);
         }
-        if (!init_alloc_desc_masks(desc, cpu, false)) {
+        if (!alloc_desc_masks(desc, node, false)) {
                 printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
                 BUG_ON(1);
         }
-        arch_init_chip_data(desc, cpu);
+        init_desc_masks(desc);
+        arch_init_chip_data(desc, node);
 }
 
 /*
@@ -169,7 +173,8 @@ int __init early_irq_init(void)
                 desc[i].irq = i;
                 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-                init_alloc_desc_masks(&desc[i], 0, true);
+                alloc_desc_masks(&desc[i], 0, true);
+                init_desc_masks(&desc[i]);
                 irq_desc_ptrs[i] = desc + i;
         }
 
@@ -187,11 +192,10 @@ struct irq_desc *irq_to_desc(unsigned int irq)
         return NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 {
         struct irq_desc *desc;
         unsigned long flags;
-        int node;
 
         if (irq >= nr_irqs) {
                 WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
@@ -210,15 +214,17 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
         if (desc)
                 goto out_unlock;
 
-        node = cpu_to_node(cpu);
-        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-        printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
-                irq, cpu, node);
+        if (slab_is_available())
+                desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+        else
+                desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+
+        printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
         if (!desc) {
                 printk(KERN_ERR "can not alloc irq_desc\n");
                 BUG_ON(1);
         }
-        init_one_irq_desc(irq, desc, cpu);
+        init_one_irq_desc(irq, desc, node);
 
         irq_desc_ptrs[irq] = desc;
 
@@ -256,7 +262,8 @@ int __init early_irq_init(void)
 
         for (i = 0; i < count; i++) {
                 desc[i].irq = i;
-                init_alloc_desc_masks(&desc[i], 0, true);
+                alloc_desc_masks(&desc[i], 0, true);
+                init_desc_masks(&desc[i]);
                 desc[i].kstat_irqs = kstat_irqs_all[i];
         }
         return arch_early_irq_init();
@@ -267,7 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
         return (irq < NR_IRQS) ? irq_desc + irq : NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
 {
         return irq_to_desc(irq);
 }
@@ -453,11 +460,8 @@ unsigned int __do_IRQ(unsigned int irq)
         /*
          * No locking required for CPU-local interrupts:
          */
-        if (desc->chip->ack) {
+        if (desc->chip->ack)
                 desc->chip->ack(irq);
-                /* get new one */
-                desc = irq_remap_to_desc(irq, desc);
-        }
         if (likely(!(desc->status & IRQ_DISABLED))) {
                 action_ret = handle_IRQ_event(irq, desc->action);
                 if (!noirqdebug)
@@ -468,10 +472,8 @@ unsigned int __do_IRQ(unsigned int irq)
         }
 
         spin_lock(&desc->lock);
-        if (desc->chip->ack) {
+        if (desc->chip->ack)
                 desc->chip->ack(irq);
-                desc = irq_remap_to_desc(irq, desc);
-        }
         /*
          * REPLAY is when Linux resends an IRQ that was dropped earlier
          * WAITING is used by probe to mark irqs that are being tested
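
Note on the handle.c hunks: the allocators now work before kmalloc is initialized.
slab_is_available() selects between kzalloc_node() and the bootmem allocator for
both irq_desc and kstat_irqs (hence the __ref annotations: the bootmem path
references __init code but is unreachable once slab is up). The boot-order
fallback in miniature, as a userspace analogy with illustrative names:

    #include <stdlib.h>
    #include <string.h>

    /* Before the real allocator exists, hand out zeroed chunks of a
     * static boot pool; afterwards defer to calloc(). */
    static char boot_pool[4096];
    static size_t boot_used;
    static int allocator_up;        /* stands in for slab_is_available() */

    static void *early_zalloc(size_t size)
    {
        void *ptr;

        if (allocator_up)
            return calloc(1, size);

        if (boot_used + size > sizeof(boot_pool))
            return NULL;            /* boot pool exhausted */

        ptr = boot_pool + boot_used;
        boot_used += size;
        return memset(ptr, 0, size);
    }
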
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 01ce20eab38f..73468253143b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -16,7 +16,7 @@ extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
 extern struct lock_class_key irq_desc_lock_class;
-extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
 
@@ -42,6 +42,9 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
+extern void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+
 /*
  * Debugging printout:
  */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2734eca59243..aaf5c9d05770 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,7 +80,7 @@ int irq_can_set_affinity(unsigned int irq)
80 return 1; 80 return 1;
81} 81}
82 82
83static void 83void
84irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) 84irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
85{ 85{
86 struct irqaction *action = desc->action; 86 struct irqaction *action = desc->action;
@@ -109,17 +109,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
         spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-        if (desc->status & IRQ_MOVE_PCNTXT)
-                desc->chip->set_affinity(irq, cpumask);
+        if (desc->status & IRQ_MOVE_PCNTXT) {
+                if (!desc->chip->set_affinity(irq, cpumask)) {
+                        cpumask_copy(desc->affinity, cpumask);
+                        irq_set_thread_affinity(desc, cpumask);
+                }
+        }
         else {
                 desc->status |= IRQ_MOVE_PENDING;
                 cpumask_copy(desc->pending_mask, cpumask);
         }
 #else
-        cpumask_copy(desc->affinity, cpumask);
-        desc->chip->set_affinity(irq, cpumask);
+        if (!desc->chip->set_affinity(irq, cpumask)) {
+                cpumask_copy(desc->affinity, cpumask);
+                irq_set_thread_affinity(desc, cpumask);
+        }
 #endif
-        irq_set_thread_affinity(desc, cpumask);
         desc->status |= IRQ_AFFINITY_SET;
         spin_unlock_irqrestore(&desc->lock, flags);
         return 0;
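
Note on the manage.c hunk (and the migration.c hunk below): desc->affinity and
the handler-thread affinity are now updated only when chip->set_affinity()
reports success, so the software view can no longer drift from what the hardware
actually accepted. The commit-on-success shape, reduced to an illustrative
userspace sketch:

    /* Push the change to hardware first; mirror it into software state
     * only if the hardware call succeeded. */
    struct irq_state {
        unsigned long affinity;     /* what software believes */
    };

    static int hw_set_affinity(unsigned long mask)
    {
        return mask ? 0 : -1;       /* pretend empty masks are rejected */
    }

    static int set_affinity(struct irq_state *st, unsigned long mask)
    {
        if (hw_set_affinity(mask))
            return -1;              /* leave st->affinity untouched */

        st->affinity = mask;        /* commit only on success */
        return 0;
    }
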
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index e05ad9be43b7..cfe767ca1545 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,5 +1,8 @@
 
 #include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
 
 void move_masked_irq(int irq)
 {
@@ -39,11 +42,12 @@ void move_masked_irq(int irq)
          * masking the irqs.
          */
         if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-                   < nr_cpu_ids)) {
-                cpumask_and(desc->affinity,
-                            desc->pending_mask, cpu_online_mask);
-                desc->chip->set_affinity(irq, desc->affinity);
-        }
+                   < nr_cpu_ids))
+                if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
+                        cpumask_copy(desc->affinity, desc->pending_mask);
+                        irq_set_thread_affinity(desc, desc->pending_mask);
+                }
+
         cpumask_clear(desc->pending_mask);
 }
 
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 44bbdcbaf8d2..2f69bee57bf2 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -15,9 +15,9 @@
 
 static void init_copy_kstat_irqs(struct irq_desc *old_desc,
                                  struct irq_desc *desc,
-                                 int cpu, int nr)
+                                 int node, int nr)
 {
-        init_kstat_irqs(desc, cpu, nr);
+        init_kstat_irqs(desc, node, nr);
 
         if (desc->kstat_irqs != old_desc->kstat_irqs)
                 memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
@@ -34,20 +34,20 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
 }
 
 static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
-                 struct irq_desc *desc, int cpu)
+                 struct irq_desc *desc, int node)
 {
         memcpy(desc, old_desc, sizeof(struct irq_desc));
-        if (!init_alloc_desc_masks(desc, cpu, false)) {
+        if (!alloc_desc_masks(desc, node, false)) {
                 printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
                                 "for migration.\n", irq);
                 return false;
         }
         spin_lock_init(&desc->lock);
-        desc->cpu = cpu;
+        desc->node = node;
         lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-        init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+        init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
         init_copy_desc_masks(old_desc, desc);
-        arch_init_copy_chip_data(old_desc, desc, cpu);
+        arch_init_copy_chip_data(old_desc, desc, node);
         return true;
 }
 
@@ -59,12 +59,11 @@ static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
 }
 
 static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
-                                                int cpu)
+                                                int node)
 {
         struct irq_desc *desc;
         unsigned int irq;
         unsigned long flags;
-        int node;
 
         irq = old_desc->irq;
 
@@ -76,7 +75,6 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
         if (desc && old_desc != desc)
                 goto out_unlock;
 
-        node = cpu_to_node(cpu);
         desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
         if (!desc) {
                 printk(KERN_ERR "irq %d: can not get new irq_desc "
@@ -85,7 +83,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
                 desc = old_desc;
                 goto out_unlock;
         }
-        if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
+        if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
                 /* still use old one */
                 kfree(desc);
                 desc = old_desc;
@@ -97,9 +95,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 
         /* free the old one */
         free_one_irq_desc(old_desc, desc);
-        spin_unlock(&old_desc->lock);
         kfree(old_desc);
-        spin_lock(&desc->lock);
 
         return desc;
 
@@ -109,24 +105,14 @@ out_unlock:
         return desc;
 }
 
-struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
+struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 {
-        int old_cpu;
-        int node, old_node;
-
         /* those all static, do move them */
         if (desc->irq < NR_IRQS_LEGACY)
                 return desc;
 
-        old_cpu = desc->cpu;
-        if (old_cpu != cpu) {
-                node = cpu_to_node(cpu);
-                old_node = cpu_to_node(old_cpu);
-                if (old_node != node)
-                        desc = __real_move_irq_desc(desc, cpu);
-                else
-                        desc->cpu = cpu;
-        }
+        if (desc->node != node)
+                desc = __real_move_irq_desc(desc, node);
 
         return desc;
 }
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 5a758c6e4950..e4983770913b 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1451,7 +1451,6 @@ int kernel_kexec(void)
         error = device_suspend(PMSG_FREEZE);
         if (error)
                 goto Resume_console;
-        device_pm_lock();
         /* At this point, device_suspend() has been called,
          * but *not* device_power_down(). We *must*
          * device_power_down() now. Otherwise, drivers for
@@ -1489,7 +1488,6 @@ int kernel_kexec(void)
         enable_nonboot_cpus();
         device_power_up(PMSG_RESTORE);
  Resume_devices:
-        device_pm_unlock();
         device_resume(PMSG_RESTORE);
  Resume_console:
         resume_console();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b750675251e5..7e95bedb2bfc 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -370,8 +370,10 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
         sub_info->argv = argv;
         sub_info->envp = envp;
         sub_info->cred = prepare_usermodehelper_creds();
-        if (!sub_info->cred)
+        if (!sub_info->cred) {
+                kfree(sub_info);
                 return NULL;
+        }
 
   out:
         return sub_info;
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 507cf2b5e9f1..6ca5fe96e393 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -249,7 +249,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
                 /* didnt get the lock, go to sleep: */
                 spin_unlock_mutex(&lock->wait_lock, flags);
-                __schedule();
+                preempt_enable_no_resched();
+                schedule();
+                preempt_disable();
                 spin_lock_mutex(&lock->wait_lock, flags);
         }
 
255 257
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index b0dc9e7a0d17..5cb080e7eebd 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -215,8 +215,6 @@ static int create_image(int platform_mode)
215 if (error) 215 if (error)
216 return error; 216 return error;
217 217
218 device_pm_lock();
219
220 /* At this point, device_suspend() has been called, but *not* 218 /* At this point, device_suspend() has been called, but *not*
221 * device_power_down(). We *must* call device_power_down() now. 219 * device_power_down(). We *must* call device_power_down() now.
222 * Otherwise, drivers for some devices (e.g. interrupt controllers) 220 * Otherwise, drivers for some devices (e.g. interrupt controllers)
@@ -227,7 +225,7 @@ static int create_image(int platform_mode)
         if (error) {
                 printk(KERN_ERR "PM: Some devices failed to power down, "
                         "aborting hibernation\n");
-                goto Unlock;
+                return error;
         }
 
         error = platform_pre_snapshot(platform_mode);
@@ -280,9 +278,6 @@ static int create_image(int platform_mode)
         device_power_up(in_suspend ?
                         (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
 
- Unlock:
-        device_pm_unlock();
-
         return error;
 }
 
@@ -344,13 +339,11 @@ static int resume_target_kernel(bool platform_mode)
 {
         int error;
 
-        device_pm_lock();
-
         error = device_power_down(PMSG_QUIESCE);
         if (error) {
                 printk(KERN_ERR "PM: Some devices failed to power down, "
                         "aborting resume\n");
-                goto Unlock;
+                return error;
         }
 
         error = platform_pre_restore(platform_mode);
@@ -403,9 +396,6 @@ static int resume_target_kernel(bool platform_mode)
 
         device_power_up(PMSG_RECOVER);
 
- Unlock:
-        device_pm_unlock();
-
         return error;
 }
 
@@ -464,11 +454,9 @@ int hibernation_platform_enter(void)
                 goto Resume_devices;
         }
 
-        device_pm_lock();
-
         error = device_power_down(PMSG_HIBERNATE);
         if (error)
-                goto Unlock;
+                goto Resume_devices;
 
         error = hibernation_ops->prepare();
         if (error)
@@ -493,9 +481,6 @@ int hibernation_platform_enter(void)
 
         device_power_up(PMSG_RESTORE);
 
- Unlock:
-        device_pm_unlock();
-
  Resume_devices:
         entering_platform_hibernation = false;
         device_resume(PMSG_RESTORE);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f99ed6a75eac..868028280d13 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -289,12 +289,10 @@ static int suspend_enter(suspend_state_t state)
 {
         int error;
 
-        device_pm_lock();
-
         if (suspend_ops->prepare) {
                 error = suspend_ops->prepare();
                 if (error)
-                        goto Done;
+                        return error;
         }
 
         error = device_power_down(PMSG_SUSPEND);
@@ -343,9 +341,6 @@ static int suspend_enter(suspend_state_t state)
         if (suspend_ops->finish)
                 suspend_ops->finish();
 
- Done:
-        device_pm_unlock();
-
         return error;
 }
 
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0692ab5a0d67..42c317874cfa 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -304,6 +304,8 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
         if (child->ptrace) {
                 child->exit_code = data;
                 dead = __ptrace_detach(current, child);
+                if (!child->exit_state)
+                        wake_up_process(child);
         }
         write_unlock_irq(&tasklist_lock);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 26efa475bdc1..c3c04e256560 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -630,6 +630,10 @@ struct rq {
         struct list_head migration_queue;
 #endif
 
+        /* calc_load related fields */
+        unsigned long calc_load_update;
+        long calc_load_active;
+
 #ifdef CONFIG_SCHED_HRTICK
 #ifdef CONFIG_SMP
         int hrtick_csd_pending;
@@ -1728,6 +1732,8 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 }
 #endif
 
+static void calc_load_account_active(struct rq *this_rq);
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -2458,6 +2464,17 @@ out:
         return success;
 }
 
+/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes.  Returns 1 if the process was woken up, 0 if it was already
+ * running.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
 int wake_up_process(struct task_struct *p)
 {
         return try_to_wake_up(p, TASK_ALL, 0);
@@ -2856,19 +2873,72 @@ unsigned long nr_iowait(void)
         return sum;
 }
 
-unsigned long nr_active(void)
+/* Variables and functions for calc_load */
+static atomic_long_t calc_load_tasks;
+static unsigned long calc_load_update;
+unsigned long avenrun[3];
+EXPORT_SYMBOL(avenrun);
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:      pointer to dest load array
+ * @offset:     offset to add
+ * @shift:      shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
 {
-        unsigned long i, running = 0, uninterruptible = 0;
+        loads[0] = (avenrun[0] + offset) << shift;
+        loads[1] = (avenrun[1] + offset) << shift;
+        loads[2] = (avenrun[2] + offset) << shift;
+}
 
-        for_each_online_cpu(i) {
-                running += cpu_rq(i)->nr_running;
-                uninterruptible += cpu_rq(i)->nr_uninterruptible;
-        }
+static unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+        load *= exp;
+        load += active * (FIXED_1 - exp);
+        return load >> FSHIFT;
+}
 
-        if (unlikely((long)uninterruptible < 0))
-                uninterruptible = 0;
+/*
+ * calc_load - update the avenrun load estimates 10 ticks after the
+ * CPUs have updated calc_load_tasks.
+ */
+void calc_global_load(void)
+{
+        unsigned long upd = calc_load_update + 10;
+        long active;
+
+        if (time_before(jiffies, upd))
+                return;
+
+        active = atomic_long_read(&calc_load_tasks);
+        active = active > 0 ? active * FIXED_1 : 0;
 
-        return running + uninterruptible;
+        avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+        avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+        calc_load_update += LOAD_FREQ;
+}
+
+/*
+ * Either called from update_cpu_load() or from a cpu going idle
+ */
+static void calc_load_account_active(struct rq *this_rq)
+{
+        long nr_active, delta;
+
+        nr_active = this_rq->nr_running;
+        nr_active += (long) this_rq->nr_uninterruptible;
+
+        if (nr_active != this_rq->calc_load_active) {
+                delta = nr_active - this_rq->calc_load_active;
+                this_rq->calc_load_active = nr_active;
+                atomic_long_add(delta, &calc_load_tasks);
+        }
 }
 
 /*
@@ -2899,6 +2969,11 @@ static void update_cpu_load(struct rq *this_rq)
                 new_load += scale-1;
                 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
         }
+
+        if (time_after_eq(jiffies, this_rq->calc_load_update)) {
+                this_rq->calc_load_update += LOAD_FREQ;
+                calc_load_account_active(this_rq);
+        }
 }
 
 #ifdef CONFIG_SMP
@@ -4240,10 +4315,126 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 static struct {
         atomic_t load_balancer;
         cpumask_var_t cpu_mask;
+        cpumask_var_t ilb_grp_nohz_mask;
 } nohz ____cacheline_aligned = {
         .load_balancer = ATOMIC_INIT(-1),
 };
 
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+/**
+ * lowest_flag_domain - Return lowest sched_domain containing flag.
+ * @cpu:        The cpu whose lowest level of sched domain is to
+ *              be returned.
+ * @flag:       The flag to check for the lowest sched_domain
+ *              for the given cpu.
+ *
+ * Returns the lowest sched_domain of a cpu which contains the given flag.
+ */
+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
+{
+        struct sched_domain *sd;
+
+        for_each_domain(cpu, sd)
+                if (sd && (sd->flags & flag))
+                        break;
+
+        return sd;
+}
+
+/**
+ * for_each_flag_domain - Iterates over sched_domains containing the flag.
+ * @cpu:        The cpu whose domains we're iterating over.
+ * @sd:         variable holding the value of the power_savings_sd
+ *              for cpu.
+ * @flag:       The flag to filter the sched_domains to be iterated.
+ *
+ * Iterates over all the scheduler domains for a given cpu that has the 'flag'
+ * set, starting from the lowest sched_domain to the highest.
+ */
+#define for_each_flag_domain(cpu, sd, flag) \
+        for (sd = lowest_flag_domain(cpu, flag); \
+                (sd && (sd->flags & flag)); sd = sd->parent)
+
+/**
+ * is_semi_idle_group - Checks if the given sched_group is semi-idle.
+ * @ilb_group:  group to be checked for semi-idleness
+ *
+ * Returns:     1 if the group is semi-idle. 0 otherwise.
+ *
+ * We define a sched_group to be semi idle if it has atleast one idle-CPU
+ * and atleast one non-idle CPU. This helper function checks if the given
+ * sched_group is semi-idle or not.
+ */
+static inline int is_semi_idle_group(struct sched_group *ilb_group)
+{
+        cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
+                                        sched_group_cpus(ilb_group));
+
+        /*
+         * A sched_group is semi-idle when it has atleast one busy cpu
+         * and atleast one idle cpu.
+         */
+        if (cpumask_empty(nohz.ilb_grp_nohz_mask))
+                return 0;
+
+        if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
+                return 0;
+
+        return 1;
+}
+/**
+ * find_new_ilb - Finds the optimum idle load balancer for nomination.
+ * @cpu:        The cpu which is nominating a new idle_load_balancer.
+ *
+ * Returns:     Returns the id of the idle load balancer if it exists,
+ *              Else, returns >= nr_cpu_ids.
+ *
+ * This algorithm picks the idle load balancer such that it belongs to a
+ * semi-idle powersavings sched_domain. The idea is to try and avoid
+ * completely idle packages/cores just for the purpose of idle load balancing
+ * when there are other idle cpu's which are better suited for that job.
+ */
+static int find_new_ilb(int cpu)
+{
+        struct sched_domain *sd;
+        struct sched_group *ilb_group;
+
+        /*
+         * Have idle load balancer selection from semi-idle packages only
+         * when power-aware load balancing is enabled
+         */
+        if (!(sched_smt_power_savings || sched_mc_power_savings))
+                goto out_done;
+
+        /*
+         * Optimize for the case when we have no idle CPUs or only one
+         * idle CPU. Don't walk the sched_domain hierarchy in such cases
+         */
+        if (cpumask_weight(nohz.cpu_mask) < 2)
+                goto out_done;
+
+        for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
+                ilb_group = sd->groups;
+
+                do {
+                        if (is_semi_idle_group(ilb_group))
+                                return cpumask_first(nohz.ilb_grp_nohz_mask);
+
+                        ilb_group = ilb_group->next;
+
+                } while (ilb_group != sd->groups);
+        }
+
+out_done:
+        return cpumask_first(nohz.cpu_mask);
+}
+#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
+static inline int find_new_ilb(int call_cpu)
+{
+        return cpumask_first(nohz.cpu_mask);
+}
+#endif
+
 /*
  * This routine will try to nominate the ilb (idle load balancing)
  * owner among the cpus whose ticks are stopped. ilb owner will do the idle
@@ -4298,8 +4489,24 @@ int select_nohz_load_balancer(int stop_tick)
                 /* make me the ilb owner */
                 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
                         return 1;
-        } else if (atomic_read(&nohz.load_balancer) == cpu)
+        } else if (atomic_read(&nohz.load_balancer) == cpu) {
+                int new_ilb;
+
+                if (!(sched_smt_power_savings ||
+                                        sched_mc_power_savings))
+                        return 1;
+                /*
+                 * Check to see if there is a more power-efficient
+                 * ilb.
+                 */
+                new_ilb = find_new_ilb(cpu);
+                if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
+                        atomic_set(&nohz.load_balancer, -1);
+                        resched_cpu(new_ilb);
+                        return 0;
+                }
                 return 1;
+        }
         } else {
                 if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
                         return 0;
@@ -4468,15 +4675,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
         }
 
         if (atomic_read(&nohz.load_balancer) == -1) {
-                /*
-                 * simple selection for now: Nominate the
-                 * first cpu in the nohz list to be the next
-                 * ilb owner.
-                 *
-                 * TBD: Traverse the sched domains and nominate
-                 * the nearest cpu in the nohz.cpu_mask.
-                 */
-                int ilb = cpumask_first(nohz.cpu_mask);
+                int ilb = find_new_ilb(cpu);
 
                 if (ilb < nr_cpu_ids)
                         resched_cpu(ilb);
@@ -5007,13 +5206,15 @@ pick_next_task(struct rq *rq)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched __schedule(void)
+asmlinkage void __sched schedule(void)
 {
         struct task_struct *prev, *next;
         unsigned long *switch_count;
         struct rq *rq;
         int cpu;
 
+need_resched:
+        preempt_disable();
         cpu = smp_processor_id();
         rq = cpu_rq(cpu);
         rcu_qsctr_inc(cpu);
@@ -5070,15 +5271,9 @@ need_resched_nonpreemptible:
 
         if (unlikely(reacquire_kernel_lock(current) < 0))
                 goto need_resched_nonpreemptible;
-}
 
-asmlinkage void __sched schedule(void)
-{
-need_resched:
-        preempt_disable();
-        __schedule();
         preempt_enable_no_resched();
-        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+        if (need_resched())
                 goto need_resched;
 }
 EXPORT_SYMBOL(schedule);
@@ -5221,7 +5416,7 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                         int nr_exclusive, int sync, void *key)
 {
         wait_queue_t *curr, *next;
@@ -5241,6 +5436,9 @@ void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: is directly passed to the wakeup function
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
  */
 void __wake_up(wait_queue_head_t *q, unsigned int mode,
                         int nr_exclusive, void *key)
@@ -5279,6 +5477,9 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
  * with each other. This can prevent needless bouncing between CPUs.
  *
  * On UP it can prevent extra preemption.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
  */
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                         int nr_exclusive, void *key)
@@ -5315,6 +5516,9 @@ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  * awakened in the same order in which they were queued.
  *
  * See also complete_all(), wait_for_completion() and related routines.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
  */
 void complete(struct completion *x)
 {
@@ -5332,6 +5536,9 @@ EXPORT_SYMBOL(complete);
  * @x:  holds the state of this particular completion
  *
  * This will wake up all threads waiting on this particular completion event.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
  */
 void complete_all(struct completion *x)
 {
@@ -6490,8 +6697,9 @@ void sched_show_task(struct task_struct *p)
 #ifdef CONFIG_DEBUG_STACK_USAGE
         free = stack_not_used(p);
 #endif
-        printk(KERN_CONT "%5lu %5d %6d\n", free,
-                task_pid_nr(p), task_pid_nr(p->real_parent));
+        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+                task_pid_nr(p), task_pid_nr(p->real_parent),
+                (unsigned long)task_thread_info(p)->flags);
 
         show_stack(p, NULL);
 }
@@ -6970,6 +7178,14 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 
         }
 }
+
+/*
+ * remove the tasks which were accounted by rq from calc_load_tasks.
+ */
+static void calc_global_load_remove(struct rq *rq)
+{
+        atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+}
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -7204,6 +7420,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 /* Update our root-domain */
                 rq = cpu_rq(cpu);
                 spin_lock_irqsave(&rq->lock, flags);
+                rq->calc_load_update = calc_load_update;
+                rq->calc_load_active = 0;
                 if (rq->rd) {
                         BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
@@ -7243,7 +7461,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 cpuset_unlock();
                 migrate_nr_uninterruptible(rq);
                 BUG_ON(rq->nr_running != 0);
-
+                calc_global_load_remove(rq);
                 /*
                  * No need to migrate the tasks: it was best-effort if
                  * they didn't take sched_hotcpu_mutex. Just wake up
@@ -7753,8 +7971,9 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 
 /*
  * The cpus mask in sched_group and sched_domain hangs off the end.
- * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
- * for nr_cpu_ids < CONFIG_NR_CPUS.
+ *
+ * ( See the the comments in include/linux/sched.h:struct sched_group
+ *   and struct sched_domain. )
  */
 struct static_sched_group {
         struct sched_group sg;
@@ -7875,7 +8094,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
                 struct sched_domain *sd;
 
                 sd = &per_cpu(phys_domains, j).sd;
-                if (j != cpumask_first(sched_group_cpus(sd->groups))) {
+                if (j != group_first_cpu(sd->groups)) {
                         /*
                          * Only add "power" once for each
                          * physical package.
@@ -7953,7 +8172,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 
         WARN_ON(!sd || !sd->groups);
 
-        if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
+        if (cpu != group_first_cpu(sd->groups))
                 return;
 
         child = sd->child;
@@ -8938,6 +9157,8 @@ void __init sched_init(void)
                 rq = cpu_rq(i);
                 spin_lock_init(&rq->lock);
                 rq->nr_running = 0;
+                rq->calc_load_active = 0;
+                rq->calc_load_update = jiffies + LOAD_FREQ;
                 init_cfs_rq(&rq->cfs, rq);
                 init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -9045,6 +9266,9 @@ void __init sched_init(void)
          * when this runqueue becomes "idle".
          */
         init_idle(current, smp_processor_id());
+
+        calc_load_update = jiffies + LOAD_FREQ;
+
         /*
          * During early bootup we pretend to be a normal task:
          */
@@ -9055,6 +9279,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
         alloc_bootmem_cpumask_var(&nohz.cpu_mask);
+        alloc_bootmem_cpumask_var(&nohz.ilb_grp_nohz_mask);
 #endif
         alloc_bootmem_cpumask_var(&cpu_isolated_map);
 #endif /* SMP */
@@ -9800,6 +10025,13 @@ static int sched_rt_global_constraints(void)
         if (sysctl_sched_rt_period <= 0)
                 return -EINVAL;
 
+        /*
+         * There's always some RT tasks in the root group
+         * -- migration, kstopmachine etc..
+         */
+        if (sysctl_sched_rt_runtime == 0)
+                return -EBUSY;
+
         spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
         for_each_possible_cpu(i) {
                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
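
Note on the sched.c calc_load hunks: the per-tick global nr_active() scan is
replaced by per-runqueue sampling — calc_load_account_active() folds each
runqueue's delta into one atomic counter — and the avenrun computation moves
here from kernel/timer.c. The fixed-point update itself is unchanged: with
FSHIFT = 11 fractional bits, each 5-second sample decays the old average by
EXP_n/FIXED_1 and blends in the current active count. A standalone rendering of
that arithmetic, using the kernel's constants from include/linux/sched.h:

    #include <stdio.h>

    #define FSHIFT   11                  /* bits of fractional precision */
    #define FIXED_1  (1 << FSHIFT)       /* 1.0 in fixed point */
    #define EXP_1    1884                /* FIXED_1 / exp(5s / 1min) */
    #define EXP_5    2014                /* FIXED_1 / exp(5s / 5min) */
    #define EXP_15   2037                /* FIXED_1 / exp(5s / 15min) */

    static unsigned long
    calc_load(unsigned long load, unsigned long exp, unsigned long active)
    {
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
    }

    int main(void)
    {
        unsigned long avenrun = 0;
        unsigned long active = 2 * FIXED_1;     /* two runnable tasks */
        int i;

        /* twelve 5-second samples = one minute of constant load 2 */
        for (i = 0; i < 12; i++)
            avenrun = calc_load(avenrun, EXP_1, active);

        printf("1-min load after 60s: %lu.%02lu\n", avenrun >> FSHIFT,
               ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
    }
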
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index cdd3c89574cd..344712a5e3ed 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -165,7 +165,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
                 vec->count = 0;
                 if (bootmem)
                         alloc_bootmem_cpumask_var(&vec->mask);
-                else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
+                else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
                         goto cleanup;
         }
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3816f217f119..5f9650e8fe75 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1487,17 +1487,10 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 
         find_matching_se(&se, &pse);
 
-        while (se) {
-                BUG_ON(!pse);
+        BUG_ON(!pse);
 
-                if (wakeup_preempt_entity(se, pse) == 1) {
-                        resched_task(curr);
-                        break;
-                }
-
-                se = parent_entity(se);
-                pse = parent_entity(pse);
-        }
+        if (wakeup_preempt_entity(se, pse) == 1)
+                resched_task(curr);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 8a21a2e28c13..499672c10cbd 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -22,7 +22,8 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sy
22static struct task_struct *pick_next_task_idle(struct rq *rq) 22static struct task_struct *pick_next_task_idle(struct rq *rq)
23{ 23{
24 schedstat_inc(rq, sched_goidle); 24 schedstat_inc(rq, sched_goidle);
25 25 /* adjust the active tasks as we might go into a long sleep */
26 calc_load_account_active(rq);
26 return rq->idle; 27 return rq->idle;
27} 28}
28 29
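
The idle path is where a NO_HZ CPU may stop ticking for a long stretch, so its runnable count must be folded into the global load sample before it disappears from timer interrupts. A hedged sketch of what calc_load_account_active() amounts to (field and symbol names assumed from this patch series):

        static void calc_load_account_active_sketch(struct rq *rq)
        {
                long delta = (rq->nr_running + rq->nr_uninterruptible)
                             - rq->calc_load_active;

                rq->calc_load_active += delta;
                atomic_long_add(delta, &calc_load_tasks);  /* global fold */
        }
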
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f2c66f8f9712..9bf0d2a73045 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void)
1591 unsigned int i; 1591 unsigned int i;
1592 1592
1593 for_each_possible_cpu(i) 1593 for_each_possible_cpu(i)
1594 alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1594 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1595 GFP_KERNEL, cpu_to_node(i)); 1595 GFP_KERNEL, cpu_to_node(i));
1596} 1596}
1597#endif /* CONFIG_SMP */ 1597#endif /* CONFIG_SMP */
diff --git a/kernel/smp.c b/kernel/smp.c
index 858baac568ee..ad63d8501207 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
52 switch (action) { 52 switch (action) {
53 case CPU_UP_PREPARE: 53 case CPU_UP_PREPARE:
54 case CPU_UP_PREPARE_FROZEN: 54 case CPU_UP_PREPARE_FROZEN:
55 if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 55 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
56 cpu_to_node(cpu))) 56 cpu_to_node(cpu)))
57 return NOTIFY_BAD; 57 return NOTIFY_BAD;
58 break; 58 break;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b525dd348511..f674f332a024 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -828,7 +828,7 @@ int __init __weak arch_early_irq_init(void)
828 return 0; 828 return 0;
829} 829}
830 830
831int __weak arch_init_chip_data(struct irq_desc *desc, int cpu) 831int __weak arch_init_chip_data(struct irq_desc *desc, int node)
832{ 832{
833 return 0; 833 return 0;
834} 834}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b2970d56fb76..6a463716ecbf 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -731,6 +731,14 @@ static struct ctl_table kern_table[] = {
731 }, 731 },
732 { 732 {
733 .ctl_name = CTL_UNNUMBERED, 733 .ctl_name = CTL_UNNUMBERED,
734 .procname = "bootloader_version",
735 .data = &bootloader_version,
736 .maxlen = sizeof (int),
737 .mode = 0444,
738 .proc_handler = &proc_dointvec,
739 },
740 {
741 .ctl_name = CTL_UNNUMBERED,
734 .procname = "kstack_depth_to_print", 742 .procname = "kstack_depth_to_print",
735 .data = &kstack_depth_to_print, 743 .data = &kstack_depth_to_print,
736 .maxlen = sizeof(int), 744 .maxlen = sizeof(int),
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 687dff49f6e7..52a8bf8931f3 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -22,7 +22,7 @@
22 22
23/* 23/*
24 * This read-write spinlock protects us from races in SMP while 24 * This read-write spinlock protects us from races in SMP while
25 * playing with xtime and avenrun. 25 * playing with xtime.
26 */ 26 */
27__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); 27__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
28 28
diff --git a/kernel/timer.c b/kernel/timer.c
index cffffad01c31..a26ed294f938 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1123,47 +1123,6 @@ void update_process_times(int user_tick)
1123} 1123}
1124 1124
1125/* 1125/*
1126 * Nr of active tasks - counted in fixed-point numbers
1127 */
1128static unsigned long count_active_tasks(void)
1129{
1130 return nr_active() * FIXED_1;
1131}
1132
1133/*
1134 * Hmm.. Changed this, as the GNU make sources (load.c) seems to
1135 * imply that avenrun[] is the standard name for this kind of thing.
1136 * Nothing else seems to be standardized: the fractional size etc
1137 * all seem to differ on different machines.
1138 *
1139 * Requires xtime_lock to access.
1140 */
1141unsigned long avenrun[3];
1142
1143EXPORT_SYMBOL(avenrun);
1144
1145/*
1146 * calc_load - given tick count, update the avenrun load estimates.
1147 * This is called while holding a write_lock on xtime_lock.
1148 */
1149static inline void calc_load(unsigned long ticks)
1150{
1151 unsigned long active_tasks; /* fixed-point */
1152 static int count = LOAD_FREQ;
1153
1154 count -= ticks;
1155 if (unlikely(count < 0)) {
1156 active_tasks = count_active_tasks();
1157 do {
1158 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
1159 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
1160 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
1161 count += LOAD_FREQ;
1162 } while (count < 0);
1163 }
1164}
1165
1166/*
1167 * This function runs timers and the timer-tq in bottom half context. 1126 * This function runs timers and the timer-tq in bottom half context.
1168 */ 1127 */
1169static void run_timer_softirq(struct softirq_action *h) 1128static void run_timer_softirq(struct softirq_action *h)
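
Everything removed here (count_active_tasks(), avenrun[], calc_load()) migrates to kernel/sched.c, so the load average no longer has to be computed under xtime_lock in the timer tick. The arithmetic itself is unchanged: an exponentially decayed average in 11-bit fixed point. A self-contained userspace rendition of that math, using the historical constants (EXP_1 == 1884 ~= FIXED_1 * e^(-5s/60s)):

        #include <stdio.h>

        #define FSHIFT  11
        #define FIXED_1 (1 << FSHIFT)
        #define EXP_1   1884    /* 1-minute decay factor per 5 s sample */

        #define CALC_LOAD(load, exp, n) \
                (load) = ((load) * (exp) + (n) * (FIXED_1 - (exp))) >> FSHIFT

        int main(void)
        {
                unsigned long load = 0;
                int tick;

                /* Three tasks stay runnable for two minutes of 5 s samples. */
                for (tick = 0; tick < 24; tick++)
                        CALC_LOAD(load, EXP_1, 3UL * FIXED_1);

                printf("1-min load ~ %lu.%02lu\n", load >> FSHIFT,
                       (load & (FIXED_1 - 1)) * 100 / FIXED_1);
                return 0;
        }

This prints a load of about 2.6, i.e. 3 * (1 - e^-2), as expected after two minutes of decay toward three runnable tasks.
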
@@ -1187,16 +1146,6 @@ void run_local_timers(void)
1187} 1146}
1188 1147
1189/* 1148/*
1190 * Called by the timer interrupt. xtime_lock must already be taken
1191 * by the timer IRQ!
1192 */
1193static inline void update_times(unsigned long ticks)
1194{
1195 update_wall_time();
1196 calc_load(ticks);
1197}
1198
1199/*
1200 * The 64-bit jiffies value is not atomic - you MUST NOT read it 1149 * The 64-bit jiffies value is not atomic - you MUST NOT read it
1201 * without sampling the sequence number in xtime_lock. 1150 * without sampling the sequence number in xtime_lock.
1202 * jiffies is defined in the linker script... 1151 * jiffies is defined in the linker script...
@@ -1205,7 +1154,8 @@ static inline void update_times(unsigned long ticks)
1205void do_timer(unsigned long ticks) 1154void do_timer(unsigned long ticks)
1206{ 1155{
1207 jiffies_64 += ticks; 1156 jiffies_64 += ticks;
1208 update_times(ticks); 1157 update_wall_time();
1158 calc_global_load();
1209} 1159}
1210 1160
1211#ifdef __ARCH_WANT_SYS_ALARM 1161#ifdef __ARCH_WANT_SYS_ALARM
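
do_timer() now just advances jiffies, updates wall time, and calls calc_global_load(); the per-tick decrement of a static counter is gone. A sketch of the replacement's shape (mirroring the new scheduler-side code rather than quoting it): the fold of calc_load_tasks is sampled at most once per LOAD_FREQ, gated by the calc_load_update stamp seeded in sched_init() above.

        void calc_global_load_sketch(void)
        {
                long active;

                if (time_before(jiffies, calc_load_update))
                        return;                 /* not due yet */

                active = atomic_long_read(&calc_load_tasks) * FIXED_1;
                CALC_LOAD(avenrun[0], EXP_1,  active);
                CALC_LOAD(avenrun[1], EXP_5,  active);
                CALC_LOAD(avenrun[2], EXP_15, active);

                calc_load_update += LOAD_FREQ;
        }
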
@@ -1406,37 +1356,17 @@ int do_sysinfo(struct sysinfo *info)
1406{ 1356{
1407 unsigned long mem_total, sav_total; 1357 unsigned long mem_total, sav_total;
1408 unsigned int mem_unit, bitcount; 1358 unsigned int mem_unit, bitcount;
1409 unsigned long seq; 1359 struct timespec tp;
1410 1360
1411 memset(info, 0, sizeof(struct sysinfo)); 1361 memset(info, 0, sizeof(struct sysinfo));
1412 1362
1413 do { 1363 ktime_get_ts(&tp);
1414 struct timespec tp; 1364 monotonic_to_bootbased(&tp);
1415 seq = read_seqbegin(&xtime_lock); 1365 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1416
1417 /*
1418 * This is annoying. The below is the same thing
1419 * posix_get_clock_monotonic() does, but it wants to
1420 * take the lock which we want to cover the loads stuff
1421 * too.
1422 */
1423
1424 getnstimeofday(&tp);
1425 tp.tv_sec += wall_to_monotonic.tv_sec;
1426 tp.tv_nsec += wall_to_monotonic.tv_nsec;
1427 monotonic_to_bootbased(&tp);
1428 if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
1429 tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
1430 tp.tv_sec++;
1431 }
1432 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1433 1366
1434 info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); 1367 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
1435 info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
1436 info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
1437 1368
1438 info->procs = nr_threads; 1369 info->procs = nr_threads;
1439 } while (read_seqretry(&xtime_lock, seq));
1440 1370
1441 si_meminfo(info); 1371 si_meminfo(info);
1442 si_swapinfo(info); 1372 si_swapinfo(info);
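
The seqlock retry loop disappears because both inputs are now coherent on their own: ktime_get_ts() yields a consistent monotonic timestamp, and get_avenrun() reads the scheduler-maintained samples without needing xtime_lock. The SI_LOAD_SHIFT - FSHIFT shift converts the kernel's 11-bit fixed point into the 16-bit fixed point that struct sysinfo has always exposed. Reading it back from userspace:

        #include <stdio.h>
        #include <sys/sysinfo.h>

        #ifndef SI_LOAD_SHIFT
        #define SI_LOAD_SHIFT 16        /* fixed-point shift of si.loads[] */
        #endif

        int main(void)
        {
                struct sysinfo si;

                if (sysinfo(&si) == 0)
                        printf("load: %.2f %.2f %.2f\n",
                               si.loads[0] / (double)(1 << SI_LOAD_SHIFT),
                               si.loads[1] / (double)(1 << SI_LOAD_SHIFT),
                               si.loads[2] / (double)(1 << SI_LOAD_SHIFT));
                return 0;
        }
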
diff --git a/kernel/wait.c b/kernel/wait.c
index 42a2dbc181c8..ea7c3b4275cf 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -154,7 +154,7 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
154 if (!list_empty(&wait->task_list)) 154 if (!list_empty(&wait->task_list))
155 list_del_init(&wait->task_list); 155 list_del_init(&wait->task_list);
156 else if (waitqueue_active(q)) 156 else if (waitqueue_active(q))
157 __wake_up_common(q, mode, 1, 0, key); 157 __wake_up_locked_key(q, mode, key);
158 spin_unlock_irqrestore(&q->lock, flags); 158 spin_unlock_irqrestore(&q->lock, flags);
159} 159}
160EXPORT_SYMBOL(abort_exclusive_wait); 160EXPORT_SYMBOL(abort_exclusive_wait);
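
__wake_up_common() is a scheduler-internal static helper, so abort_exclusive_wait(), which already holds q->lock, now goes through the exported __wake_up_locked_key() instead; the semantics are preserved (one exclusive wakeup, key forwarded to the waiters' callbacks). Typical shape of such a caller (a sketch; TASK_NORMAL and the three-argument signature match this tree):

        static void wake_one_matching_locked(wait_queue_head_t *q, void *key)
        {
                unsigned long flags;

                spin_lock_irqsave(&q->lock, flags);
                __wake_up_locked_key(q, TASK_NORMAL, key);  /* lock already held */
                spin_unlock_irqrestore(&q->lock, flags);
        }
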