Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c                 11
-rw-r--r--  kernel/irq/chip.c              7
-rw-r--r--  kernel/irq/handle.c           36
-rw-r--r--  kernel/irq/internals.h         1
-rw-r--r--  kernel/irq/manage.c          192
-rw-r--r--  kernel/irq/numa_migrate.c     11
-rw-r--r--  kernel/irq/spurious.c         14
-rw-r--r--  kernel/latencytop.c           83
-rw-r--r--  kernel/module.c               51
-rw-r--r--  kernel/posix-cpu-timers.c      3
-rw-r--r--  kernel/rcuclassic.c            4
-rw-r--r--  kernel/rcupdate.c             12
-rw-r--r--  kernel/rcupreempt.c            3
-rw-r--r--  kernel/rcutree.c               4
-rw-r--r--  kernel/relay.c                 2
-rw-r--r--  kernel/sched.c               997
-rw-r--r--  kernel/sched_clock.c          30
-rw-r--r--  kernel/sched_debug.c           8
-rw-r--r--  kernel/sched_fair.c           59
-rw-r--r--  kernel/sched_features.h        3
-rw-r--r--  kernel/sched_rt.c            537
-rw-r--r--  kernel/sched_stats.h           7
-rw-r--r--  kernel/signal.c                8
-rw-r--r--  kernel/softirq.c               1
-rw-r--r--  kernel/sys.c                  31
-rw-r--r--  kernel/sysctl_check.c          1
-rw-r--r--  kernel/time/Makefile           2
-rw-r--r--  kernel/time/clockevents.c     20
-rw-r--r--  kernel/time/clocksource.c     76
-rw-r--r--  kernel/time/ntp.c            444
-rw-r--r--  kernel/time/timecompare.c    191
-rw-r--r--  kernel/timer.c               110
-rw-r--r--  kernel/tsacct.c                6
-rw-r--r--  kernel/user.c                 34
34 files changed, 2121 insertions, 878 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 8de303bdd4e5..6715ebc3761d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1184,10 +1184,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1184#endif 1184#endif
1185 clear_all_latency_tracing(p); 1185 clear_all_latency_tracing(p);
1186 1186
1187 /* Our parent execution domain becomes current domain
1188 These must match for thread signalling to apply */
1189 p->parent_exec_id = p->self_exec_id;
1190
1191 /* ok, now we should be set up.. */ 1187 /* ok, now we should be set up.. */
1192 p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); 1188 p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
1193 p->pdeath_signal = 0; 1189 p->pdeath_signal = 0;
@@ -1225,10 +1221,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1225 set_task_cpu(p, smp_processor_id()); 1221 set_task_cpu(p, smp_processor_id());
1226 1222
1227 /* CLONE_PARENT re-uses the old parent */ 1223 /* CLONE_PARENT re-uses the old parent */
1228 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) 1224 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1229 p->real_parent = current->real_parent; 1225 p->real_parent = current->real_parent;
1230 else 1226 p->parent_exec_id = current->parent_exec_id;
1227 } else {
1231 p->real_parent = current; 1228 p->real_parent = current;
1229 p->parent_exec_id = current->self_exec_id;
1230 }
1232 1231
1233 spin_lock(&current->sighand->siglock); 1232 spin_lock(&current->sighand->siglock);
1234 1233
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 122fef4b0bd3..c687ba4363f2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -81,6 +81,7 @@ void dynamic_irq_cleanup(unsigned int irq)
81 desc->handle_irq = handle_bad_irq; 81 desc->handle_irq = handle_bad_irq;
82 desc->chip = &no_irq_chip; 82 desc->chip = &no_irq_chip;
83 desc->name = NULL; 83 desc->name = NULL;
84 clear_kstat_irqs(desc);
84 spin_unlock_irqrestore(&desc->lock, flags); 85 spin_unlock_irqrestore(&desc->lock, flags);
85} 86}
86 87
@@ -293,7 +294,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
293 desc->chip->mask_ack(irq); 294 desc->chip->mask_ack(irq);
294 else { 295 else {
295 desc->chip->mask(irq); 296 desc->chip->mask(irq);
296 desc->chip->ack(irq); 297 if (desc->chip->ack)
298 desc->chip->ack(irq);
297 } 299 }
298} 300}
299 301
@@ -479,7 +481,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
479 kstat_incr_irqs_this_cpu(irq, desc); 481 kstat_incr_irqs_this_cpu(irq, desc);
480 482
481 /* Start handling the irq */ 483 /* Start handling the irq */
482 desc->chip->ack(irq); 484 if (desc->chip->ack)
485 desc->chip->ack(irq);
483 desc = irq_remap_to_desc(irq, desc); 486 desc = irq_remap_to_desc(irq, desc);
484 487
485 /* Mark the IRQ currently in progress.*/ 488 /* Mark the IRQ currently in progress.*/
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f51eaee921b6..9ebf77968871 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -82,19 +82,21 @@ static struct irq_desc irq_desc_init = {
82 82
83void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) 83void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
84{ 84{
85 unsigned long bytes;
86 char *ptr;
87 int node; 85 int node;
88 86 void *ptr;
89 /* Compute how many bytes we need per irq and allocate them */
90 bytes = nr * sizeof(unsigned int);
91 87
92 node = cpu_to_node(cpu); 88 node = cpu_to_node(cpu);
93 ptr = kzalloc_node(bytes, GFP_ATOMIC, node); 89 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
94 printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
95 90
96 if (ptr) 91 /*
97 desc->kstat_irqs = (unsigned int *)ptr; 92 * don't overwite if can not get new one
93 * init_copy_kstat_irqs() could still use old one
94 */
95 if (ptr) {
96 printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
97 cpu, node);
98 desc->kstat_irqs = ptr;
99 }
98} 100}
99 101
100static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) 102static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
@@ -237,6 +239,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
237 } 239 }
238}; 240};
239 241
242static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
240int __init early_irq_init(void) 243int __init early_irq_init(void)
241{ 244{
242 struct irq_desc *desc; 245 struct irq_desc *desc;
@@ -253,6 +256,7 @@ int __init early_irq_init(void)
253 for (i = 0; i < count; i++) { 256 for (i = 0; i < count; i++) {
254 desc[i].irq = i; 257 desc[i].irq = i;
255 init_alloc_desc_masks(&desc[i], 0, true); 258 init_alloc_desc_masks(&desc[i], 0, true);
259 desc[i].kstat_irqs = kstat_irqs_all[i];
256 } 260 }
257 return arch_early_irq_init(); 261 return arch_early_irq_init();
258} 262}
@@ -268,6 +272,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
268} 272}
269#endif /* !CONFIG_SPARSE_IRQ */ 273#endif /* !CONFIG_SPARSE_IRQ */
270 274
275void clear_kstat_irqs(struct irq_desc *desc)
276{
277 memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
278}
279
271/* 280/*
272 * What should we do if we get a hw irq event on an illegal vector? 281 * What should we do if we get a hw irq event on an illegal vector?
273 * Each architecture has to answer this themself. 282 * Each architecture has to answer this themself.
@@ -341,6 +350,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
341 irqreturn_t ret, retval = IRQ_NONE; 350 irqreturn_t ret, retval = IRQ_NONE;
342 unsigned int status = 0; 351 unsigned int status = 0;
343 352
353 WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
354
344 if (!(action->flags & IRQF_DISABLED)) 355 if (!(action->flags & IRQF_DISABLED))
345 local_irq_enable_in_hardirq(); 356 local_irq_enable_in_hardirq();
346 357
@@ -360,6 +371,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
360} 371}
361 372
362#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 373#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
374
375#ifdef CONFIG_ENABLE_WARN_DEPRECATED
376# warning __do_IRQ is deprecated. Please convert to proper flow handlers
377#endif
378
363/** 379/**
364 * __do_IRQ - original all in one highlevel IRQ handler 380 * __do_IRQ - original all in one highlevel IRQ handler
365 * @irq: the interrupt number 381 * @irq: the interrupt number
@@ -480,12 +496,10 @@ void early_init_irq_lock_class(void)
480 } 496 }
481} 497}
482 498
483#ifdef CONFIG_SPARSE_IRQ
484unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 499unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
485{ 500{
486 struct irq_desc *desc = irq_to_desc(irq); 501 struct irq_desc *desc = irq_to_desc(irq);
487 return desc ? desc->kstat_irqs[cpu] : 0; 502 return desc ? desc->kstat_irqs[cpu] : 0;
488} 503}
489#endif
490EXPORT_SYMBOL(kstat_irqs_cpu); 504EXPORT_SYMBOL(kstat_irqs_cpu);
491 505
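The handle.c change above drops the per-descriptor kzalloc of kstat_irqs in the non-sparse case and instead carves rows out of one static table, kstat_irqs_all[NR_IRQS][NR_CPUS], wired into each descriptor in early_irq_init(), so the counters exist before any allocator is usable and clear_kstat_irqs() can simply memset a row. A minimal userspace sketch of that layout follows; the NR_IRQS/NR_CPUS values, main() driver and printf calls are illustrative stand-ins (the real clear_kstat_irqs() sizes the memset with nr_cpu_ids):

#include <stdio.h>
#include <string.h>

#define NR_IRQS 16                      /* stand-in for the kernel's NR_IRQS */
#define NR_CPUS 4                       /* stand-in for NR_CPUS / nr_cpu_ids */

struct irq_desc {
        unsigned int *kstat_irqs;       /* points at one row of the table */
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
static struct irq_desc irq_desc[NR_IRQS];

static void early_irq_init(void)
{
        int i;

        for (i = 0; i < NR_IRQS; i++)
                irq_desc[i].kstat_irqs = kstat_irqs_all[i];
}

static void clear_kstat_irqs(struct irq_desc *desc)
{
        /* mirrors the new helper: wipe every per-CPU counter of one IRQ */
        memset(desc->kstat_irqs, 0, NR_CPUS * sizeof(*desc->kstat_irqs));
}

int main(void)
{
        early_irq_init();
        irq_desc[3].kstat_irqs[1]++;    /* "IRQ 3 fired on CPU 1" */
        printf("irq 3, cpu 1: %u\n", irq_desc[3].kstat_irqs[1]);
        clear_kstat_irqs(&irq_desc[3]);
        printf("after clear: %u\n", irq_desc[3].kstat_irqs[1]);
        return 0;
}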
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 40416a81a0f5..ee1aa9f8e8b9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -15,6 +15,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
15 15
16extern struct lock_class_key irq_desc_lock_class; 16extern struct lock_class_key irq_desc_lock_class;
17extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr); 17extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
18extern void clear_kstat_irqs(struct irq_desc *desc);
18extern spinlock_t sparse_irq_lock; 19extern spinlock_t sparse_irq_lock;
19 20
20#ifdef CONFIG_SPARSE_IRQ 21#ifdef CONFIG_SPARSE_IRQ
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a3a5dc9ef346..6458e99984c0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,7 +109,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
109/* 109/*
110 * Generic version of the affinity autoselector. 110 * Generic version of the affinity autoselector.
111 */ 111 */
112int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) 112static int setup_affinity(unsigned int irq, struct irq_desc *desc)
113{ 113{
114 if (!irq_can_set_affinity(irq)) 114 if (!irq_can_set_affinity(irq))
115 return 0; 115 return 0;
@@ -133,7 +133,7 @@ set_affinity:
133 return 0; 133 return 0;
134} 134}
135#else 135#else
136static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d) 136static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
137{ 137{
138 return irq_select_affinity(irq); 138 return irq_select_affinity(irq);
139} 139}
@@ -149,14 +149,14 @@ int irq_select_affinity_usr(unsigned int irq)
149 int ret; 149 int ret;
150 150
151 spin_lock_irqsave(&desc->lock, flags); 151 spin_lock_irqsave(&desc->lock, flags);
152 ret = do_irq_select_affinity(irq, desc); 152 ret = setup_affinity(irq, desc);
153 spin_unlock_irqrestore(&desc->lock, flags); 153 spin_unlock_irqrestore(&desc->lock, flags);
154 154
155 return ret; 155 return ret;
156} 156}
157 157
158#else 158#else
159static inline int do_irq_select_affinity(int irq, struct irq_desc *desc) 159static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
160{ 160{
161 return 0; 161 return 0;
162} 162}
@@ -389,9 +389,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
389 * allocate special interrupts that are part of the architecture. 389 * allocate special interrupts that are part of the architecture.
390 */ 390 */
391static int 391static int
392__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) 392__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
393{ 393{
394 struct irqaction *old, **p; 394 struct irqaction *old, **old_ptr;
395 const char *old_name = NULL; 395 const char *old_name = NULL;
396 unsigned long flags; 396 unsigned long flags;
397 int shared = 0; 397 int shared = 0;
@@ -423,8 +423,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
423 * The following block of code has to be executed atomically 423 * The following block of code has to be executed atomically
424 */ 424 */
425 spin_lock_irqsave(&desc->lock, flags); 425 spin_lock_irqsave(&desc->lock, flags);
426 p = &desc->action; 426 old_ptr = &desc->action;
427 old = *p; 427 old = *old_ptr;
428 if (old) { 428 if (old) {
429 /* 429 /*
430 * Can't share interrupts unless both agree to and are 430 * Can't share interrupts unless both agree to and are
@@ -447,8 +447,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
447 447
448 /* add new interrupt at end of irq queue */ 448 /* add new interrupt at end of irq queue */
449 do { 449 do {
450 p = &old->next; 450 old_ptr = &old->next;
451 old = *p; 451 old = *old_ptr;
452 } while (old); 452 } while (old);
453 shared = 1; 453 shared = 1;
454 } 454 }
@@ -488,7 +488,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
488 desc->status |= IRQ_NO_BALANCING; 488 desc->status |= IRQ_NO_BALANCING;
489 489
490 /* Set default affinity mask once everything is setup */ 490 /* Set default affinity mask once everything is setup */
491 do_irq_select_affinity(irq, desc); 491 setup_affinity(irq, desc);
492 492
493 } else if ((new->flags & IRQF_TRIGGER_MASK) 493 } else if ((new->flags & IRQF_TRIGGER_MASK)
494 && (new->flags & IRQF_TRIGGER_MASK) 494 && (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +499,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
499 (int)(new->flags & IRQF_TRIGGER_MASK)); 499 (int)(new->flags & IRQF_TRIGGER_MASK));
500 } 500 }
501 501
502 *p = new; 502 *old_ptr = new;
503 503
504 /* Reset broken irq detection when installing new handler */ 504 /* Reset broken irq detection when installing new handler */
505 desc->irq_count = 0; 505 desc->irq_count = 0;
@@ -549,90 +549,117 @@ int setup_irq(unsigned int irq, struct irqaction *act)
549 549
550 return __setup_irq(irq, desc, act); 550 return __setup_irq(irq, desc, act);
551} 551}
552EXPORT_SYMBOL_GPL(setup_irq);
552 553
553/** 554 /*
554 * free_irq - free an interrupt 555 * Internal function to unregister an irqaction - used to free
555 * @irq: Interrupt line to free 556 * regular and special interrupts that are part of the architecture.
556 * @dev_id: Device identity to free
557 *
558 * Remove an interrupt handler. The handler is removed and if the
559 * interrupt line is no longer in use by any driver it is disabled.
560 * On a shared IRQ the caller must ensure the interrupt is disabled
561 * on the card it drives before calling this function. The function
562 * does not return until any executing interrupts for this IRQ
563 * have completed.
564 *
565 * This function must not be called from interrupt context.
566 */ 557 */
567void free_irq(unsigned int irq, void *dev_id) 558static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
568{ 559{
569 struct irq_desc *desc = irq_to_desc(irq); 560 struct irq_desc *desc = irq_to_desc(irq);
570 struct irqaction **p; 561 struct irqaction *action, **action_ptr;
571 unsigned long flags; 562 unsigned long flags;
572 563
573 WARN_ON(in_interrupt()); 564 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
574 565
575 if (!desc) 566 if (!desc)
576 return; 567 return NULL;
577 568
578 spin_lock_irqsave(&desc->lock, flags); 569 spin_lock_irqsave(&desc->lock, flags);
579 p = &desc->action; 570
571 /*
572 * There can be multiple actions per IRQ descriptor, find the right
573 * one based on the dev_id:
574 */
575 action_ptr = &desc->action;
580 for (;;) { 576 for (;;) {
581 struct irqaction *action = *p; 577 action = *action_ptr;
582 578
583 if (action) { 579 if (!action) {
584 struct irqaction **pp = p; 580 WARN(1, "Trying to free already-free IRQ %d\n", irq);
581 spin_unlock_irqrestore(&desc->lock, flags);
585 582
586 p = &action->next; 583 return NULL;
587 if (action->dev_id != dev_id) 584 }
588 continue;
589 585
590 /* Found it - now remove it from the list of entries */ 586 if (action->dev_id == dev_id)
591 *pp = action->next; 587 break;
588 action_ptr = &action->next;
589 }
592 590
593 /* Currently used only by UML, might disappear one day.*/ 591 /* Found it - now remove it from the list of entries: */
592 *action_ptr = action->next;
593
594 /* Currently used only by UML, might disappear one day: */
594#ifdef CONFIG_IRQ_RELEASE_METHOD 595#ifdef CONFIG_IRQ_RELEASE_METHOD
595 if (desc->chip->release) 596 if (desc->chip->release)
596 desc->chip->release(irq, dev_id); 597 desc->chip->release(irq, dev_id);
597#endif 598#endif
598 599
599 if (!desc->action) { 600 /* If this was the last handler, shut down the IRQ line: */
600 desc->status |= IRQ_DISABLED; 601 if (!desc->action) {
601 if (desc->chip->shutdown) 602 desc->status |= IRQ_DISABLED;
602 desc->chip->shutdown(irq); 603 if (desc->chip->shutdown)
603 else 604 desc->chip->shutdown(irq);
604 desc->chip->disable(irq); 605 else
605 } 606 desc->chip->disable(irq);
606 spin_unlock_irqrestore(&desc->lock, flags); 607 }
607 unregister_handler_proc(irq, action); 608 spin_unlock_irqrestore(&desc->lock, flags);
609
610 unregister_handler_proc(irq, action);
611
612 /* Make sure it's not being used on another CPU: */
613 synchronize_irq(irq);
608 614
609 /* Make sure it's not being used on another CPU */
610 synchronize_irq(irq);
611#ifdef CONFIG_DEBUG_SHIRQ
612 /*
613 * It's a shared IRQ -- the driver ought to be
614 * prepared for it to happen even now it's
615 * being freed, so let's make sure.... We do
616 * this after actually deregistering it, to
617 * make sure that a 'real' IRQ doesn't run in
618 * parallel with our fake
619 */
620 if (action->flags & IRQF_SHARED) {
621 local_irq_save(flags);
622 action->handler(irq, dev_id);
623 local_irq_restore(flags);
624 }
625#endif
626 kfree(action);
627 return;
628 }
629 printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
630#ifdef CONFIG_DEBUG_SHIRQ 615#ifdef CONFIG_DEBUG_SHIRQ
631 dump_stack(); 616 /*
632#endif 617 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
633 spin_unlock_irqrestore(&desc->lock, flags); 618 * event to happen even now it's being freed, so let's make sure that
634 return; 619 * is so by doing an extra call to the handler ....
620 *
621 * ( We do this after actually deregistering it, to make sure that a
622 * 'real' IRQ doesn't run in * parallel with our fake. )
623 */
624 if (action->flags & IRQF_SHARED) {
625 local_irq_save(flags);
626 action->handler(irq, dev_id);
627 local_irq_restore(flags);
635 } 628 }
629#endif
630 return action;
631}
632
633/**
634 * remove_irq - free an interrupt
635 * @irq: Interrupt line to free
636 * @act: irqaction for the interrupt
637 *
638 * Used to remove interrupts statically setup by the early boot process.
639 */
640void remove_irq(unsigned int irq, struct irqaction *act)
641{
642 __free_irq(irq, act->dev_id);
643}
644EXPORT_SYMBOL_GPL(remove_irq);
645
646/**
647 * free_irq - free an interrupt allocated with request_irq
648 * @irq: Interrupt line to free
649 * @dev_id: Device identity to free
650 *
651 * Remove an interrupt handler. The handler is removed and if the
652 * interrupt line is no longer in use by any driver it is disabled.
653 * On a shared IRQ the caller must ensure the interrupt is disabled
654 * on the card it drives before calling this function. The function
655 * does not return until any executing interrupts for this IRQ
656 * have completed.
657 *
658 * This function must not be called from interrupt context.
659 */
660void free_irq(unsigned int irq, void *dev_id)
661{
662 kfree(__free_irq(irq, dev_id));
636} 663}
637EXPORT_SYMBOL(free_irq); 664EXPORT_SYMBOL(free_irq);
638 665
@@ -679,11 +706,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
679 * the behavior is classified as "will not fix" so we need to 706 * the behavior is classified as "will not fix" so we need to
680 * start nudging drivers away from using that idiom. 707 * start nudging drivers away from using that idiom.
681 */ 708 */
682 if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) 709 if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
683 == (IRQF_SHARED|IRQF_DISABLED)) 710 (IRQF_SHARED|IRQF_DISABLED)) {
684 pr_warning("IRQ %d/%s: IRQF_DISABLED is not " 711 pr_warning(
685 "guaranteed on shared IRQs\n", 712 "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
686 irq, devname); 713 irq, devname);
714 }
687 715
688#ifdef CONFIG_LOCKDEP 716#ifdef CONFIG_LOCKDEP
689 /* 717 /*
@@ -709,15 +737,13 @@ int request_irq(unsigned int irq, irq_handler_t handler,
709 if (!handler) 737 if (!handler)
710 return -EINVAL; 738 return -EINVAL;
711 739
712 action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC); 740 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
713 if (!action) 741 if (!action)
714 return -ENOMEM; 742 return -ENOMEM;
715 743
716 action->handler = handler; 744 action->handler = handler;
717 action->flags = irqflags; 745 action->flags = irqflags;
718 cpus_clear(action->mask);
719 action->name = devname; 746 action->name = devname;
720 action->next = NULL;
721 action->dev_id = dev_id; 747 action->dev_id = dev_id;
722 748
723 retval = __setup_irq(irq, desc, action); 749 retval = __setup_irq(irq, desc, action);
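The manage.c rework above folds the body of free_irq() into an internal __free_irq() that unlinks and returns the matching irqaction, so remove_irq() can detach a statically set up action without freeing it while free_irq() simply kfree()s whatever __free_irq() hands back. A hedged userspace sketch of that "internal unlink returns the node, the wrappers decide ownership" shape, using a plain singly linked list (struct action, __free_action() and friends are made-up names, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct action {
        void *dev_id;
        struct action *next;
};

static struct action *head;             /* stands in for desc->action */

/* Unlink the entry matching dev_id and hand it back; NULL if not found. */
static struct action *__free_action(void *dev_id)
{
        struct action **pp = &head;
        struct action *a;

        for (a = *pp; a; pp = &a->next, a = *pp) {
                if (a->dev_id == dev_id) {
                        *pp = a->next;  /* unlink, but keep the node alive */
                        return a;
                }
        }
        fprintf(stderr, "trying to free an unregistered action\n");
        return NULL;
}

/* remove_irq-style wrapper: the action is owned elsewhere, nothing freed */
static void remove_action(void *dev_id)
{
        __free_action(dev_id);
}

/* free_irq-style wrapper: the action was heap-allocated, release it too */
static void free_action(void *dev_id)
{
        free(__free_action(dev_id));
}

int main(void)
{
        struct action *a = calloc(1, sizeof(*a));

        if (!a)
                return 1;
        a->dev_id = (void *)0x1;
        a->next = head;
        head = a;

        free_action((void *)0x1);       /* unlinks and frees */
        remove_action((void *)0x1);     /* second try warns, frees nothing */
        return 0;
}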
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 7f9b80434e32..243d6121e50e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc,
17 struct irq_desc *desc, 17 struct irq_desc *desc,
18 int cpu, int nr) 18 int cpu, int nr)
19{ 19{
20 unsigned long bytes;
21
22 init_kstat_irqs(desc, cpu, nr); 20 init_kstat_irqs(desc, cpu, nr);
23 21
24 if (desc->kstat_irqs != old_desc->kstat_irqs) { 22 if (desc->kstat_irqs != old_desc->kstat_irqs)
25 /* Compute how many bytes we need per irq and allocate them */ 23 memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
26 bytes = nr * sizeof(unsigned int); 24 nr * sizeof(*desc->kstat_irqs));
27
28 memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
29 }
30} 25}
31 26
32static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) 27static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56e..4d568294de3e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
104 return ok; 104 return ok;
105} 105}
106 106
107static void poll_spurious_irqs(unsigned long dummy) 107static void poll_all_shared_irqs(void)
108{ 108{
109 struct irq_desc *desc; 109 struct irq_desc *desc;
110 int i; 110 int i;
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)
123 123
124 try_one_irq(i, desc); 124 try_one_irq(i, desc);
125 } 125 }
126}
127
128static void poll_spurious_irqs(unsigned long dummy)
129{
130 poll_all_shared_irqs();
126 131
127 mod_timer(&poll_spurious_irq_timer, 132 mod_timer(&poll_spurious_irq_timer,
128 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 133 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
129} 134}
130 135
136#ifdef CONFIG_DEBUG_SHIRQ
137void debug_poll_all_shared_irqs(void)
138{
139 poll_all_shared_irqs();
140}
141#endif
142
131/* 143/*
132 * If 99,900 of the previous 100,000 interrupts have not been handled 144 * If 99,900 of the previous 100,000 interrupts have not been handled
133 * then assume that the IRQ is stuck in some manner. Drop a diagnostic 145 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 449db466bdbc..ca07c5c0c914 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -9,6 +9,44 @@
9 * as published by the Free Software Foundation; version 2 9 * as published by the Free Software Foundation; version 2
10 * of the License. 10 * of the License.
11 */ 11 */
12
13/*
14 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
15 * used by the "latencytop" userspace tool. The latency that is tracked is not
16 * the 'traditional' interrupt latency (which is primarily caused by something
17 * else consuming CPU), but instead, it is the latency an application encounters
18 * because the kernel sleeps on its behalf for various reasons.
19 *
20 * This code tracks 2 levels of statistics:
21 * 1) System level latency
22 * 2) Per process latency
23 *
24 * The latency is stored in fixed sized data structures in an accumulated form;
25 * if the "same" latency cause is hit twice, this will be tracked as one entry
26 * in the data structure. Both the count, total accumulated latency and maximum
27 * latency are tracked in this data structure. When the fixed size structure is
28 * full, no new causes are tracked until the buffer is flushed by writing to
29 * the /proc file; the userspace tool does this on a regular basis.
30 *
31 * A latency cause is identified by a stringified backtrace at the point that
32 * the scheduler gets invoked. The userland tool will use this string to
33 * identify the cause of the latency in human readable form.
34 *
35 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
36 * These files look like this:
37 *
38 * Latency Top version : v0.1
39 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
40 * | | | |
41 * | | | +----> the stringified backtrace
42 * | | +---------> The maximum latency for this entry in microseconds
43 * | +--------------> The accumulated latency for this entry (microseconds)
44 * +-------------------> The number of times this entry is hit
45 *
46 * (note: the average latency is the accumulated latency divided by the number
47 * of times)
48 */
49
12#include <linux/latencytop.h> 50#include <linux/latencytop.h>
13#include <linux/kallsyms.h> 51#include <linux/kallsyms.h>
14#include <linux/seq_file.h> 52#include <linux/seq_file.h>
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
72 firstnonnull = i; 110 firstnonnull = i;
73 continue; 111 continue;
74 } 112 }
75 for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 113 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
76 unsigned long record = lat->backtrace[q]; 114 unsigned long record = lat->backtrace[q];
77 115
78 if (latency_record[i].backtrace[q] != record) { 116 if (latency_record[i].backtrace[q] != record) {
@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
101 memcpy(&latency_record[i], lat, sizeof(struct latency_record)); 139 memcpy(&latency_record[i], lat, sizeof(struct latency_record));
102} 140}
103 141
104static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat) 142/*
143 * Iterator to store a backtrace into a latency record entry
144 */
145static inline void store_stacktrace(struct task_struct *tsk,
146 struct latency_record *lat)
105{ 147{
106 struct stack_trace trace; 148 struct stack_trace trace;
107 149
108 memset(&trace, 0, sizeof(trace)); 150 memset(&trace, 0, sizeof(trace));
109 trace.max_entries = LT_BACKTRACEDEPTH; 151 trace.max_entries = LT_BACKTRACEDEPTH;
110 trace.entries = &lat->backtrace[0]; 152 trace.entries = &lat->backtrace[0];
111 trace.skip = 0;
112 save_stack_trace_tsk(tsk, &trace); 153 save_stack_trace_tsk(tsk, &trace);
113} 154}
114 155
156/**
157 * __account_scheduler_latency - record an occured latency
158 * @tsk - the task struct of the task hitting the latency
159 * @usecs - the duration of the latency in microseconds
160 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
161 *
162 * This function is the main entry point for recording latency entries
163 * as called by the scheduler.
164 *
165 * This function has a few special cases to deal with normal 'non-latency'
166 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
167 * since this usually is caused by waiting for events via select() and co.
168 *
169 * Negative latencies (caused by time going backwards) are also explicitly
170 * skipped.
171 */
115void __sched 172void __sched
116account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) 173__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
117{ 174{
118 unsigned long flags; 175 unsigned long flags;
119 int i, q; 176 int i, q;
120 struct latency_record lat; 177 struct latency_record lat;
121 178
122 if (!latencytop_enabled)
123 return;
124
125 /* Long interruptible waits are generally user requested... */ 179 /* Long interruptible waits are generally user requested... */
126 if (inter && usecs > 5000) 180 if (inter && usecs > 5000)
127 return; 181 return;
128 182
183 /* Negative sleeps are time going backwards */
184 /* Zero-time sleeps are non-interesting */
185 if (usecs <= 0)
186 return;
187
129 memset(&lat, 0, sizeof(lat)); 188 memset(&lat, 0, sizeof(lat));
130 lat.count = 1; 189 lat.count = 1;
131 lat.time = usecs; 190 lat.time = usecs;
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
143 if (tsk->latency_record_count >= LT_SAVECOUNT) 202 if (tsk->latency_record_count >= LT_SAVECOUNT)
144 goto out_unlock; 203 goto out_unlock;
145 204
146 for (i = 0; i < LT_SAVECOUNT ; i++) { 205 for (i = 0; i < LT_SAVECOUNT; i++) {
147 struct latency_record *mylat; 206 struct latency_record *mylat;
148 int same = 1; 207 int same = 1;
149 208
150 mylat = &tsk->latency_record[i]; 209 mylat = &tsk->latency_record[i];
151 for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 210 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
152 unsigned long record = lat.backtrace[q]; 211 unsigned long record = lat.backtrace[q];
153 212
154 if (mylat->backtrace[q] != record) { 213 if (mylat->backtrace[q] != record) {
@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v)
186 for (i = 0; i < MAXLR; i++) { 245 for (i = 0; i < MAXLR; i++) {
187 if (latency_record[i].backtrace[0]) { 246 if (latency_record[i].backtrace[0]) {
188 int q; 247 int q;
189 seq_printf(m, "%i %li %li ", 248 seq_printf(m, "%i %lu %lu ",
190 latency_record[i].count, 249 latency_record[i].count,
191 latency_record[i].time, 250 latency_record[i].time,
192 latency_record[i].max); 251 latency_record[i].max);
@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp)
223 return single_open(filp, lstats_show, NULL); 282 return single_open(filp, lstats_show, NULL);
224} 283}
225 284
226static struct file_operations lstats_fops = { 285static const struct file_operations lstats_fops = {
227 .open = lstats_open, 286 .open = lstats_open,
228 .read = seq_read, 287 .read = seq_read,
229 .write = lstats_write, 288 .write = lstats_write,
@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void)
236 proc_create("latency_stats", 0644, NULL, &lstats_fops); 295 proc_create("latency_stats", 0644, NULL, &lstats_fops);
237 return 0; 296 return 0;
238} 297}
239__initcall(init_lstats_procfs); 298device_initcall(init_lstats_procfs);
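The new header comment in latencytop.c spells out the /proc/latency_stats record format: hit count, accumulated latency in microseconds, maximum latency in microseconds, then the stringified backtrace, preceded by a version header line. As a small illustration of consuming that format from userspace (a sketch built only from the comment above; error handling kept minimal):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/latency_stats", "r");
        char line[512];
        unsigned long count, total, max;
        int off;

        if (!f) {
                perror("/proc/latency_stats");
                return 1;
        }
        /* first line is the "Latency Top version" header per the comment */
        if (!fgets(line, sizeof(line), f)) {
                fclose(f);
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                /* count, accumulated usecs, max usecs, then the backtrace */
                if (sscanf(line, "%lu %lu %lu %n",
                           &count, &total, &max, &off) < 3)
                        continue;
                if (count)
                        printf("avg %lu us, max %lu us, hits %lu: %s",
                               total / count, max, count, line + off);
        }
        fclose(f);
        return 0;
}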
diff --git a/kernel/module.c b/kernel/module.c
index f0e04d6b67d8..f77ac320d0b5 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -856,7 +856,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
856 mutex_lock(&module_mutex); 856 mutex_lock(&module_mutex);
857 /* Store the name of the last unloaded module for diagnostic purposes */ 857 /* Store the name of the last unloaded module for diagnostic purposes */
858 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); 858 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
859 unregister_dynamic_debug_module(mod->name); 859 ddebug_remove_module(mod->name);
860 free_module(mod); 860 free_module(mod);
861 861
862 out: 862 out:
@@ -1861,19 +1861,13 @@ static inline void add_kallsyms(struct module *mod,
1861} 1861}
1862#endif /* CONFIG_KALLSYMS */ 1862#endif /* CONFIG_KALLSYMS */
1863 1863
1864static void dynamic_printk_setup(struct mod_debug *debug, unsigned int num) 1864static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
1865{ 1865{
1866#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG 1866#ifdef CONFIG_DYNAMIC_DEBUG
1867 unsigned int i; 1867 if (ddebug_add_module(debug, num, debug->modname))
1868 1868 printk(KERN_ERR "dynamic debug error adding module: %s\n",
1869 for (i = 0; i < num; i++) { 1869 debug->modname);
1870 register_dynamic_debug_module(debug[i].modname, 1870#endif
1871 debug[i].type,
1872 debug[i].logical_modname,
1873 debug[i].flag_names,
1874 debug[i].hash, debug[i].hash2);
1875 }
1876#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
1877} 1871}
1878 1872
1879static void *module_alloc_update_bounds(unsigned long size) 1873static void *module_alloc_update_bounds(unsigned long size)
@@ -2049,14 +2043,6 @@ static noinline struct module *load_module(void __user *umod,
2049 if (err < 0) 2043 if (err < 0)
2050 goto free_mod; 2044 goto free_mod;
2051 2045
2052#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
2053 mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
2054 mod->name);
2055 if (!mod->refptr) {
2056 err = -ENOMEM;
2057 goto free_mod;
2058 }
2059#endif
2060 if (pcpuindex) { 2046 if (pcpuindex) {
2061 /* We have a special allocation for this section. */ 2047 /* We have a special allocation for this section. */
2062 percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, 2048 percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@@ -2064,7 +2050,7 @@ static noinline struct module *load_module(void __user *umod,
2064 mod->name); 2050 mod->name);
2065 if (!percpu) { 2051 if (!percpu) {
2066 err = -ENOMEM; 2052 err = -ENOMEM;
2067 goto free_percpu; 2053 goto free_mod;
2068 } 2054 }
2069 sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; 2055 sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
2070 mod->percpu = percpu; 2056 mod->percpu = percpu;
@@ -2116,6 +2102,14 @@ static noinline struct module *load_module(void __user *umod,
2116 /* Module has been moved. */ 2102 /* Module has been moved. */
2117 mod = (void *)sechdrs[modindex].sh_addr; 2103 mod = (void *)sechdrs[modindex].sh_addr;
2118 2104
2105#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
2106 mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
2107 mod->name);
2108 if (!mod->refptr) {
2109 err = -ENOMEM;
2110 goto free_init;
2111 }
2112#endif
2119 /* Now we've moved module, initialize linked lists, etc. */ 2113 /* Now we've moved module, initialize linked lists, etc. */
2120 module_unload_init(mod); 2114 module_unload_init(mod);
2121 2115
@@ -2247,12 +2241,13 @@ static noinline struct module *load_module(void __user *umod,
2247 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); 2241 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
2248 2242
2249 if (!mod->taints) { 2243 if (!mod->taints) {
2250 struct mod_debug *debug; 2244 struct _ddebug *debug;
2251 unsigned int num_debug; 2245 unsigned int num_debug;
2252 2246
2253 debug = section_objs(hdr, sechdrs, secstrings, "__verbose", 2247 debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
2254 sizeof(*debug), &num_debug); 2248 sizeof(*debug), &num_debug);
2255 dynamic_printk_setup(debug, num_debug); 2249 if (debug)
2250 dynamic_debug_setup(debug, num_debug);
2256 } 2251 }
2257 2252
2258 /* sechdrs[0].sh_size is always zero */ 2253 /* sechdrs[0].sh_size is always zero */
@@ -2322,15 +2317,17 @@ static noinline struct module *load_module(void __user *umod,
2322 ftrace_release(mod->module_core, mod->core_size); 2317 ftrace_release(mod->module_core, mod->core_size);
2323 free_unload: 2318 free_unload:
2324 module_unload_free(mod); 2319 module_unload_free(mod);
2320 free_init:
2321#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
2322 percpu_modfree(mod->refptr);
2323#endif
2325 module_free(mod, mod->module_init); 2324 module_free(mod, mod->module_init);
2326 free_core: 2325 free_core:
2327 module_free(mod, mod->module_core); 2326 module_free(mod, mod->module_core);
2327 /* mod will be freed with core. Don't access it beyond this line! */
2328 free_percpu: 2328 free_percpu:
2329 if (percpu) 2329 if (percpu)
2330 percpu_modfree(percpu); 2330 percpu_modfree(percpu);
2331#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
2332 percpu_modfree(mod->refptr);
2333#endif
2334 free_mod: 2331 free_mod:
2335 kfree(args); 2332 kfree(args);
2336 free_hdr: 2333 free_hdr:
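The module.c hunks move the mod->refptr percpu allocation to after the module image has been relocated and add a matching free_init: label, keeping the error path releasing resources in the reverse order they were acquired. A compact sketch of that goto-unwind idiom with generic heap allocations (struct thing and setup() are illustrative, not the module loader's code):

#include <stdlib.h>

struct thing {
        void *a, *b, *c;
};

/*
 * Acquire three resources in order; on failure, fall through the labels
 * so only what was actually acquired gets released, newest first.
 */
static int setup(struct thing *t)
{
        t->a = malloc(32);
        if (!t->a)
                goto out;
        t->b = malloc(32);
        if (!t->b)
                goto free_a;
        t->c = malloc(32);              /* the "late" allocation, like mod->refptr */
        if (!t->c)
                goto free_b;
        return 0;

free_b:
        free(t->b);
free_a:
        free(t->a);
out:
        return -1;
}

int main(void)
{
        struct thing t = { 0 };

        if (setup(&t))
                return 1;
        free(t.c);
        free(t.b);
        free(t.a);
        return 0;
}

Moving an allocation later in the sequence, as the patch does with mod->refptr, means its matching free must also move to a label that is only reachable once that allocation can have happened, which is exactly what the new free_init: label provides.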
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e976e505648d..8e5d9a68b022 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1370,7 +1370,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
1370 if (task_cputime_expired(&group_sample, &sig->cputime_expires)) 1370 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1371 return 1; 1371 return 1;
1372 } 1372 }
1373 return 0; 1373
1374 return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
1374} 1375}
1375 1376
1376/* 1377/*
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index bd5a9003497c..654c640a6b9c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
679void rcu_check_callbacks(int cpu, int user) 679void rcu_check_callbacks(int cpu, int user)
680{ 680{
681 if (user || 681 if (user ||
682 (idle_cpu(cpu) && !in_softirq() && 682 (idle_cpu(cpu) && rcu_scheduler_active &&
683 hardirq_count() <= (1 << HARDIRQ_SHIFT))) { 683 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
684 684
685 /* 685 /*
686 * Get here if this CPU took its interrupt from user 686 * Get here if this CPU took its interrupt from user
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index d92a76a881aa..cae8a059cf47 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,6 +44,7 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h>
47 48
48enum rcu_barrier { 49enum rcu_barrier {
49 RCU_BARRIER_STD, 50 RCU_BARRIER_STD,
@@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
55static atomic_t rcu_barrier_cpu_count; 56static atomic_t rcu_barrier_cpu_count;
56static DEFINE_MUTEX(rcu_barrier_mutex); 57static DEFINE_MUTEX(rcu_barrier_mutex);
57static struct completion rcu_barrier_completion; 58static struct completion rcu_barrier_completion;
59int rcu_scheduler_active __read_mostly;
58 60
59/* 61/*
60 * Awaken the corresponding synchronize_rcu() instance now that a 62 * Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head *head)
80void synchronize_rcu(void) 82void synchronize_rcu(void)
81{ 83{
82 struct rcu_synchronize rcu; 84 struct rcu_synchronize rcu;
85
86 if (rcu_blocking_is_gp())
87 return;
88
83 init_completion(&rcu.completion); 89 init_completion(&rcu.completion);
84 /* Will wake me after RCU finished. */ 90 /* Will wake me after RCU finished. */
85 call_rcu(&rcu.head, wakeme_after_rcu); 91 call_rcu(&rcu.head, wakeme_after_rcu);
@@ -175,3 +181,9 @@ void __init rcu_init(void)
175 __rcu_init(); 181 __rcu_init();
176} 182}
177 183
184void rcu_scheduler_starting(void)
185{
186 WARN_ON(num_online_cpus() != 1);
187 WARN_ON(nr_context_switches() > 0);
188 rcu_scheduler_active = 1;
189}
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 33cfc50781f9..5d59e850fb71 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
1181{ 1181{
1182 struct rcu_synchronize rcu; 1182 struct rcu_synchronize rcu;
1183 1183
1184 if (num_online_cpus() == 1)
1185 return; /* blocking is gp if only one CPU! */
1186
1184 init_completion(&rcu.completion); 1187 init_completion(&rcu.completion);
1185 /* Will wake me after RCU finished. */ 1188 /* Will wake me after RCU finished. */
1186 call_rcu_sched(&rcu.head, wakeme_after_rcu); 1189 call_rcu_sched(&rcu.head, wakeme_after_rcu);
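Both RCU changes above implement the same early-boot shortcut: rcupdate.c gains rcu_scheduler_active, set by rcu_scheduler_starting() after checking that only one CPU is online and no context switch has happened, and synchronize_rcu() returns immediately when rcu_blocking_is_gp() holds, while rcupreempt.c writes the single-CPU test out inline ("blocking is gp if only one CPU!"). rcu_blocking_is_gp() itself is defined outside this diff; the single-online-CPU check in the model below is an assumption drawn from the rcupreempt.c hunk, and the *_model() names are made up for illustration:

#include <stdio.h>

static int online_cpus = 1;             /* stand-in for num_online_cpus() */

/*
 * Hypothetical model of rcu_blocking_is_gp(): with a single CPU online,
 * merely getting to run here means every CPU has passed through a
 * quiescent state, so blocking is itself a grace period.
 */
static int rcu_blocking_is_gp(void)
{
        return online_cpus == 1;
}

static void synchronize_rcu_model(void)
{
        if (rcu_blocking_is_gp()) {
                printf("early boot / UP: grace period is free, return now\n");
                return;
        }
        printf("would queue a callback and block until it runs\n");
}

int main(void)
{
        synchronize_rcu_model();        /* single CPU: returns immediately */
        online_cpus = 4;
        synchronize_rcu_model();        /* SMP up: must really wait */
        return 0;
}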
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index b2fd602a6f6f..97ce31579ec0 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
948void rcu_check_callbacks(int cpu, int user) 948void rcu_check_callbacks(int cpu, int user)
949{ 949{
950 if (user || 950 if (user ||
951 (idle_cpu(cpu) && !in_softirq() && 951 (idle_cpu(cpu) && rcu_scheduler_active &&
952 hardirq_count() <= (1 << HARDIRQ_SHIFT))) { 952 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
953 953
954 /* 954 /*
955 * Get here if this CPU took its interrupt from user 955 * Get here if this CPU took its interrupt from user
diff --git a/kernel/relay.c b/kernel/relay.c
index 9d79b7854fa6..8f2179c8056f 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -750,7 +750,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
750 * from the scheduler (trying to re-grab 750 * from the scheduler (trying to re-grab
751 * rq->lock), so defer it. 751 * rq->lock), so defer it.
752 */ 752 */
753 __mod_timer(&buf->timer, jiffies + 1); 753 mod_timer(&buf->timer, jiffies + 1);
754 } 754 }
755 755
756 old = buf->data; 756 old = buf->data;
diff --git a/kernel/sched.c b/kernel/sched.c
index 0e5c38e1c8b5..f4c413bdd38d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
223{ 223{
224 ktime_t now; 224 ktime_t now;
225 225
226 if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF) 226 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
227 return; 227 return;
228 228
229 if (hrtimer_active(&rt_b->rt_period_timer)) 229 if (hrtimer_active(&rt_b->rt_period_timer))
@@ -331,6 +331,13 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
331 */ 331 */
332static DEFINE_SPINLOCK(task_group_lock); 332static DEFINE_SPINLOCK(task_group_lock);
333 333
334#ifdef CONFIG_SMP
335static int root_task_group_empty(void)
336{
337 return list_empty(&root_task_group.children);
338}
339#endif
340
334#ifdef CONFIG_FAIR_GROUP_SCHED 341#ifdef CONFIG_FAIR_GROUP_SCHED
335#ifdef CONFIG_USER_SCHED 342#ifdef CONFIG_USER_SCHED
336# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) 343# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@ -391,6 +398,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
391 398
392#else 399#else
393 400
401#ifdef CONFIG_SMP
402static int root_task_group_empty(void)
403{
404 return 1;
405}
406#endif
407
394static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 408static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
395static inline struct task_group *task_group(struct task_struct *p) 409static inline struct task_group *task_group(struct task_struct *p)
396{ 410{
@@ -467,11 +481,17 @@ struct rt_rq {
467 struct rt_prio_array active; 481 struct rt_prio_array active;
468 unsigned long rt_nr_running; 482 unsigned long rt_nr_running;
469#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 483#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
470 int highest_prio; /* highest queued rt task prio */ 484 struct {
485 int curr; /* highest queued rt task prio */
486#ifdef CONFIG_SMP
487 int next; /* next highest */
488#endif
489 } highest_prio;
471#endif 490#endif
472#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
473 unsigned long rt_nr_migratory; 492 unsigned long rt_nr_migratory;
474 int overloaded; 493 int overloaded;
494 struct plist_head pushable_tasks;
475#endif 495#endif
476 int rt_throttled; 496 int rt_throttled;
477 u64 rt_time; 497 u64 rt_time;
@@ -549,7 +569,6 @@ struct rq {
549 unsigned long nr_running; 569 unsigned long nr_running;
550 #define CPU_LOAD_IDX_MAX 5 570 #define CPU_LOAD_IDX_MAX 5
551 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 571 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
552 unsigned char idle_at_tick;
553#ifdef CONFIG_NO_HZ 572#ifdef CONFIG_NO_HZ
554 unsigned long last_tick_seen; 573 unsigned long last_tick_seen;
555 unsigned char in_nohz_recently; 574 unsigned char in_nohz_recently;
@@ -590,6 +609,7 @@ struct rq {
590 struct root_domain *rd; 609 struct root_domain *rd;
591 struct sched_domain *sd; 610 struct sched_domain *sd;
592 611
612 unsigned char idle_at_tick;
593 /* For active balancing */ 613 /* For active balancing */
594 int active_balance; 614 int active_balance;
595 int push_cpu; 615 int push_cpu;
@@ -618,9 +638,6 @@ struct rq {
618 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 638 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
619 639
620 /* sys_sched_yield() stats */ 640 /* sys_sched_yield() stats */
621 unsigned int yld_exp_empty;
622 unsigned int yld_act_empty;
623 unsigned int yld_both_empty;
624 unsigned int yld_count; 641 unsigned int yld_count;
625 642
626 /* schedule() stats */ 643 /* schedule() stats */
@@ -1183,10 +1200,10 @@ static void resched_task(struct task_struct *p)
1183 1200
1184 assert_spin_locked(&task_rq(p)->lock); 1201 assert_spin_locked(&task_rq(p)->lock);
1185 1202
1186 if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) 1203 if (test_tsk_need_resched(p))
1187 return; 1204 return;
1188 1205
1189 set_tsk_thread_flag(p, TIF_NEED_RESCHED); 1206 set_tsk_need_resched(p);
1190 1207
1191 cpu = task_cpu(p); 1208 cpu = task_cpu(p);
1192 if (cpu == smp_processor_id()) 1209 if (cpu == smp_processor_id())
@@ -1242,7 +1259,7 @@ void wake_up_idle_cpu(int cpu)
1242 * lockless. The worst case is that the other CPU runs the 1259 * lockless. The worst case is that the other CPU runs the
1243 * idle task through an additional NOOP schedule() 1260 * idle task through an additional NOOP schedule()
1244 */ 1261 */
1245 set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED); 1262 set_tsk_need_resched(rq->idle);
1246 1263
1247 /* NEED_RESCHED must be visible before we test polling */ 1264 /* NEED_RESCHED must be visible before we test polling */
1248 smp_mb(); 1265 smp_mb();
@@ -1610,21 +1627,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1610 1627
1611#endif 1628#endif
1612 1629
1630#ifdef CONFIG_PREEMPT
1631
1613/* 1632/*
1614 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1633 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1634 * way at the expense of forcing extra atomic operations in all
1635 * invocations. This assures that the double_lock is acquired using the
1636 * same underlying policy as the spinlock_t on this architecture, which
1637 * reduces latency compared to the unfair variant below. However, it
1638 * also adds more overhead and therefore may reduce throughput.
1615 */ 1639 */
1616static int double_lock_balance(struct rq *this_rq, struct rq *busiest) 1640static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(this_rq->lock)
1642 __acquires(busiest->lock)
1643 __acquires(this_rq->lock)
1644{
1645 spin_unlock(&this_rq->lock);
1646 double_rq_lock(this_rq, busiest);
1647
1648 return 1;
1649}
1650
1651#else
1652/*
1653 * Unfair double_lock_balance: Optimizes throughput at the expense of
1654 * latency by eliminating extra atomic operations when the locks are
1655 * already in proper order on entry. This favors lower cpu-ids and will
1656 * grant the double lock to lower cpus over higher ids under contention,
1657 * regardless of entry order into the function.
1658 */
1659static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1617 __releases(this_rq->lock) 1660 __releases(this_rq->lock)
1618 __acquires(busiest->lock) 1661 __acquires(busiest->lock)
1619 __acquires(this_rq->lock) 1662 __acquires(this_rq->lock)
1620{ 1663{
1621 int ret = 0; 1664 int ret = 0;
1622 1665
1623 if (unlikely(!irqs_disabled())) {
1624 /* printk() doesn't work good under rq->lock */
1625 spin_unlock(&this_rq->lock);
1626 BUG_ON(1);
1627 }
1628 if (unlikely(!spin_trylock(&busiest->lock))) { 1666 if (unlikely(!spin_trylock(&busiest->lock))) {
1629 if (busiest < this_rq) { 1667 if (busiest < this_rq) {
1630 spin_unlock(&this_rq->lock); 1668 spin_unlock(&this_rq->lock);
@@ -1637,6 +1675,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1637 return ret; 1675 return ret;
1638} 1676}
1639 1677
1678#endif /* CONFIG_PREEMPT */
1679
1680/*
1681 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1682 */
1683static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1684{
1685 if (unlikely(!irqs_disabled())) {
1686 /* printk() doesn't work good under rq->lock */
1687 spin_unlock(&this_rq->lock);
1688 BUG_ON(1);
1689 }
1690
1691 return _double_lock_balance(this_rq, busiest);
1692}
1693
1640static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1694static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(busiest->lock) 1695 __releases(busiest->lock)
1642{ 1696{
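The two _double_lock_balance() flavours above trade latency for throughput. Under CONFIG_PREEMPT the held rq->lock is always dropped and both locks are retaken via double_rq_lock(), which is fair but costs extra atomics; without CONFIG_PREEMPT the code trylocks the busiest runqueue and only backs off when the lock addresses are ordered the "wrong" way, so the lower-addressed lock is always acquired first and AB-BA deadlock cannot occur. A userspace sketch of that address-ordered back-off, with pthread mutexes standing in for rq->lock (this models only the non-preempt path):

#include <pthread.h>

struct rq {
        pthread_mutex_t lock;
};

/*
 * Model of the non-preempt _double_lock_balance(): called with
 * this_rq->lock held; returns 1 if that lock had to be dropped and
 * retaken (so any state read under it may be stale), 0 otherwise.
 */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
        int dropped = 0;

        if (pthread_mutex_trylock(&busiest->lock) != 0) {
                if (busiest < this_rq) {
                        /*
                         * We hold the higher-addressed lock: waiting here
                         * could deadlock, so release ours and take both in
                         * the global low-address-first order.
                         */
                        pthread_mutex_unlock(&this_rq->lock);
                        pthread_mutex_lock(&busiest->lock);
                        pthread_mutex_lock(&this_rq->lock);
                        dropped = 1;
                } else {
                        /* already in global order: plain waiting is safe */
                        pthread_mutex_lock(&busiest->lock);
                }
        }
        return dropped;
}

The CONFIG_PREEMPT variant in the diff skips the trylock and always pays the unlock/relock through double_rq_lock(), which bounds how long any one runqueue lock is held at the price of extra atomic operations.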
@@ -1705,6 +1759,9 @@ static void update_avg(u64 *avg, u64 sample)
1705 1759
1706static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) 1760static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1707{ 1761{
1762 if (wakeup)
1763 p->se.start_runtime = p->se.sum_exec_runtime;
1764
1708 sched_info_queued(p); 1765 sched_info_queued(p);
1709 p->sched_class->enqueue_task(rq, p, wakeup); 1766 p->sched_class->enqueue_task(rq, p, wakeup);
1710 p->se.on_rq = 1; 1767 p->se.on_rq = 1;
@@ -1712,10 +1769,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1712 1769
1713static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1770static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1714{ 1771{
1715 if (sleep && p->se.last_wakeup) { 1772 if (sleep) {
1716 update_avg(&p->se.avg_overlap, 1773 if (p->se.last_wakeup) {
1717 p->se.sum_exec_runtime - p->se.last_wakeup); 1774 update_avg(&p->se.avg_overlap,
1718 p->se.last_wakeup = 0; 1775 p->se.sum_exec_runtime - p->se.last_wakeup);
1776 p->se.last_wakeup = 0;
1777 } else {
1778 update_avg(&p->se.avg_wakeup,
1779 sysctl_sched_wakeup_granularity);
1780 }
1719 } 1781 }
1720 1782
1721 sched_info_dequeued(p); 1783 sched_info_dequeued(p);
@@ -2017,7 +2079,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2017 * it must be off the runqueue _entirely_, and not 2079 * it must be off the runqueue _entirely_, and not
2018 * preempted! 2080 * preempted!
2019 * 2081 *
2020 * So if it wa still runnable (but just not actively 2082 * So if it was still runnable (but just not actively
2021 * running right now), it's preempted, and we should 2083 * running right now), it's preempted, and we should
2022 * yield - it could be a while. 2084 * yield - it could be a while.
2023 */ 2085 */
@@ -2267,7 +2329,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2267 sync = 0; 2329 sync = 0;
2268 2330
2269#ifdef CONFIG_SMP 2331#ifdef CONFIG_SMP
2270 if (sched_feat(LB_WAKEUP_UPDATE)) { 2332 if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
2271 struct sched_domain *sd; 2333 struct sched_domain *sd;
2272 2334
2273 this_cpu = raw_smp_processor_id(); 2335 this_cpu = raw_smp_processor_id();
@@ -2345,6 +2407,22 @@ out_activate:
2345 activate_task(rq, p, 1); 2407 activate_task(rq, p, 1);
2346 success = 1; 2408 success = 1;
2347 2409
2410 /*
2411 * Only attribute actual wakeups done by this task.
2412 */
2413 if (!in_interrupt()) {
2414 struct sched_entity *se = &current->se;
2415 u64 sample = se->sum_exec_runtime;
2416
2417 if (se->last_wakeup)
2418 sample -= se->last_wakeup;
2419 else
2420 sample -= se->start_runtime;
2421 update_avg(&se->avg_wakeup, sample);
2422
2423 se->last_wakeup = se->sum_exec_runtime;
2424 }
2425
2348out_running: 2426out_running:
2349 trace_sched_wakeup(rq, p, success); 2427 trace_sched_wakeup(rq, p, success);
2350 check_preempt_curr(rq, p, sync); 2428 check_preempt_curr(rq, p, sync);
@@ -2355,8 +2433,6 @@ out_running:
2355 p->sched_class->task_wake_up(rq, p); 2433 p->sched_class->task_wake_up(rq, p);
2356#endif 2434#endif
2357out: 2435out:
2358 current->se.last_wakeup = current->se.sum_exec_runtime;
2359
2360 task_rq_unlock(rq, &flags); 2436 task_rq_unlock(rq, &flags);
2361 2437
2362 return success; 2438 return success;
@@ -2386,6 +2462,8 @@ static void __sched_fork(struct task_struct *p)
2386 p->se.prev_sum_exec_runtime = 0; 2462 p->se.prev_sum_exec_runtime = 0;
2387 p->se.last_wakeup = 0; 2463 p->se.last_wakeup = 0;
2388 p->se.avg_overlap = 0; 2464 p->se.avg_overlap = 0;
2465 p->se.start_runtime = 0;
2466 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2389 2467
2390#ifdef CONFIG_SCHEDSTATS 2468#ifdef CONFIG_SCHEDSTATS
2391 p->se.wait_start = 0; 2469 p->se.wait_start = 0;
@@ -2448,6 +2526,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
2448 /* Want to start with kernel preemption disabled. */ 2526 /* Want to start with kernel preemption disabled. */
2449 task_thread_info(p)->preempt_count = 1; 2527 task_thread_info(p)->preempt_count = 1;
2450#endif 2528#endif
2529 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2530
2451 put_cpu(); 2531 put_cpu();
2452} 2532}
2453 2533
@@ -2491,7 +2571,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2491#ifdef CONFIG_PREEMPT_NOTIFIERS 2571#ifdef CONFIG_PREEMPT_NOTIFIERS
2492 2572
2493/** 2573/**
2494 * preempt_notifier_register - tell me when current is being being preempted & rescheduled 2574 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2495 * @notifier: notifier struct to register 2575 * @notifier: notifier struct to register
2496 */ 2576 */
2497void preempt_notifier_register(struct preempt_notifier *notifier) 2577void preempt_notifier_register(struct preempt_notifier *notifier)
@@ -2588,6 +2668,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2588{ 2668{
2589 struct mm_struct *mm = rq->prev_mm; 2669 struct mm_struct *mm = rq->prev_mm;
2590 long prev_state; 2670 long prev_state;
2671#ifdef CONFIG_SMP
2672 int post_schedule = 0;
2673
2674 if (current->sched_class->needs_post_schedule)
2675 post_schedule = current->sched_class->needs_post_schedule(rq);
2676#endif
2591 2677
2592 rq->prev_mm = NULL; 2678 rq->prev_mm = NULL;
2593 2679
@@ -2606,7 +2692,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2606 finish_arch_switch(prev); 2692 finish_arch_switch(prev);
2607 finish_lock_switch(rq, prev); 2693 finish_lock_switch(rq, prev);
2608#ifdef CONFIG_SMP 2694#ifdef CONFIG_SMP
2609 if (current->sched_class->post_schedule) 2695 if (post_schedule)
2610 current->sched_class->post_schedule(rq); 2696 current->sched_class->post_schedule(rq);
2611#endif 2697#endif
2612 2698
@@ -2913,6 +2999,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2913 struct sched_domain *sd, enum cpu_idle_type idle, 2999 struct sched_domain *sd, enum cpu_idle_type idle,
2914 int *all_pinned) 3000 int *all_pinned)
2915{ 3001{
3002 int tsk_cache_hot = 0;
2916 /* 3003 /*
2917 * We do not migrate tasks that are: 3004 * We do not migrate tasks that are:
2918 * 1) running (obviously), or 3005 * 1) running (obviously), or
@@ -2936,10 +3023,11 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2936 * 2) too many balance attempts have failed. 3023 * 2) too many balance attempts have failed.
2937 */ 3024 */
2938 3025
2939 if (!task_hot(p, rq->clock, sd) || 3026 tsk_cache_hot = task_hot(p, rq->clock, sd);
2940 sd->nr_balance_failed > sd->cache_nice_tries) { 3027 if (!tsk_cache_hot ||
3028 sd->nr_balance_failed > sd->cache_nice_tries) {
2941#ifdef CONFIG_SCHEDSTATS 3029#ifdef CONFIG_SCHEDSTATS
2942 if (task_hot(p, rq->clock, sd)) { 3030 if (tsk_cache_hot) {
2943 schedstat_inc(sd, lb_hot_gained[idle]); 3031 schedstat_inc(sd, lb_hot_gained[idle]);
2944 schedstat_inc(p, se.nr_forced_migrations); 3032 schedstat_inc(p, se.nr_forced_migrations);
2945 } 3033 }
@@ -2947,7 +3035,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2947 return 1; 3035 return 1;
2948 } 3036 }
2949 3037
2950 if (task_hot(p, rq->clock, sd)) { 3038 if (tsk_cache_hot) {
2951 schedstat_inc(p, se.nr_failed_migrations_hot); 3039 schedstat_inc(p, se.nr_failed_migrations_hot);
2952 return 0; 3040 return 0;
2953 } 3041 }
@@ -2987,6 +3075,16 @@ next:
2987 pulled++; 3075 pulled++;
2988 rem_load_move -= p->se.load.weight; 3076 rem_load_move -= p->se.load.weight;
2989 3077
3078#ifdef CONFIG_PREEMPT
3079 /*
3080 * NEWIDLE balancing is a source of latency, so preemptible kernels
3081 * will stop after the first task is pulled to minimize the critical
3082 * section.
3083 */
3084 if (idle == CPU_NEWLY_IDLE)
3085 goto out;
3086#endif
3087
2990 /* 3088 /*
2991 * We only want to steal up to the prescribed amount of weighted load. 3089 * We only want to steal up to the prescribed amount of weighted load.
2992 */ 3090 */
@@ -3033,9 +3131,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3033 sd, idle, all_pinned, &this_best_prio); 3131 sd, idle, all_pinned, &this_best_prio);
3034 class = class->next; 3132 class = class->next;
3035 3133
3134#ifdef CONFIG_PREEMPT
3135 /*
3136 * NEWIDLE balancing is a source of latency, so preemptible
3137 * kernels will stop after the first task is pulled to minimize
3138 * the critical section.
3139 */
3036 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) 3140 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3037 break; 3141 break;
3038 3142#endif
3039 } while (class && max_load_move > total_load_moved); 3143 } while (class && max_load_move > total_load_moved);
3040 3144
3041 return total_load_moved > 0; 3145 return total_load_moved > 0;
@@ -3085,246 +3189,479 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3085 3189
3086 return 0; 3190 return 0;
3087} 3191}
3192/********** Helpers for find_busiest_group ************************/
3193/**
3194 * sd_lb_stats - Structure to store the statistics of a sched_domain
3195 * during load balancing.
3196 */
3197struct sd_lb_stats {
3198 struct sched_group *busiest; /* Busiest group in this sd */
3199 struct sched_group *this; /* Local group in this sd */
3200 unsigned long total_load; /* Total load of all groups in sd */
3201 unsigned long total_pwr; /* Total power of all groups in sd */
3202 unsigned long avg_load; /* Average load across all groups in sd */
3203
3204 /** Statistics of this group */
3205 unsigned long this_load;
3206 unsigned long this_load_per_task;
3207 unsigned long this_nr_running;
3208
3209 /* Statistics of the busiest group */
3210 unsigned long max_load;
3211 unsigned long busiest_load_per_task;
3212 unsigned long busiest_nr_running;
3213
3214 int group_imb; /* Is there imbalance in this sd */
3215#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3216 int power_savings_balance; /* Is powersave balance needed for this sd */
3217 struct sched_group *group_min; /* Least loaded group in sd */
3218 struct sched_group *group_leader; /* Group which relieves group_min */
3219 unsigned long min_load_per_task; /* load_per_task in group_min */
3220 unsigned long leader_nr_running; /* Nr running of group_leader */
3221 unsigned long min_nr_running; /* Nr running of group_min */
3222#endif
3223};
3088 3224
3089/* 3225/**
3090 * find_busiest_group finds and returns the busiest CPU group within the 3226 * sg_lb_stats - stats of a sched_group required for load_balancing
3091 * domain. It calculates and returns the amount of weighted load which 3227 */
3092 * should be moved to restore balance via the imbalance parameter. 3228struct sg_lb_stats {
3229 unsigned long avg_load; /*Avg load across the CPUs of the group */
3230 unsigned long group_load; /* Total load over the CPUs of the group */
3231 unsigned long sum_nr_running; /* Nr tasks running in the group */
3232 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3233 unsigned long group_capacity;
3234 int group_imb; /* Is there an imbalance in the group ? */
3235};
3236
3237/**
3238 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
3239 * @group: The group whose first cpu is to be returned.
3093 */ 3240 */
3094static struct sched_group * 3241static inline unsigned int group_first_cpu(struct sched_group *group)
3095find_busiest_group(struct sched_domain *sd, int this_cpu,
3096 unsigned long *imbalance, enum cpu_idle_type idle,
3097 int *sd_idle, const struct cpumask *cpus, int *balance)
3098{ 3242{
3099 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; 3243 return cpumask_first(sched_group_cpus(group));
3100 unsigned long max_load, avg_load, total_load, this_load, total_pwr; 3244}
3101 unsigned long max_pull;
3102 unsigned long busiest_load_per_task, busiest_nr_running;
3103 unsigned long this_load_per_task, this_nr_running;
3104 int load_idx, group_imb = 0;
3105#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3106 int power_savings_balance = 1;
3107 unsigned long leader_nr_running = 0, min_load_per_task = 0;
3108 unsigned long min_nr_running = ULONG_MAX;
3109 struct sched_group *group_min = NULL, *group_leader = NULL;
3110#endif
3111 3245
3112 max_load = this_load = total_load = total_pwr = 0; 3246/**
3113 busiest_load_per_task = busiest_nr_running = 0; 3247 * get_sd_load_idx - Obtain the load index for a given sched domain.
3114 this_load_per_task = this_nr_running = 0; 3248 * @sd: The sched_domain whose load_idx is to be obtained.
 3249 * @idle: The idle status of the CPU whose sd load_idx is to be obtained.
3250 */
3251static inline int get_sd_load_idx(struct sched_domain *sd,
3252 enum cpu_idle_type idle)
3253{
3254 int load_idx;
3115 3255
3116 if (idle == CPU_NOT_IDLE) 3256 switch (idle) {
3257 case CPU_NOT_IDLE:
3117 load_idx = sd->busy_idx; 3258 load_idx = sd->busy_idx;
3118 else if (idle == CPU_NEWLY_IDLE) 3259 break;
3260
3261 case CPU_NEWLY_IDLE:
3119 load_idx = sd->newidle_idx; 3262 load_idx = sd->newidle_idx;
3120 else 3263 break;
3264 default:
3121 load_idx = sd->idle_idx; 3265 load_idx = sd->idle_idx;
3266 break;
3267 }
3122 3268
3123 do { 3269 return load_idx;
3124 unsigned long load, group_capacity, max_cpu_load, min_cpu_load; 3270}
3125 int local_group;
3126 int i;
3127 int __group_imb = 0;
3128 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3129 unsigned long sum_nr_running, sum_weighted_load;
3130 unsigned long sum_avg_load_per_task;
3131 unsigned long avg_load_per_task;
3132 3271
3133 local_group = cpumask_test_cpu(this_cpu,
3134 sched_group_cpus(group));
3135 3272
3136 if (local_group) 3273#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3137 balance_cpu = cpumask_first(sched_group_cpus(group)); 3274/**
3275 * init_sd_power_savings_stats - Initialize power savings statistics for
3276 * the given sched_domain, during load balancing.
3277 *
3278 * @sd: Sched domain whose power-savings statistics are to be initialized.
3279 * @sds: Variable containing the statistics for sd.
3280 * @idle: Idle status of the CPU at which we're performing load-balancing.
3281 */
3282static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3283 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3284{
3285 /*
3286 * Busy processors will not participate in power savings
3287 * balance.
3288 */
3289 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3290 sds->power_savings_balance = 0;
3291 else {
3292 sds->power_savings_balance = 1;
3293 sds->min_nr_running = ULONG_MAX;
3294 sds->leader_nr_running = 0;
3295 }
3296}
3138 3297
3139 /* Tally up the load of all CPUs in the group */ 3298/**
3140 sum_weighted_load = sum_nr_running = avg_load = 0; 3299 * update_sd_power_savings_stats - Update the power saving stats for a
3141 sum_avg_load_per_task = avg_load_per_task = 0; 3300 * sched_domain while performing load balancing.
3301 *
3302 * @group: sched_group belonging to the sched_domain under consideration.
3303 * @sds: Variable containing the statistics of the sched_domain
3304 * @local_group: Does group contain the CPU for which we're performing
3305 * load balancing ?
3306 * @sgs: Variable containing the statistics of the group.
3307 */
3308static inline void update_sd_power_savings_stats(struct sched_group *group,
3309 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3310{
3311
3312 if (!sds->power_savings_balance)
3313 return;
3142 3314
3143 max_cpu_load = 0; 3315 /*
3144 min_cpu_load = ~0UL; 3316 * If the local group is idle or completely loaded
3317 * no need to do power savings balance at this domain
3318 */
3319 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3320 !sds->this_nr_running))
3321 sds->power_savings_balance = 0;
3145 3322
3146 for_each_cpu_and(i, sched_group_cpus(group), cpus) { 3323 /*
3147 struct rq *rq = cpu_rq(i); 3324 * If a group is already running at full capacity or idle,
3325 * don't include that group in power savings calculations
3326 */
3327 if (!sds->power_savings_balance ||
3328 sgs->sum_nr_running >= sgs->group_capacity ||
3329 !sgs->sum_nr_running)
3330 return;
3148 3331
3149 if (*sd_idle && rq->nr_running) 3332 /*
3150 *sd_idle = 0; 3333 * Calculate the group which has the least non-idle load.
3334 * This is the group from where we need to pick up the load
3335 * for saving power
3336 */
3337 if ((sgs->sum_nr_running < sds->min_nr_running) ||
3338 (sgs->sum_nr_running == sds->min_nr_running &&
3339 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3340 sds->group_min = group;
3341 sds->min_nr_running = sgs->sum_nr_running;
3342 sds->min_load_per_task = sgs->sum_weighted_load /
3343 sgs->sum_nr_running;
3344 }
3151 3345
3152 /* Bias balancing toward cpus of our domain */ 3346 /*
3153 if (local_group) { 3347 * Calculate the group which is almost near its
3154 if (idle_cpu(i) && !first_idle_cpu) { 3348 * capacity but still has some space to pick up some load
3155 first_idle_cpu = 1; 3349 * from other group and save more power
3156 balance_cpu = i; 3350 */
3157 } 3351 if (sgs->sum_nr_running > sgs->group_capacity - 1)
3352 return;
3158 3353
3159 load = target_load(i, load_idx); 3354 if (sgs->sum_nr_running > sds->leader_nr_running ||
3160 } else { 3355 (sgs->sum_nr_running == sds->leader_nr_running &&
3161 load = source_load(i, load_idx); 3356 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3162 if (load > max_cpu_load) 3357 sds->group_leader = group;
3163 max_cpu_load = load; 3358 sds->leader_nr_running = sgs->sum_nr_running;
3164 if (min_cpu_load > load) 3359 }
3165 min_cpu_load = load; 3360}
3166 }
3167 3361
3168 avg_load += load; 3362/**
3169 sum_nr_running += rq->nr_running; 3363 * check_power_save_busiest_group - Check if we have potential to perform
3170 sum_weighted_load += weighted_cpuload(i); 3364 * some power-savings balance. If yes, set the busiest group to be
 3365 * the least loaded group in the sched_domain, so that its CPUs can
3366 * be put to idle.
3367 *
3368 * @sds: Variable containing the statistics of the sched_domain
3369 * under consideration.
3370 * @this_cpu: Cpu at which we're currently performing load-balancing.
3371 * @imbalance: Variable to store the imbalance.
3372 *
3373 * Returns 1 if there is potential to perform power-savings balance.
3374 * Else returns 0.
3375 */
3376static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3377 int this_cpu, unsigned long *imbalance)
3378{
3379 if (!sds->power_savings_balance)
3380 return 0;
3171 3381
3172 sum_avg_load_per_task += cpu_avg_load_per_task(i); 3382 if (sds->this != sds->group_leader ||
3173 } 3383 sds->group_leader == sds->group_min)
3384 return 0;
3174 3385
3175 /* 3386 *imbalance = sds->min_load_per_task;
3176 * First idle cpu or the first cpu(busiest) in this sched group 3387 sds->busiest = sds->group_min;
3177 * is eligible for doing load balancing at this and above
3178 * domains. In the newly idle case, we will allow all the cpu's
3179 * to do the newly idle load balance.
3180 */
3181 if (idle != CPU_NEWLY_IDLE && local_group &&
3182 balance_cpu != this_cpu && balance) {
3183 *balance = 0;
3184 goto ret;
3185 }
3186 3388
3187 total_load += avg_load; 3389 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
3188 total_pwr += group->__cpu_power; 3390 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
3391 group_first_cpu(sds->group_leader);
3392 }
3189 3393
3190 /* Adjust by relative CPU power of the group */ 3394 return 1;
3191 avg_load = sg_div_cpu_power(group,
3192 avg_load * SCHED_LOAD_SCALE);
3193 3395
3396}
3397#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3398static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3399 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3400{
3401 return;
3402}
3194 3403
3195 /* 3404static inline void update_sd_power_savings_stats(struct sched_group *group,
3196 * Consider the group unbalanced when the imbalance is larger 3405 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3197 * than the average weight of two tasks. 3406{
3198 * 3407 return;
3199 * APZ: with cgroup the avg task weight can vary wildly and 3408}
3200 * might not be a suitable number - should we keep a 3409
3201 * normalized nr_running number somewhere that negates 3410static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3202 * the hierarchy? 3411 int this_cpu, unsigned long *imbalance)
3203 */ 3412{
3204 avg_load_per_task = sg_div_cpu_power(group, 3413 return 0;
3205 sum_avg_load_per_task * SCHED_LOAD_SCALE); 3414}
3415#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3416
3417
3418/**
3419 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3420 * @group: sched_group whose statistics are to be updated.
3421 * @this_cpu: Cpu for which load balance is currently performed.
3422 * @idle: Idle status of this_cpu
3423 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3424 * @sd_idle: Idle status of the sched_domain containing group.
3425 * @local_group: Does group contain this_cpu.
3426 * @cpus: Set of cpus considered for load balancing.
3427 * @balance: Should we balance.
3428 * @sgs: variable to hold the statistics for this group.
3429 */
3430static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
3431 enum cpu_idle_type idle, int load_idx, int *sd_idle,
3432 int local_group, const struct cpumask *cpus,
3433 int *balance, struct sg_lb_stats *sgs)
3434{
3435 unsigned long load, max_cpu_load, min_cpu_load;
3436 int i;
3437 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3438 unsigned long sum_avg_load_per_task;
3439 unsigned long avg_load_per_task;
3206 3440
3207 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) 3441 if (local_group)
3208 __group_imb = 1; 3442 balance_cpu = group_first_cpu(group);
3209 3443
3210 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; 3444 /* Tally up the load of all CPUs in the group */
3445 sum_avg_load_per_task = avg_load_per_task = 0;
3446 max_cpu_load = 0;
3447 min_cpu_load = ~0UL;
3211 3448
3449 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3450 struct rq *rq = cpu_rq(i);
3451
3452 if (*sd_idle && rq->nr_running)
3453 *sd_idle = 0;
3454
3455 /* Bias balancing toward cpus of our domain */
3212 if (local_group) { 3456 if (local_group) {
3213 this_load = avg_load; 3457 if (idle_cpu(i) && !first_idle_cpu) {
3214 this = group; 3458 first_idle_cpu = 1;
3215 this_nr_running = sum_nr_running; 3459 balance_cpu = i;
3216 this_load_per_task = sum_weighted_load; 3460 }
3217 } else if (avg_load > max_load && 3461
3218 (sum_nr_running > group_capacity || __group_imb)) { 3462 load = target_load(i, load_idx);
3219 max_load = avg_load; 3463 } else {
3220 busiest = group; 3464 load = source_load(i, load_idx);
3221 busiest_nr_running = sum_nr_running; 3465 if (load > max_cpu_load)
3222 busiest_load_per_task = sum_weighted_load; 3466 max_cpu_load = load;
3223 group_imb = __group_imb; 3467 if (min_cpu_load > load)
3468 min_cpu_load = load;
3224 } 3469 }
3225 3470
3226#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3471 sgs->group_load += load;
3227 /* 3472 sgs->sum_nr_running += rq->nr_running;
3228 * Busy processors will not participate in power savings 3473 sgs->sum_weighted_load += weighted_cpuload(i);
3229 * balance.
3230 */
3231 if (idle == CPU_NOT_IDLE ||
3232 !(sd->flags & SD_POWERSAVINGS_BALANCE))
3233 goto group_next;
3234 3474
3235 /* 3475 sum_avg_load_per_task += cpu_avg_load_per_task(i);
3236 * If the local group is idle or completely loaded 3476 }
3237 * no need to do power savings balance at this domain
3238 */
3239 if (local_group && (this_nr_running >= group_capacity ||
3240 !this_nr_running))
3241 power_savings_balance = 0;
3242 3477
3243 /* 3478 /*
3244 * If a group is already running at full capacity or idle, 3479 * First idle cpu or the first cpu(busiest) in this sched group
3245 * don't include that group in power savings calculations 3480 * is eligible for doing load balancing at this and above
3246 */ 3481 * domains. In the newly idle case, we will allow all the cpu's
3247 if (!power_savings_balance || sum_nr_running >= group_capacity 3482 * to do the newly idle load balance.
3248 || !sum_nr_running) 3483 */
3249 goto group_next; 3484 if (idle != CPU_NEWLY_IDLE && local_group &&
3485 balance_cpu != this_cpu && balance) {
3486 *balance = 0;
3487 return;
3488 }
3250 3489
3251 /* 3490 /* Adjust by relative CPU power of the group */
3252 * Calculate the group which has the least non-idle load. 3491 sgs->avg_load = sg_div_cpu_power(group,
3253 * This is the group from where we need to pick up the load 3492 sgs->group_load * SCHED_LOAD_SCALE);
3254 * for saving power
3255 */
3256 if ((sum_nr_running < min_nr_running) ||
3257 (sum_nr_running == min_nr_running &&
3258 cpumask_first(sched_group_cpus(group)) >
3259 cpumask_first(sched_group_cpus(group_min)))) {
3260 group_min = group;
3261 min_nr_running = sum_nr_running;
3262 min_load_per_task = sum_weighted_load /
3263 sum_nr_running;
3264 }
3265 3493
3266 /* 3494
3267 * Calculate the group which is almost near its 3495 /*
3268 * capacity but still has some space to pick up some load 3496 * Consider the group unbalanced when the imbalance is larger
3269 * from other group and save more power 3497 * than the average weight of two tasks.
3270 */ 3498 *
3271 if (sum_nr_running <= group_capacity - 1) { 3499 * APZ: with cgroup the avg task weight can vary wildly and
3272 if (sum_nr_running > leader_nr_running || 3500 * might not be a suitable number - should we keep a
3273 (sum_nr_running == leader_nr_running && 3501 * normalized nr_running number somewhere that negates
3274 cpumask_first(sched_group_cpus(group)) < 3502 * the hierarchy?
3275 cpumask_first(sched_group_cpus(group_leader)))) { 3503 */
3276 group_leader = group; 3504 avg_load_per_task = sg_div_cpu_power(group,
3277 leader_nr_running = sum_nr_running; 3505 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3278 } 3506
3507 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3508 sgs->group_imb = 1;
3509
3510 sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
3511
3512}
3513
3514/**
3515 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
3516 * @sd: sched_domain whose statistics are to be updated.
3517 * @this_cpu: Cpu for which load balance is currently performed.
3518 * @idle: Idle status of this_cpu
3519 * @sd_idle: Idle status of the sched_domain containing group.
3520 * @cpus: Set of cpus considered for load balancing.
3521 * @balance: Should we balance.
3522 * @sds: variable to hold the statistics for this sched_domain.
3523 */
3524static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3525 enum cpu_idle_type idle, int *sd_idle,
3526 const struct cpumask *cpus, int *balance,
3527 struct sd_lb_stats *sds)
3528{
3529 struct sched_group *group = sd->groups;
3530 struct sg_lb_stats sgs;
3531 int load_idx;
3532
3533 init_sd_power_savings_stats(sd, sds, idle);
3534 load_idx = get_sd_load_idx(sd, idle);
3535
3536 do {
3537 int local_group;
3538
3539 local_group = cpumask_test_cpu(this_cpu,
3540 sched_group_cpus(group));
3541 memset(&sgs, 0, sizeof(sgs));
3542 update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
3543 local_group, cpus, balance, &sgs);
3544
3545 if (local_group && balance && !(*balance))
3546 return;
3547
3548 sds->total_load += sgs.group_load;
3549 sds->total_pwr += group->__cpu_power;
3550
3551 if (local_group) {
3552 sds->this_load = sgs.avg_load;
3553 sds->this = group;
3554 sds->this_nr_running = sgs.sum_nr_running;
3555 sds->this_load_per_task = sgs.sum_weighted_load;
3556 } else if (sgs.avg_load > sds->max_load &&
3557 (sgs.sum_nr_running > sgs.group_capacity ||
3558 sgs.group_imb)) {
3559 sds->max_load = sgs.avg_load;
3560 sds->busiest = group;
3561 sds->busiest_nr_running = sgs.sum_nr_running;
3562 sds->busiest_load_per_task = sgs.sum_weighted_load;
3563 sds->group_imb = sgs.group_imb;
3279 } 3564 }
3280group_next: 3565
3281#endif 3566 update_sd_power_savings_stats(group, sds, local_group, &sgs);
3282 group = group->next; 3567 group = group->next;
3283 } while (group != sd->groups); 3568 } while (group != sd->groups);
3284 3569
3285 if (!busiest || this_load >= max_load || busiest_nr_running == 0) 3570}
3286 goto out_balanced;
3287
3288 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
3289 3571
3290 if (this_load >= avg_load || 3572/**
3291 100*max_load <= sd->imbalance_pct*this_load) 3573 * fix_small_imbalance - Calculate the minor imbalance that exists
3292 goto out_balanced; 3574 * amongst the groups of a sched_domain, during
3575 * load balancing.
3576 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3577 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3578 * @imbalance: Variable to store the imbalance.
3579 */
3580static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3581 int this_cpu, unsigned long *imbalance)
3582{
3583 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3584 unsigned int imbn = 2;
3585
3586 if (sds->this_nr_running) {
3587 sds->this_load_per_task /= sds->this_nr_running;
3588 if (sds->busiest_load_per_task >
3589 sds->this_load_per_task)
3590 imbn = 1;
3591 } else
3592 sds->this_load_per_task =
3593 cpu_avg_load_per_task(this_cpu);
3293 3594
3294 busiest_load_per_task /= busiest_nr_running; 3595 if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3295 if (group_imb) 3596 sds->busiest_load_per_task * imbn) {
3296 busiest_load_per_task = min(busiest_load_per_task, avg_load); 3597 *imbalance = sds->busiest_load_per_task;
3598 return;
3599 }
3297 3600
3298 /* 3601 /*
3299 * We're trying to get all the cpus to the average_load, so we don't 3602 * OK, we don't have enough imbalance to justify moving tasks,
3300 * want to push ourselves above the average load, nor do we wish to 3603 * however we may be able to increase total CPU power used by
3301 * reduce the max loaded cpu below the average load, as either of these 3604 * moving them.
3302 * actions would just result in more rebalancing later, and ping-pong
3303 * tasks around. Thus we look for the minimum possible imbalance.
3304 * Negative imbalances (*we* are more loaded than anyone else) will
3305 * be counted as no imbalance for these purposes -- we can't fix that
3306 * by pulling tasks to us. Be careful of negative numbers as they'll
3307 * appear as very large values with unsigned longs.
3308 */ 3605 */
3309 if (max_load <= busiest_load_per_task)
3310 goto out_balanced;
3311 3606
3607 pwr_now += sds->busiest->__cpu_power *
3608 min(sds->busiest_load_per_task, sds->max_load);
3609 pwr_now += sds->this->__cpu_power *
3610 min(sds->this_load_per_task, sds->this_load);
3611 pwr_now /= SCHED_LOAD_SCALE;
3612
3613 /* Amount of load we'd subtract */
3614 tmp = sg_div_cpu_power(sds->busiest,
3615 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3616 if (sds->max_load > tmp)
3617 pwr_move += sds->busiest->__cpu_power *
3618 min(sds->busiest_load_per_task, sds->max_load - tmp);
3619
3620 /* Amount of load we'd add */
3621 if (sds->max_load * sds->busiest->__cpu_power <
3622 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
3623 tmp = sg_div_cpu_power(sds->this,
3624 sds->max_load * sds->busiest->__cpu_power);
3625 else
3626 tmp = sg_div_cpu_power(sds->this,
3627 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3628 pwr_move += sds->this->__cpu_power *
3629 min(sds->this_load_per_task, sds->this_load + tmp);
3630 pwr_move /= SCHED_LOAD_SCALE;
3631
3632 /* Move if we gain throughput */
3633 if (pwr_move > pwr_now)
3634 *imbalance = sds->busiest_load_per_task;
3635}
3636
3637/**
3638 * calculate_imbalance - Calculate the amount of imbalance present within the
3639 * groups of a given sched_domain during load balance.
3640 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3641 * @this_cpu: Cpu for which currently load balance is being performed.
3642 * @imbalance: The variable to store the imbalance.
3643 */
3644static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3645 unsigned long *imbalance)
3646{
3647 unsigned long max_pull;
3312 /* 3648 /*
3313 * In the presence of smp nice balancing, certain scenarios can have 3649 * In the presence of smp nice balancing, certain scenarios can have
3314 * max load less than avg load(as we skip the groups at or below 3650 * max load less than avg load(as we skip the groups at or below
3315 * its cpu_power, while calculating max_load..) 3651 * its cpu_power, while calculating max_load..)
3316 */ 3652 */
3317 if (max_load < avg_load) { 3653 if (sds->max_load < sds->avg_load) {
3318 *imbalance = 0; 3654 *imbalance = 0;
3319 goto small_imbalance; 3655 return fix_small_imbalance(sds, this_cpu, imbalance);
3320 } 3656 }
3321 3657
3322 /* Don't want to pull so many tasks that a group would go idle */ 3658 /* Don't want to pull so many tasks that a group would go idle */
3323 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task); 3659 max_pull = min(sds->max_load - sds->avg_load,
3660 sds->max_load - sds->busiest_load_per_task);
3324 3661
3325 /* How much load to actually move to equalise the imbalance */ 3662 /* How much load to actually move to equalise the imbalance */
3326 *imbalance = min(max_pull * busiest->__cpu_power, 3663 *imbalance = min(max_pull * sds->busiest->__cpu_power,
3327 (avg_load - this_load) * this->__cpu_power) 3664 (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
3328 / SCHED_LOAD_SCALE; 3665 / SCHED_LOAD_SCALE;
3329 3666
3330 /* 3667 /*
@@ -3333,78 +3670,110 @@ group_next:
3333 * a think about bumping its value to force at least one task to be 3670 * a think about bumping its value to force at least one task to be
3334 * moved 3671 * moved
3335 */ 3672 */
3336 if (*imbalance < busiest_load_per_task) { 3673 if (*imbalance < sds->busiest_load_per_task)
3337 unsigned long tmp, pwr_now, pwr_move; 3674 return fix_small_imbalance(sds, this_cpu, imbalance);
3338 unsigned int imbn;
3339
3340small_imbalance:
3341 pwr_move = pwr_now = 0;
3342 imbn = 2;
3343 if (this_nr_running) {
3344 this_load_per_task /= this_nr_running;
3345 if (busiest_load_per_task > this_load_per_task)
3346 imbn = 1;
3347 } else
3348 this_load_per_task = cpu_avg_load_per_task(this_cpu);
3349 3675
3350 if (max_load - this_load + busiest_load_per_task >= 3676}
3351 busiest_load_per_task * imbn) { 3677/******* find_busiest_group() helpers end here *********************/
3352 *imbalance = busiest_load_per_task;
3353 return busiest;
3354 }
3355 3678
3356 /* 3679/**
3357 * OK, we don't have enough imbalance to justify moving tasks, 3680 * find_busiest_group - Returns the busiest group within the sched_domain
3358 * however we may be able to increase total CPU power used by 3681 * if there is an imbalance. If there isn't an imbalance, and
3359 * moving them. 3682 * the user has opted for power-savings, it returns a group whose
3360 */ 3683 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3684 * such a group exists.
3685 *
3686 * Also calculates the amount of weighted load which should be moved
3687 * to restore balance.
3688 *
3689 * @sd: The sched_domain whose busiest group is to be returned.
3690 * @this_cpu: The cpu for which load balancing is currently being performed.
3691 * @imbalance: Variable which stores amount of weighted load which should
3692 * be moved to restore balance/put a group to idle.
3693 * @idle: The idle status of this_cpu.
3694 * @sd_idle: The idleness of sd
3695 * @cpus: The set of CPUs under consideration for load-balancing.
3696 * @balance: Pointer to a variable indicating if this_cpu
3697 * is the appropriate cpu to perform load balancing at this_level.
3698 *
3699 * Returns: - the busiest group if imbalance exists.
3700 * - If no imbalance and user has opted for power-savings balance,
3701 * return the least loaded group whose CPUs can be
3702 * put to idle by rebalancing its tasks onto our group.
3703 */
3704static struct sched_group *
3705find_busiest_group(struct sched_domain *sd, int this_cpu,
3706 unsigned long *imbalance, enum cpu_idle_type idle,
3707 int *sd_idle, const struct cpumask *cpus, int *balance)
3708{
3709 struct sd_lb_stats sds;
3361 3710
3362 pwr_now += busiest->__cpu_power * 3711 memset(&sds, 0, sizeof(sds));
3363 min(busiest_load_per_task, max_load);
3364 pwr_now += this->__cpu_power *
3365 min(this_load_per_task, this_load);
3366 pwr_now /= SCHED_LOAD_SCALE;
3367
3368 /* Amount of load we'd subtract */
3369 tmp = sg_div_cpu_power(busiest,
3370 busiest_load_per_task * SCHED_LOAD_SCALE);
3371 if (max_load > tmp)
3372 pwr_move += busiest->__cpu_power *
3373 min(busiest_load_per_task, max_load - tmp);
3374
3375 /* Amount of load we'd add */
3376 if (max_load * busiest->__cpu_power <
3377 busiest_load_per_task * SCHED_LOAD_SCALE)
3378 tmp = sg_div_cpu_power(this,
3379 max_load * busiest->__cpu_power);
3380 else
3381 tmp = sg_div_cpu_power(this,
3382 busiest_load_per_task * SCHED_LOAD_SCALE);
3383 pwr_move += this->__cpu_power *
3384 min(this_load_per_task, this_load + tmp);
3385 pwr_move /= SCHED_LOAD_SCALE;
3386 3712
3387 /* Move if we gain throughput */ 3713 /*
 3388 if (pwr_move > pwr_now) 3714 * Compute the various statistics relevant for load balancing at
3389 *imbalance = busiest_load_per_task; 3715 * this level.
3390 } 3716 */
3717 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
3718 balance, &sds);
3719
3720 /* Cases where imbalance does not exist from POV of this_cpu */
3721 /* 1) this_cpu is not the appropriate cpu to perform load balancing
3722 * at this level.
3723 * 2) There is no busy sibling group to pull from.
3724 * 3) This group is the busiest group.
 3725 * 4) This group is busier than the average busyness at this
3726 * sched_domain.
3727 * 5) The imbalance is within the specified limit.
3728 * 6) Any rebalance would lead to ping-pong
3729 */
3730 if (balance && !(*balance))
3731 goto ret;
3391 3732
3392 return busiest; 3733 if (!sds.busiest || sds.busiest_nr_running == 0)
3734 goto out_balanced;
3393 3735
3394out_balanced: 3736 if (sds.this_load >= sds.max_load)
3395#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3737 goto out_balanced;
3396 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3397 goto ret;
3398 3738
3399 if (this == group_leader && group_leader != group_min) { 3739 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
3400 *imbalance = min_load_per_task; 3740
3401 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { 3741 if (sds.this_load >= sds.avg_load)
3402 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = 3742 goto out_balanced;
3403 cpumask_first(sched_group_cpus(group_leader)); 3743
3404 } 3744 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3405 return group_min; 3745 goto out_balanced;
3406 } 3746
3407#endif 3747 sds.busiest_load_per_task /= sds.busiest_nr_running;
3748 if (sds.group_imb)
3749 sds.busiest_load_per_task =
3750 min(sds.busiest_load_per_task, sds.avg_load);
3751
3752 /*
3753 * We're trying to get all the cpus to the average_load, so we don't
3754 * want to push ourselves above the average load, nor do we wish to
3755 * reduce the max loaded cpu below the average load, as either of these
3756 * actions would just result in more rebalancing later, and ping-pong
3757 * tasks around. Thus we look for the minimum possible imbalance.
3758 * Negative imbalances (*we* are more loaded than anyone else) will
3759 * be counted as no imbalance for these purposes -- we can't fix that
3760 * by pulling tasks to us. Be careful of negative numbers as they'll
3761 * appear as very large values with unsigned longs.
3762 */
3763 if (sds.max_load <= sds.busiest_load_per_task)
3764 goto out_balanced;
3765
3766 /* Looks like there is an imbalance. Compute it */
3767 calculate_imbalance(&sds, this_cpu, imbalance);
3768 return sds.busiest;
3769
3770out_balanced:
3771 /*
3772 * There is no obvious imbalance. But check if we can do some balancing
3773 * to save power.
3774 */
3775 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3776 return sds.busiest;
3408ret: 3777ret:
3409 *imbalance = 0; 3778 *imbalance = 0;
3410 return NULL; 3779 return NULL;
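
The rewrite above splits the old monolithic find_busiest_group() into per-group statistics (sg_lb_stats), per-domain statistics (sd_lb_stats) and separate imbalance helpers. The sketch below is a heavily reduced user-space model of that flow: it borrows the field names from the patch but uses plain load numbers and skips cpu_power scaling, the balance_cpu check and the power-savings path entirely.

#include <stdio.h>
#include <string.h>

/* Greatly simplified per-group and per-domain statistics. */
struct sg_stats { unsigned long group_load; };
struct sd_stats {
        unsigned long total_load, this_load, max_load;
        int busiest;                    /* index of busiest group, -1 if none */
};

/* Fold the per-group stats into the domain stats, tracking the busiest
 * non-local group, much like update_sd_lb_stats() in the hunk. */
static void update_sd_stats(const struct sg_stats *sg, int ngroups,
                            int this_group, struct sd_stats *sds)
{
        memset(sds, 0, sizeof(*sds));
        sds->busiest = -1;
        for (int i = 0; i < ngroups; i++) {
                sds->total_load += sg[i].group_load;
                if (i == this_group)
                        sds->this_load = sg[i].group_load;
                else if (sg[i].group_load > sds->max_load) {
                        sds->max_load = sg[i].group_load;
                        sds->busiest = i;
                }
        }
}

int main(void)
{
        struct sg_stats groups[] = { { 300 }, { 100 }, { 50 } };
        struct sd_stats sds;

        update_sd_stats(groups, 3, 2, &sds);
        if (sds.busiest >= 0 && sds.max_load > sds.this_load) {
                unsigned long avg = sds.total_load / 3;
                unsigned long imbalance = sds.max_load > avg ?
                                          sds.max_load - avg : 0;
                printf("pull ~%lu load from group %d\n", imbalance, sds.busiest);
        }
        return 0;
}
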
@@ -4057,6 +4426,11 @@ static void run_rebalance_domains(struct softirq_action *h)
4057#endif 4426#endif
4058} 4427}
4059 4428
4429static inline int on_null_domain(int cpu)
4430{
4431 return !rcu_dereference(cpu_rq(cpu)->sd);
4432}
4433
4060/* 4434/*
4061 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 4435 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4062 * 4436 *
@@ -4114,7 +4488,9 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
4114 cpumask_test_cpu(cpu, nohz.cpu_mask)) 4488 cpumask_test_cpu(cpu, nohz.cpu_mask))
4115 return; 4489 return;
4116#endif 4490#endif
4117 if (time_after_eq(jiffies, rq->next_balance)) 4491 /* Don't need to rebalance while attached to NULL domain */
4492 if (time_after_eq(jiffies, rq->next_balance) &&
4493 likely(!on_null_domain(cpu)))
4118 raise_softirq(SCHED_SOFTIRQ); 4494 raise_softirq(SCHED_SOFTIRQ);
4119} 4495}
4120 4496
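
trigger_load_balance() above now refuses to raise SCHED_SOFTIRQ for a CPU whose runqueue is attached to a NULL sched_domain. A small model of the combined guard, pairing a jiffies-style wraparound compare with the on_null_domain() test (types and values are hypothetical, and RCU is omitted):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins: a jiffies-style deadline and an optional domain. */
struct rq { unsigned long next_balance; const void *sd; };

static bool should_raise_softirq(const struct rq *rq, unsigned long jiffies)
{
        /* Don't rebalance while attached to a NULL domain. */
        return (long)(jiffies - rq->next_balance) >= 0 && rq->sd != NULL;
}

int main(void)
{
        struct rq isolated = { .next_balance = 100, .sd = NULL };
        struct rq normal   = { .next_balance = 100, .sd = &normal };

        printf("isolated cpu: %d\n", should_raise_softirq(&isolated, 200));
        printf("normal cpu:   %d\n", should_raise_softirq(&normal, 200));
        return 0;
}
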
@@ -4508,11 +4884,33 @@ static inline void schedule_debug(struct task_struct *prev)
4508#endif 4884#endif
4509} 4885}
4510 4886
4887static void put_prev_task(struct rq *rq, struct task_struct *prev)
4888{
4889 if (prev->state == TASK_RUNNING) {
4890 u64 runtime = prev->se.sum_exec_runtime;
4891
4892 runtime -= prev->se.prev_sum_exec_runtime;
4893 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
4894
4895 /*
4896 * In order to avoid avg_overlap growing stale when we are
4897 * indeed overlapping and hence not getting put to sleep, grow
4898 * the avg_overlap on preemption.
4899 *
4900 * We use the average preemption runtime because that
4901 * correlates to the amount of cache footprint a task can
4902 * build up.
4903 */
4904 update_avg(&prev->se.avg_overlap, runtime);
4905 }
4906 prev->sched_class->put_prev_task(rq, prev);
4907}
4908
4511/* 4909/*
4512 * Pick up the highest-prio task: 4910 * Pick up the highest-prio task:
4513 */ 4911 */
4514static inline struct task_struct * 4912static inline struct task_struct *
4515pick_next_task(struct rq *rq, struct task_struct *prev) 4913pick_next_task(struct rq *rq)
4516{ 4914{
4517 const struct sched_class *class; 4915 const struct sched_class *class;
4518 struct task_struct *p; 4916 struct task_struct *p;
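
put_prev_task() above grows avg_overlap when a still-runnable task is switched out, using the runtime accumulated since the last preemption clamped to twice sysctl_sched_migration_cost. The sketch below models that update in user space; update_avg() is assumed here to be a simple divide-by-8 exponential moving average, and the migration-cost value is invented for the example, so neither is a quote of the kernel helper.

#include <stdint.h>
#include <stdio.h>

#define MIGRATION_COST_NS 500000ULL     /* hypothetical sysctl value, 0.5 ms */

/* Assumed EMA: new = old + (sample - old) / 8. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
        int64_t diff = (int64_t)(sample - *avg);
        *avg += diff / 8;
}

int main(void)
{
        uint64_t avg_overlap = 0;
        uint64_t sum_exec = 3000000, prev_sum_exec = 800000;

        uint64_t runtime = sum_exec - prev_sum_exec;
        if (runtime > 2 * MIGRATION_COST_NS)
                runtime = 2 * MIGRATION_COST_NS;        /* clamp, as in the hunk */

        update_avg(&avg_overlap, runtime);
        printf("avg_overlap -> %llu ns\n", (unsigned long long)avg_overlap);
        return 0;
}
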
@@ -4586,8 +4984,8 @@ need_resched_nonpreemptible:
4586 if (unlikely(!rq->nr_running)) 4984 if (unlikely(!rq->nr_running))
4587 idle_balance(cpu, rq); 4985 idle_balance(cpu, rq);
4588 4986
4589 prev->sched_class->put_prev_task(rq, prev); 4987 put_prev_task(rq, prev);
4590 next = pick_next_task(rq, prev); 4988 next = pick_next_task(rq);
4591 4989
4592 if (likely(prev != next)) { 4990 if (likely(prev != next)) {
4593 sched_info_switch(prev, next); 4991 sched_info_switch(prev, next);
@@ -4642,7 +5040,7 @@ asmlinkage void __sched preempt_schedule(void)
4642 * between schedule and now. 5040 * between schedule and now.
4643 */ 5041 */
4644 barrier(); 5042 barrier();
4645 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); 5043 } while (need_resched());
4646} 5044}
4647EXPORT_SYMBOL(preempt_schedule); 5045EXPORT_SYMBOL(preempt_schedule);
4648 5046
@@ -4671,7 +5069,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
4671 * between schedule and now. 5069 * between schedule and now.
4672 */ 5070 */
4673 barrier(); 5071 barrier();
4674 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); 5072 } while (need_resched());
4675} 5073}
4676 5074
4677#endif /* CONFIG_PREEMPT */ 5075#endif /* CONFIG_PREEMPT */
@@ -5145,7 +5543,7 @@ SYSCALL_DEFINE1(nice, int, increment)
5145 if (increment > 40) 5543 if (increment > 40)
5146 increment = 40; 5544 increment = 40;
5147 5545
5148 nice = PRIO_TO_NICE(current->static_prio) + increment; 5546 nice = TASK_NICE(current) + increment;
5149 if (nice < -20) 5547 if (nice < -20)
5150 nice = -20; 5548 nice = -20;
5151 if (nice > 19) 5549 if (nice > 19)
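
The sys_nice() hunk above only swaps PRIO_TO_NICE(current->static_prio) for the equivalent TASK_NICE(current); the surrounding clamping is unchanged. A tiny user-space model of that arithmetic follows; the symmetric -40 lower bound on the increment sits outside the visible hunk and is an assumption here.

#include <stdio.h>

/* Model of the clamping around the changed line: the increment is bounded
 * (the -40 side is assumed) and the resulting nice value kept in [-20, 19]. */
static int apply_nice(int current_nice, int increment)
{
        if (increment < -40)
                increment = -40;
        if (increment > 40)
                increment = 40;

        int nice = current_nice + increment;
        if (nice < -20)
                nice = -20;
        if (nice > 19)
                nice = 19;
        return nice;
}

int main(void)
{
        printf("%d\n", apply_nice(0, 100));     /* -> 19 */
        printf("%d\n", apply_nice(10, -100));   /* -> -20 */
        return 0;
}
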
@@ -6418,7 +6816,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
6418 if (!rq->nr_running) 6816 if (!rq->nr_running)
6419 break; 6817 break;
6420 update_rq_clock(rq); 6818 update_rq_clock(rq);
6421 next = pick_next_task(rq, rq->curr); 6819 next = pick_next_task(rq);
6422 if (!next) 6820 if (!next)
6423 break; 6821 break;
6424 next->sched_class->put_prev_task(rq, next); 6822 next->sched_class->put_prev_task(rq, next);
@@ -8213,11 +8611,15 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8213 __set_bit(MAX_RT_PRIO, array->bitmap); 8611 __set_bit(MAX_RT_PRIO, array->bitmap);
8214 8612
8215#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 8613#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
8216 rt_rq->highest_prio = MAX_RT_PRIO; 8614 rt_rq->highest_prio.curr = MAX_RT_PRIO;
8615#ifdef CONFIG_SMP
8616 rt_rq->highest_prio.next = MAX_RT_PRIO;
8617#endif
8217#endif 8618#endif
8218#ifdef CONFIG_SMP 8619#ifdef CONFIG_SMP
8219 rt_rq->rt_nr_migratory = 0; 8620 rt_rq->rt_nr_migratory = 0;
8220 rt_rq->overloaded = 0; 8621 rt_rq->overloaded = 0;
8622 plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
8221#endif 8623#endif
8222 8624
8223 rt_rq->rt_time = 0; 8625 rt_rq->rt_time = 0;
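
init_rt_rq() now seeds a pair of cached priorities, highest_prio.curr and (on SMP) highest_prio.next, plus the plist of pushable tasks. The sketch below is a reduced user-space model of that two-level cache; the real inc_rt_prio()/inc_rt_prio_smp() paths in the kernel/sched_rt.c hunks further down also handle equal priorities and full recomputation, which this toy enqueue skips.

#include <stdio.h>

#define MAX_RT_PRIO 100         /* sentinel meaning "no RT task queued" */

/* Hypothetical, reduced version of the per-runqueue RT priority cache. */
struct rt_prio_cache {
        int curr;       /* highest (numerically lowest) queued RT prio */
        int next;       /* second-highest, consulted when pushing tasks */
};

static void init_cache(struct rt_prio_cache *c)
{
        c->curr = MAX_RT_PRIO;
        c->next = MAX_RT_PRIO;
}

/* Simplified enqueue: promote curr and demote the old best to next. */
static void enqueue_prio(struct rt_prio_cache *c, int prio)
{
        if (prio < c->curr) {
                c->next = c->curr;      /* old best becomes next-best */
                c->curr = prio;
        } else if (prio < c->next) {
                c->next = prio;
        }
}

int main(void)
{
        struct rt_prio_cache c;

        init_cache(&c);
        enqueue_prio(&c, 40);
        enqueue_prio(&c, 10);
        printf("curr=%d next=%d\n", c.curr, c.next);    /* curr=10 next=40 */
        return 0;
}
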
@@ -9219,6 +9621,16 @@ static int sched_rt_global_constraints(void)
9219 9621
9220 return ret; 9622 return ret;
9221} 9623}
9624
9625int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
9626{
9627 /* Don't accept realtime tasks when there is no way for them to run */
9628 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
9629 return 0;
9630
9631 return 1;
9632}
9633
9222#else /* !CONFIG_RT_GROUP_SCHED */ 9634#else /* !CONFIG_RT_GROUP_SCHED */
9223static int sched_rt_global_constraints(void) 9635static int sched_rt_global_constraints(void)
9224{ 9636{
@@ -9312,8 +9724,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9312 struct task_struct *tsk) 9724 struct task_struct *tsk)
9313{ 9725{
9314#ifdef CONFIG_RT_GROUP_SCHED 9726#ifdef CONFIG_RT_GROUP_SCHED
9315 /* Don't accept realtime tasks when there is no way for them to run */ 9727 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
9316 if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
9317 return -EINVAL; 9728 return -EINVAL;
9318#else 9729#else
9319 /* We don't support RT-tasks being in separate groups */ 9730 /* We don't support RT-tasks being in separate groups */
@@ -9584,7 +9995,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9584 struct cpuacct *ca; 9995 struct cpuacct *ca;
9585 int cpu; 9996 int cpu;
9586 9997
9587 if (!cpuacct_subsys.active) 9998 if (unlikely(!cpuacct_subsys.active))
9588 return; 9999 return;
9589 10000
9590 cpu = task_cpu(tsk); 10001 cpu = task_cpu(tsk);
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a0b0852414cc..390f33234bd0 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,11 +24,11 @@
24 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat 24 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
25 * consistent between cpus (never more than 2 jiffies difference). 25 * consistent between cpus (never more than 2 jiffies difference).
26 */ 26 */
27#include <linux/sched.h>
28#include <linux/percpu.h>
29#include <linux/spinlock.h> 27#include <linux/spinlock.h>
30#include <linux/ktime.h>
31#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/percpu.h>
30#include <linux/ktime.h>
31#include <linux/sched.h>
32 32
33/* 33/*
34 * Scheduler clock - returns current time in nanosec units. 34 * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
43static __read_mostly int sched_clock_running; 43static __read_mostly int sched_clock_running;
44 44
45#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 45#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
46__read_mostly int sched_clock_stable;
46 47
47struct sched_clock_data { 48struct sched_clock_data {
48 /* 49 /*
@@ -87,7 +88,7 @@ void sched_clock_init(void)
87} 88}
88 89
89/* 90/*
90 * min,max except they take wrapping into account 91 * min, max except they take wrapping into account
91 */ 92 */
92 93
93static inline u64 wrap_min(u64 x, u64 y) 94static inline u64 wrap_min(u64 x, u64 y)
@@ -111,15 +112,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
111 s64 delta = now - scd->tick_raw; 112 s64 delta = now - scd->tick_raw;
112 u64 clock, min_clock, max_clock; 113 u64 clock, min_clock, max_clock;
113 114
114 WARN_ON_ONCE(!irqs_disabled());
115
116 if (unlikely(delta < 0)) 115 if (unlikely(delta < 0))
117 delta = 0; 116 delta = 0;
118 117
119 /* 118 /*
120 * scd->clock = clamp(scd->tick_gtod + delta, 119 * scd->clock = clamp(scd->tick_gtod + delta,
121 * max(scd->tick_gtod, scd->clock), 120 * max(scd->tick_gtod, scd->clock),
122 * scd->tick_gtod + TICK_NSEC); 121 * scd->tick_gtod + TICK_NSEC);
123 */ 122 */
124 123
125 clock = scd->tick_gtod + delta; 124 clock = scd->tick_gtod + delta;
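
The comment retained in __update_sched_clock() above describes the clamp scd->clock = clamp(scd->tick_gtod + delta, max(scd->tick_gtod, scd->clock), scd->tick_gtod + TICK_NSEC). The user-space sketch below reproduces that window, assuming wrap_min()/wrap_max() behave as signed-difference min/max (their bodies lie outside this hunk, so that is an assumption) and using a hypothetical 1 ms TICK_NSEC.

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL    /* hypothetical 1 ms tick for the example */

/* min/max that tolerate u64 wrap-around by comparing signed differences. */
static uint64_t wrap_min(uint64_t x, uint64_t y)
{
        return (int64_t)(x - y) < 0 ? x : y;
}

static uint64_t wrap_max(uint64_t x, uint64_t y)
{
        return (int64_t)(x - y) > 0 ? x : y;
}

/* Clamp the raw sched_clock() delta into
 * [max(tick_gtod, old clock), tick_gtod + TICK_NSEC]. */
static uint64_t update_clock(uint64_t tick_gtod, uint64_t old_clock,
                             uint64_t delta)
{
        uint64_t clock     = tick_gtod + delta;
        uint64_t min_clock = wrap_max(tick_gtod, old_clock);
        uint64_t max_clock = tick_gtod + TICK_NSEC;

        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);
        return clock;
}

int main(void)
{
        /* A clock that jumped far ahead is pulled back to one tick past gtod. */
        printf("%llu\n",
               (unsigned long long)update_clock(5000000, 5100000, 9000000));
        return 0;
}
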
@@ -148,12 +147,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
148 147
149u64 sched_clock_cpu(int cpu) 148u64 sched_clock_cpu(int cpu)
150{ 149{
151 struct sched_clock_data *scd = cpu_sdc(cpu);
152 u64 now, clock, this_clock, remote_clock; 150 u64 now, clock, this_clock, remote_clock;
151 struct sched_clock_data *scd;
153 152
154 if (unlikely(!sched_clock_running)) 153 if (sched_clock_stable)
155 return 0ull; 154 return sched_clock();
156 155
156 scd = cpu_sdc(cpu);
157 WARN_ON_ONCE(!irqs_disabled()); 157 WARN_ON_ONCE(!irqs_disabled());
158 now = sched_clock(); 158 now = sched_clock();
159 159
@@ -195,14 +195,18 @@ u64 sched_clock_cpu(int cpu)
195 195
196void sched_clock_tick(void) 196void sched_clock_tick(void)
197{ 197{
198 struct sched_clock_data *scd = this_scd(); 198 struct sched_clock_data *scd;
199 u64 now, now_gtod; 199 u64 now, now_gtod;
200 200
201 if (sched_clock_stable)
202 return;
203
201 if (unlikely(!sched_clock_running)) 204 if (unlikely(!sched_clock_running))
202 return; 205 return;
203 206
204 WARN_ON_ONCE(!irqs_disabled()); 207 WARN_ON_ONCE(!irqs_disabled());
205 208
209 scd = this_scd();
206 now_gtod = ktime_to_ns(ktime_get()); 210 now_gtod = ktime_to_ns(ktime_get());
207 now = sched_clock(); 211 now = sched_clock();
208 212
@@ -250,7 +254,7 @@ u64 sched_clock_cpu(int cpu)
250 return sched_clock(); 254 return sched_clock();
251} 255}
252 256
253#endif 257#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
254 258
255unsigned long long cpu_clock(int cpu) 259unsigned long long cpu_clock(int cpu)
256{ 260{
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e4169..467ca72f1657 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -272,7 +272,6 @@ static void print_cpu(struct seq_file *m, int cpu)
272 P(nr_switches); 272 P(nr_switches);
273 P(nr_load_updates); 273 P(nr_load_updates);
274 P(nr_uninterruptible); 274 P(nr_uninterruptible);
275 SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
276 PN(next_balance); 275 PN(next_balance);
277 P(curr->pid); 276 P(curr->pid);
278 PN(clock); 277 PN(clock);
@@ -287,9 +286,6 @@ static void print_cpu(struct seq_file *m, int cpu)
287#ifdef CONFIG_SCHEDSTATS 286#ifdef CONFIG_SCHEDSTATS
288#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); 287#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
289 288
290 P(yld_exp_empty);
291 P(yld_act_empty);
292 P(yld_both_empty);
293 P(yld_count); 289 P(yld_count);
294 290
295 P(sched_switch); 291 P(sched_switch);
@@ -314,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
314 u64 now = ktime_to_ns(ktime_get()); 310 u64 now = ktime_to_ns(ktime_get());
315 int cpu; 311 int cpu;
316 312
317 SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n", 313 SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
318 init_utsname()->release, 314 init_utsname()->release,
319 (int)strcspn(init_utsname()->version, " "), 315 (int)strcspn(init_utsname()->version, " "),
320 init_utsname()->version); 316 init_utsname()->version);
@@ -325,6 +321,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
325 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x)) 321 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
326#define PN(x) \ 322#define PN(x) \
327 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) 323 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
324 P(jiffies);
328 PN(sysctl_sched_latency); 325 PN(sysctl_sched_latency);
329 PN(sysctl_sched_min_granularity); 326 PN(sysctl_sched_min_granularity);
330 PN(sysctl_sched_wakeup_granularity); 327 PN(sysctl_sched_wakeup_granularity);
@@ -397,6 +394,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
397 PN(se.vruntime); 394 PN(se.vruntime);
398 PN(se.sum_exec_runtime); 395 PN(se.sum_exec_runtime);
399 PN(se.avg_overlap); 396 PN(se.avg_overlap);
397 PN(se.avg_wakeup);
400 398
401 nr_switches = p->nvcsw + p->nivcsw; 399 nr_switches = p->nvcsw + p->nivcsw;
402 400
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0566f2a03c42..3816f217f119 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1314,16 +1314,63 @@ out:
1314} 1314}
1315#endif /* CONFIG_SMP */ 1315#endif /* CONFIG_SMP */
1316 1316
1317static unsigned long wakeup_gran(struct sched_entity *se) 1317/*
1318 * Adaptive granularity
1319 *
1320 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1321 * with the limit of wakeup_gran -- when it never does a wakeup.
1322 *
1323 * So the smaller avg_wakeup is the faster we want this task to preempt,
1324 * but we don't want to treat the preemptee unfairly and therefore allow it
1325 * to run for at least the amount of time we'd like to run.
1326 *
1327 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1328 *
1329 * NOTE: we use *nr_running to scale with load, this nicely matches the
1330 * degrading latency on load.
1331 */
1332static unsigned long
1333adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1334{
1335 u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1336 u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1337 u64 gran = 0;
1338
1339 if (this_run < expected_wakeup)
1340 gran = expected_wakeup - this_run;
1341
1342 return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1343}
1344
1345static unsigned long
1346wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1318{ 1347{
1319 unsigned long gran = sysctl_sched_wakeup_granularity; 1348 unsigned long gran = sysctl_sched_wakeup_granularity;
1320 1349
1350 if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1351 gran = adaptive_gran(curr, se);
1352
1321 /* 1353 /*
 1322 * More easily preempt - nice tasks, while not making it harder for 1354 * Since it's curr running now, convert the gran from real-time
 1323 * + nice tasks. 1355 * to virtual-time in its units.
1324 */ 1356 */
1325 if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD) 1357 if (sched_feat(ASYM_GRAN)) {
1326 gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se); 1358 /*
1359 * By using 'se' instead of 'curr' we penalize light tasks, so
1360 * they get preempted easier. That is, if 'se' < 'curr' then
1361 * the resulting gran will be larger, therefore penalizing the
1362 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1363 * be smaller, again penalizing the lighter task.
1364 *
1365 * This is especially important for buddies when the leftmost
1366 * task is higher priority than the buddy.
1367 */
1368 if (unlikely(se->load.weight != NICE_0_LOAD))
1369 gran = calc_delta_fair(gran, se);
1370 } else {
1371 if (unlikely(curr->load.weight != NICE_0_LOAD))
1372 gran = calc_delta_fair(gran, curr);
1373 }
1327 1374
1328 return gran; 1375 return gran;
1329} 1376}
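
The adaptive_gran() block above ties wakeup preemption granularity to how long a task typically runs before it wakes somebody. The sketch below restates the hunk's formula, expected_wakeup = 2 * avg_wakeup * nr_running, in user space; the nanosecond value chosen for sysctl_sched_wakeup_granularity is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define WAKEUP_GRAN_NS 5000000ULL       /* hypothetical sysctl, 5 ms */

/* Mirrors adaptive_gran(): the shorter a task's average run-to-wakeup time,
 * the smaller the granularity and the sooner it may preempt, but the result
 * never exceeds the configured wakeup granularity. */
static uint64_t adaptive_gran(uint64_t curr_this_run, uint64_t se_avg_wakeup,
                              unsigned int nr_running)
{
        uint64_t expected_wakeup = 2 * se_avg_wakeup * nr_running;
        uint64_t gran = 0;

        if (curr_this_run < expected_wakeup)
                gran = expected_wakeup - curr_this_run;

        return gran < WAKEUP_GRAN_NS ? gran : WAKEUP_GRAN_NS;
}

int main(void)
{
        /* Frequent waker: curr already ran past the expected window, gran 0. */
        printf("%llu\n", (unsigned long long)adaptive_gran(1000000, 200000, 2));
        /* Rare waker: gran is capped at the configured wakeup granularity. */
        printf("%llu\n", (unsigned long long)adaptive_gran(0, 100000000, 1));
        return 0;
}
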
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1350 if (vdiff <= 0) 1397 if (vdiff <= 0)
1351 return -1; 1398 return -1;
1352 1399
1353 gran = wakeup_gran(curr); 1400 gran = wakeup_gran(curr, se);
1354 if (vdiff > gran) 1401 if (vdiff > gran)
1355 return 1; 1402 return 1;
1356 1403
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index da5d93b5d2c6..76f61756e677 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,5 +1,6 @@
1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) 1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
2SCHED_FEAT(NORMALIZED_SLEEPER, 1) 2SCHED_FEAT(NORMALIZED_SLEEPER, 0)
3SCHED_FEAT(ADAPTIVE_GRAN, 1)
3SCHED_FEAT(WAKEUP_PREEMPT, 1) 4SCHED_FEAT(WAKEUP_PREEMPT, 1)
4SCHED_FEAT(START_DEBIT, 1) 5SCHED_FEAT(START_DEBIT, 1)
5SCHED_FEAT(AFFINE_WAKEUPS, 1) 6SCHED_FEAT(AFFINE_WAKEUPS, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index da932f4c8524..299d012b4394 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@
3 * policies) 3 * policies)
4 */ 4 */
5 5
6static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
7{
8 return container_of(rt_se, struct task_struct, rt);
9}
10
11#ifdef CONFIG_RT_GROUP_SCHED
12
13static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
14{
15 return rt_rq->rq;
16}
17
18static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
19{
20 return rt_se->rt_rq;
21}
22
23#else /* CONFIG_RT_GROUP_SCHED */
24
25static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
26{
27 return container_of(rt_rq, struct rq, rt);
28}
29
30static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
31{
32 struct task_struct *p = rt_task_of(rt_se);
33 struct rq *rq = task_rq(p);
34
35 return &rq->rt;
36}
37
38#endif /* CONFIG_RT_GROUP_SCHED */
39
6#ifdef CONFIG_SMP 40#ifdef CONFIG_SMP
7 41
8static inline int rt_overloaded(struct rq *rq) 42static inline int rt_overloaded(struct rq *rq)
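
The hunk above hoists rt_task_of(), rq_of_rt_rq() and rt_rq_of_se() to the top of sched_rt.c so the SMP code can use them; without CONFIG_RT_GROUP_SCHED, rq_of_rt_rq() recovers the enclosing runqueue via container_of(). A self-contained illustration of that embed-and-recover pattern follows; the struct layout is hypothetical and far smaller than the kernel's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical reduced layout: the RT runqueue is embedded in the rq,
 * so a pointer to the member can be turned back into its parent. */
struct rt_rq { unsigned int rt_nr_running; };
struct rq   { int cpu; struct rt_rq rt; };

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
        struct rq rq = { .cpu = 3 };
        struct rt_rq *rt = &rq.rt;

        printf("recovered cpu: %d\n", rq_of_rt_rq(rt)->cpu);
        return 0;
}
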
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
37 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 71 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
38} 72}
39 73
40static void update_rt_migration(struct rq *rq) 74static void update_rt_migration(struct rt_rq *rt_rq)
41{ 75{
42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) { 76 if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
43 if (!rq->rt.overloaded) { 77 if (!rt_rq->overloaded) {
44 rt_set_overload(rq); 78 rt_set_overload(rq_of_rt_rq(rt_rq));
45 rq->rt.overloaded = 1; 79 rt_rq->overloaded = 1;
46 } 80 }
47 } else if (rq->rt.overloaded) { 81 } else if (rt_rq->overloaded) {
48 rt_clear_overload(rq); 82 rt_clear_overload(rq_of_rt_rq(rt_rq));
49 rq->rt.overloaded = 0; 83 rt_rq->overloaded = 0;
50 } 84 }
51} 85}
52#endif /* CONFIG_SMP */
53 86
54static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 87static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
88{
89 if (rt_se->nr_cpus_allowed > 1)
90 rt_rq->rt_nr_migratory++;
91
92 update_rt_migration(rt_rq);
93}
94
95static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
96{
97 if (rt_se->nr_cpus_allowed > 1)
98 rt_rq->rt_nr_migratory--;
99
100 update_rt_migration(rt_rq);
101}
102
103static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
104{
105 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
106 plist_node_init(&p->pushable_tasks, p->prio);
107 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
108}
109
110static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
111{
112 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
113}
114
115#else
116
117static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
55{ 118{
56 return container_of(rt_se, struct task_struct, rt);
57} 119}
58 120
121static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
122{
123}
124
125static inline
126void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
127{
128}
129
130static inline
131void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
132{
133}
134
135#endif /* CONFIG_SMP */
136
59static inline int on_rt_rq(struct sched_rt_entity *rt_se) 137static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60{ 138{
61 return !list_empty(&rt_se->run_list); 139 return !list_empty(&rt_se->run_list);
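
enqueue_pushable_task()/dequeue_pushable_task() above keep a priority-sorted plist of tasks that may be pushed to another CPU; re-enqueueing deletes the node, re-initialises it with the task's current prio and adds it back so ordering is preserved. The sketch below shows the same keep-sorted-on-insert idea using a plain singly linked list in place of the kernel plist, with a hypothetical task type.

#include <stdio.h>

/* Hypothetical stand-in for a pushable task: lower prio value = higher
 * priority, matching RT priority ordering. */
struct ptask {
        int prio;
        const char *name;
        struct ptask *next;
};

/* Insert keeping the list sorted by prio, like plist_add() after
 * plist_node_init(&p->pushable_tasks, p->prio) in the hunk. */
static void enqueue_pushable(struct ptask **head, struct ptask *p)
{
        while (*head && (*head)->prio <= p->prio)
                head = &(*head)->next;
        p->next = *head;
        *head = p;
}

int main(void)
{
        struct ptask a = { 30, "a", NULL }, b = { 10, "b", NULL }, c = { 20, "c", NULL };
        struct ptask *head = NULL;

        enqueue_pushable(&head, &a);
        enqueue_pushable(&head, &b);
        enqueue_pushable(&head, &c);

        for (struct ptask *p = head; p; p = p->next)
                printf("%s(%d) ", p->name, p->prio);
        printf("\n");           /* b(10) c(20) a(30): highest priority first */
        return 0;
}
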
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
79#define for_each_leaf_rt_rq(rt_rq, rq) \ 157#define for_each_leaf_rt_rq(rt_rq, rq) \
80 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) 158 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81 159
82static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83{
84 return rt_rq->rq;
85}
86
87static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88{
89 return rt_se->rt_rq;
90}
91
92#define for_each_sched_rt_entity(rt_se) \ 160#define for_each_sched_rt_entity(rt_se) \
93 for (; rt_se; rt_se = rt_se->parent) 161 for (; rt_se; rt_se = rt_se->parent)
94 162
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
108 if (rt_rq->rt_nr_running) { 176 if (rt_rq->rt_nr_running) {
109 if (rt_se && !on_rt_rq(rt_se)) 177 if (rt_se && !on_rt_rq(rt_se))
110 enqueue_rt_entity(rt_se); 178 enqueue_rt_entity(rt_se);
111 if (rt_rq->highest_prio < curr->prio) 179 if (rt_rq->highest_prio.curr < curr->prio)
112 resched_task(curr); 180 resched_task(curr);
113 } 181 }
114} 182}
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
176#define for_each_leaf_rt_rq(rt_rq, rq) \ 244#define for_each_leaf_rt_rq(rt_rq, rq) \
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 245 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178 246
179static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180{
181 return container_of(rt_rq, struct rq, rt);
182}
183
184static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185{
186 struct task_struct *p = rt_task_of(rt_se);
187 struct rq *rq = task_rq(p);
188
189 return &rq->rt;
190}
191
192#define for_each_sched_rt_entity(rt_se) \ 247#define for_each_sched_rt_entity(rt_se) \
193 for (; rt_se; rt_se = NULL) 248 for (; rt_se; rt_se = NULL)
194 249
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
473 struct rt_rq *rt_rq = group_rt_rq(rt_se); 528 struct rt_rq *rt_rq = group_rt_rq(rt_se);
474 529
475 if (rt_rq) 530 if (rt_rq)
476 return rt_rq->highest_prio; 531 return rt_rq->highest_prio.curr;
477#endif 532#endif
478 533
479 return rt_task_of(rt_se)->prio; 534 return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
547 } 602 }
548} 603}
549 604
550static inline 605#if defined CONFIG_SMP
551void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 606
607static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
608
609static inline int next_prio(struct rq *rq)
552{ 610{
553 WARN_ON(!rt_prio(rt_se_prio(rt_se))); 611 struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
554 rt_rq->rt_nr_running++; 612
555#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 613 if (next && rt_prio(next->prio))
556 if (rt_se_prio(rt_se) < rt_rq->highest_prio) { 614 return next->prio;
557#ifdef CONFIG_SMP 615 else
558 struct rq *rq = rq_of_rt_rq(rt_rq); 616 return MAX_RT_PRIO;
559#endif 617}
618
619static void
620inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
621{
622 struct rq *rq = rq_of_rt_rq(rt_rq);
623
624 if (prio < prev_prio) {
625
626 /*
627 * If the new task is higher in priority than anything on the
628 * run-queue, we know that the previous high becomes our
629 * next-highest.
630 */
631 rt_rq->highest_prio.next = prev_prio;
560 632
561 rt_rq->highest_prio = rt_se_prio(rt_se);
562#ifdef CONFIG_SMP
563 if (rq->online) 633 if (rq->online)
564 cpupri_set(&rq->rd->cpupri, rq->cpu, 634 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
565 rt_se_prio(rt_se));
566#endif
567 }
568#endif
569#ifdef CONFIG_SMP
570 if (rt_se->nr_cpus_allowed > 1) {
571 struct rq *rq = rq_of_rt_rq(rt_rq);
572 635
573 rq->rt.rt_nr_migratory++; 636 } else if (prio == rt_rq->highest_prio.curr)
574 } 637 /*
638 * If the next task is equal in priority to the highest on
639 * the run-queue, then we implicitly know that the next highest
640 * task cannot be any lower than current
641 */
642 rt_rq->highest_prio.next = prio;
643 else if (prio < rt_rq->highest_prio.next)
644 /*
645 * Otherwise, we need to recompute next-highest
646 */
647 rt_rq->highest_prio.next = next_prio(rq);
648}
575 649
576 update_rt_migration(rq_of_rt_rq(rt_rq)); 650static void
577#endif 651dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
578#ifdef CONFIG_RT_GROUP_SCHED 652{
579 if (rt_se_boosted(rt_se)) 653 struct rq *rq = rq_of_rt_rq(rt_rq);
580 rt_rq->rt_nr_boosted++;
581 654
582 if (rt_rq->tg) 655 if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
583 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); 656 rt_rq->highest_prio.next = next_prio(rq);
584#else 657
585 start_rt_bandwidth(&def_rt_bandwidth); 658 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
586#endif 659 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
587} 660}
588 661
662#else /* CONFIG_SMP */
663
589static inline 664static inline
590void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 665void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
591{ 666static inline
592#ifdef CONFIG_SMP 667void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
593 int highest_prio = rt_rq->highest_prio; 668
594#endif 669#endif /* CONFIG_SMP */
595 670
596 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
597 WARN_ON(!rt_rq->rt_nr_running);
598 rt_rq->rt_nr_running--;
599#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 671#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
672static void
673inc_rt_prio(struct rt_rq *rt_rq, int prio)
674{
675 int prev_prio = rt_rq->highest_prio.curr;
676
677 if (prio < prev_prio)
678 rt_rq->highest_prio.curr = prio;
679
680 inc_rt_prio_smp(rt_rq, prio, prev_prio);
681}
682
683static void
684dec_rt_prio(struct rt_rq *rt_rq, int prio)
685{
686 int prev_prio = rt_rq->highest_prio.curr;
687
600 if (rt_rq->rt_nr_running) { 688 if (rt_rq->rt_nr_running) {
601 struct rt_prio_array *array;
602 689
603 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio); 690 WARN_ON(prio < prev_prio);
604 if (rt_se_prio(rt_se) == rt_rq->highest_prio) { 691
605 /* recalculate */ 692 /*
606 array = &rt_rq->active; 693 * This may have been our highest task, and therefore
607 rt_rq->highest_prio = 694 * we may have some recomputation to do
695 */
696 if (prio == prev_prio) {
697 struct rt_prio_array *array = &rt_rq->active;
698
699 rt_rq->highest_prio.curr =
608 sched_find_first_bit(array->bitmap); 700 sched_find_first_bit(array->bitmap);
609 } /* otherwise leave rq->highest prio alone */ 701 }
702
610 } else 703 } else
611 rt_rq->highest_prio = MAX_RT_PRIO; 704 rt_rq->highest_prio.curr = MAX_RT_PRIO;
612#endif
613#ifdef CONFIG_SMP
614 if (rt_se->nr_cpus_allowed > 1) {
615 struct rq *rq = rq_of_rt_rq(rt_rq);
616 rq->rt.rt_nr_migratory--;
617 }
618 705
619 if (rt_rq->highest_prio != highest_prio) { 706 dec_rt_prio_smp(rt_rq, prio, prev_prio);
620 struct rq *rq = rq_of_rt_rq(rt_rq); 707}
621 708
622 if (rq->online) 709#else
623 cpupri_set(&rq->rd->cpupri, rq->cpu, 710
624 rt_rq->highest_prio); 711static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
625 } 712static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
713
714#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
626 715
627 update_rt_migration(rq_of_rt_rq(rt_rq));
628#endif /* CONFIG_SMP */
629#ifdef CONFIG_RT_GROUP_SCHED 716#ifdef CONFIG_RT_GROUP_SCHED
717
718static void
719inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
720{
721 if (rt_se_boosted(rt_se))
722 rt_rq->rt_nr_boosted++;
723
724 if (rt_rq->tg)
725 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
726}
727
728static void
729dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
730{
630 if (rt_se_boosted(rt_se)) 731 if (rt_se_boosted(rt_se))
631 rt_rq->rt_nr_boosted--; 732 rt_rq->rt_nr_boosted--;
632 733
633 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); 734 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
634#endif 735}
736
737#else /* CONFIG_RT_GROUP_SCHED */
738
739static void
740inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
741{
742 start_rt_bandwidth(&def_rt_bandwidth);
743}
744
745static inline
746void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
747
748#endif /* CONFIG_RT_GROUP_SCHED */
749
750static inline
751void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
752{
753 int prio = rt_se_prio(rt_se);
754
755 WARN_ON(!rt_prio(prio));
756 rt_rq->rt_nr_running++;
757
758 inc_rt_prio(rt_rq, prio);
759 inc_rt_migration(rt_se, rt_rq);
760 inc_rt_group(rt_se, rt_rq);
761}
762
763static inline
764void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
765{
766 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
767 WARN_ON(!rt_rq->rt_nr_running);
768 rt_rq->rt_nr_running--;
769
770 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
771 dec_rt_migration(rt_se, rt_rq);
772 dec_rt_group(rt_se, rt_rq);
635} 773}
636 774
637static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) 775static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
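
The hunk above replaces the single rt_rq->highest_prio value with a two-level cache: highest_prio.curr is the best RT priority queued on this runqueue and highest_prio.next is the runner-up, which the pull path further below uses to skip remote runqueues without taking their locks. The dequeue side has to rescan (sched_find_first_bit()/next_prio()), but the enqueue-side rules can be modelled in isolation. A minimal stand-alone sketch, with illustrative names and values only:

/*
 * Toy model of inc_rt_prio()/inc_rt_prio_smp() above. Lower value means
 * higher RT priority; MAX_RT_PRIO stands for "nothing queued". Where the
 * real code rescans via next_prio(), this sketch just uses the newly
 * queued priority, which is enough to show the intended invariant.
 */
#include <assert.h>

#define MAX_RT_PRIO	100

struct prio_cache { int curr, next; };

static void cache_on_enqueue(struct prio_cache *c, int prio)
{
	int prev = c->curr;

	if (prio < prev) {
		c->curr = prio;
		c->next = prev;		/* old best becomes the runner-up */
	} else if (prio == c->curr || prio < c->next) {
		c->next = prio;		/* new runner-up (or a tie with curr) */
	}
}

int main(void)
{
	struct prio_cache c = { MAX_RT_PRIO, MAX_RT_PRIO };

	cache_on_enqueue(&c, 10);	/* curr=10, next=100 */
	cache_on_enqueue(&c, 20);	/* curr=10, next=20  */
	cache_on_enqueue(&c, 5);	/* curr=5,  next=10  */
	assert(c.curr == 5 && c.next == 10);
	return 0;
}
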
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
718 856
719 enqueue_rt_entity(rt_se); 857 enqueue_rt_entity(rt_se);
720 858
859 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
860 enqueue_pushable_task(rq, p);
861
721 inc_cpu_load(rq, p->se.load.weight); 862 inc_cpu_load(rq, p->se.load.weight);
722} 863}
723 864
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
728 update_curr_rt(rq); 869 update_curr_rt(rq);
729 dequeue_rt_entity(rt_se); 870 dequeue_rt_entity(rt_se);
730 871
872 dequeue_pushable_task(rq, p);
873
731 dec_cpu_load(rq, p->se.load.weight); 874 dec_cpu_load(rq, p->se.load.weight);
732} 875}
733 876
@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
878 return next; 1021 return next;
879} 1022}
880 1023
881static struct task_struct *pick_next_task_rt(struct rq *rq) 1024static struct task_struct *_pick_next_task_rt(struct rq *rq)
882{ 1025{
883 struct sched_rt_entity *rt_se; 1026 struct sched_rt_entity *rt_se;
884 struct task_struct *p; 1027 struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
900 1043
901 p = rt_task_of(rt_se); 1044 p = rt_task_of(rt_se);
902 p->se.exec_start = rq->clock; 1045 p->se.exec_start = rq->clock;
1046
1047 return p;
1048}
1049
1050static struct task_struct *pick_next_task_rt(struct rq *rq)
1051{
1052 struct task_struct *p = _pick_next_task_rt(rq);
1053
1054 /* The running task is never eligible for pushing */
1055 if (p)
1056 dequeue_pushable_task(rq, p);
1057
903 return p; 1058 return p;
904} 1059}
905 1060
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
907{ 1062{
908 update_curr_rt(rq); 1063 update_curr_rt(rq);
909 p->se.exec_start = 0; 1064 p->se.exec_start = 0;
1065
1066 /*
1067 * The previous task needs to be made eligible for pushing
1068 * if it is still active
1069 */
1070 if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1071 enqueue_pushable_task(rq, p);
910} 1072}
911 1073
912#ifdef CONFIG_SMP 1074#ifdef CONFIG_SMP
@@ -1080,7 +1242,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1080 } 1242 }
1081 1243
1082 /* If this rq is still suitable use it. */ 1244 /* If this rq is still suitable use it. */
1083 if (lowest_rq->rt.highest_prio > task->prio) 1245 if (lowest_rq->rt.highest_prio.curr > task->prio)
1084 break; 1246 break;
1085 1247
1086 /* try again */ 1248 /* try again */
@@ -1091,6 +1253,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1091 return lowest_rq; 1253 return lowest_rq;
1092} 1254}
1093 1255
1256static inline int has_pushable_tasks(struct rq *rq)
1257{
1258 return !plist_head_empty(&rq->rt.pushable_tasks);
1259}
1260
1261static struct task_struct *pick_next_pushable_task(struct rq *rq)
1262{
1263 struct task_struct *p;
1264
1265 if (!has_pushable_tasks(rq))
1266 return NULL;
1267
1268 p = plist_first_entry(&rq->rt.pushable_tasks,
1269 struct task_struct, pushable_tasks);
1270
1271 BUG_ON(rq->cpu != task_cpu(p));
1272 BUG_ON(task_current(rq, p));
1273 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1274
1275 BUG_ON(!p->se.on_rq);
1276 BUG_ON(!rt_task(p));
1277
1278 return p;
1279}
1280
1094/* 1281/*
1095 * If the current CPU has more than one RT task, see if the non 1282 * If the current CPU has more than one RT task, see if the non
1096 * running task can migrate over to a CPU that is running a task 1283 * running task can migrate over to a CPU that is running a task
@@ -1100,13 +1287,11 @@ static int push_rt_task(struct rq *rq)
1100{ 1287{
1101 struct task_struct *next_task; 1288 struct task_struct *next_task;
1102 struct rq *lowest_rq; 1289 struct rq *lowest_rq;
1103 int ret = 0;
1104 int paranoid = RT_MAX_TRIES;
1105 1290
1106 if (!rq->rt.overloaded) 1291 if (!rq->rt.overloaded)
1107 return 0; 1292 return 0;
1108 1293
1109 next_task = pick_next_highest_task_rt(rq, -1); 1294 next_task = pick_next_pushable_task(rq);
1110 if (!next_task) 1295 if (!next_task)
1111 return 0; 1296 return 0;
1112 1297
@@ -1135,16 +1320,34 @@ static int push_rt_task(struct rq *rq)
1135 struct task_struct *task; 1320 struct task_struct *task;
1136 /* 1321 /*
1137 * find lock_lowest_rq releases rq->lock 1322 * find lock_lowest_rq releases rq->lock
1138 * so it is possible that next_task has changed. 1323 * so it is possible that next_task has migrated.
1139 * If it has, then try again. 1324 *
1325 * We need to make sure that the task is still on the same
1326 * run-queue and is also still the next task eligible for
1327 * pushing.
1140 */ 1328 */
1141 task = pick_next_highest_task_rt(rq, -1); 1329 task = pick_next_pushable_task(rq);
1142 if (unlikely(task != next_task) && task && paranoid--) { 1330 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1143 put_task_struct(next_task); 1331 /*
1144 next_task = task; 1332 * If we get here, the task hasn't moved at all, but
1145 goto retry; 1333 * it has failed to push. We will not try again,
1334 * since the other cpus will pull from us when they
1335 * are ready.
1336 */
1337 dequeue_pushable_task(rq, next_task);
1338 goto out;
1146 } 1339 }
1147 goto out; 1340
1341 if (!task)
1342 /* No more tasks, just exit */
1343 goto out;
1344
1345 /*
1346 * Something has shifted, try again.
1347 */
1348 put_task_struct(next_task);
1349 next_task = task;
1350 goto retry;
1148 } 1351 }
1149 1352
1150 deactivate_task(rq, next_task, 0); 1353 deactivate_task(rq, next_task, 0);
@@ -1155,23 +1358,12 @@ static int push_rt_task(struct rq *rq)
1155 1358
1156 double_unlock_balance(rq, lowest_rq); 1359 double_unlock_balance(rq, lowest_rq);
1157 1360
1158 ret = 1;
1159out: 1361out:
1160 put_task_struct(next_task); 1362 put_task_struct(next_task);
1161 1363
1162 return ret; 1364 return 1;
1163} 1365}
1164 1366
1165/*
1166 * TODO: Currently we just use the second highest prio task on
1167 * the queue, and stop when it can't migrate (or there's
1168 * no more RT tasks). There may be a case where a lower
1169 * priority RT task has a different affinity than the
1170 * higher RT task. In this case the lower RT task could
1171 * possibly be able to migrate where as the higher priority
1172 * RT task could not. We currently ignore this issue.
1173 * Enhancements are welcome!
1174 */
1175static void push_rt_tasks(struct rq *rq) 1367static void push_rt_tasks(struct rq *rq)
1176{ 1368{
1177 /* push_rt_task will return true if it moved an RT */ 1369 /* push_rt_task will return true if it moved an RT */
@@ -1182,33 +1374,35 @@ static void push_rt_tasks(struct rq *rq)
1182static int pull_rt_task(struct rq *this_rq) 1374static int pull_rt_task(struct rq *this_rq)
1183{ 1375{
1184 int this_cpu = this_rq->cpu, ret = 0, cpu; 1376 int this_cpu = this_rq->cpu, ret = 0, cpu;
1185 struct task_struct *p, *next; 1377 struct task_struct *p;
1186 struct rq *src_rq; 1378 struct rq *src_rq;
1187 1379
1188 if (likely(!rt_overloaded(this_rq))) 1380 if (likely(!rt_overloaded(this_rq)))
1189 return 0; 1381 return 0;
1190 1382
1191 next = pick_next_task_rt(this_rq);
1192
1193 for_each_cpu(cpu, this_rq->rd->rto_mask) { 1383 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1194 if (this_cpu == cpu) 1384 if (this_cpu == cpu)
1195 continue; 1385 continue;
1196 1386
1197 src_rq = cpu_rq(cpu); 1387 src_rq = cpu_rq(cpu);
1388
1389 /*
1390 * Don't bother taking the src_rq->lock if the next highest
1391 * task is known to be lower-priority than our current task.
1392 * This may look racy, but if this value is about to go
1393 * logically higher, the src_rq will push this task away.
1394 * And if it's going logically lower, we do not care
1395 */
1396 if (src_rq->rt.highest_prio.next >=
1397 this_rq->rt.highest_prio.curr)
1398 continue;
1399
1198 /* 1400 /*
1199 * We can potentially drop this_rq's lock in 1401 * We can potentially drop this_rq's lock in
1200 * double_lock_balance, and another CPU could 1402 * double_lock_balance, and another CPU could
1201 * steal our next task - hence we must cause 1403 * alter this_rq
1202 * the caller to recalculate the next task
1203 * in that case:
1204 */ 1404 */
1205 if (double_lock_balance(this_rq, src_rq)) { 1405 double_lock_balance(this_rq, src_rq);
1206 struct task_struct *old_next = next;
1207
1208 next = pick_next_task_rt(this_rq);
1209 if (next != old_next)
1210 ret = 1;
1211 }
1212 1406
1213 /* 1407 /*
1214 * Are there still pullable RT tasks? 1408 * Are there still pullable RT tasks?
@@ -1222,7 +1416,7 @@ static int pull_rt_task(struct rq *this_rq)
1222 * Do we have an RT task that preempts 1416 * Do we have an RT task that preempts
1223 * the to-be-scheduled task? 1417 * the to-be-scheduled task?
1224 */ 1418 */
1225 if (p && (!next || (p->prio < next->prio))) { 1419 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1226 WARN_ON(p == src_rq->curr); 1420 WARN_ON(p == src_rq->curr);
1227 WARN_ON(!p->se.on_rq); 1421 WARN_ON(!p->se.on_rq);
1228 1422
@@ -1232,12 +1426,9 @@ static int pull_rt_task(struct rq *this_rq)
1232 * This is just that p is waking up and hasn't 1426 * This is just that p is waking up and hasn't
1233 * had a chance to schedule. We only pull 1427 * had a chance to schedule. We only pull
1234 * p if it is lower in priority than the 1428 * p if it is lower in priority than the
1235 * current task on the run queue or 1429 * current task on the run queue
1236 * this_rq next task is lower in prio than
1237 * the current task on that rq.
1238 */ 1430 */
1239 if (p->prio < src_rq->curr->prio || 1431 if (p->prio < src_rq->curr->prio)
1240 (next && next->prio < src_rq->curr->prio))
1241 goto skip; 1432 goto skip;
1242 1433
1243 ret = 1; 1434 ret = 1;
@@ -1250,13 +1441,7 @@ static int pull_rt_task(struct rq *this_rq)
1250 * case there's an even higher prio task 1441 * case there's an even higher prio task
1251 * in another runqueue. (low likelihood 1442 * in another runqueue. (low likelihood
1252 * but possible) 1443 * but possible)
1253 *
1254 * Update next so that we won't pick a task
1255 * on another cpu with a priority lower (or equal)
1256 * than the one we just picked.
1257 */ 1444 */
1258 next = p;
1259
1260 } 1445 }
1261 skip: 1446 skip:
1262 double_unlock_balance(this_rq, src_rq); 1447 double_unlock_balance(this_rq, src_rq);
@@ -1268,24 +1453,27 @@ static int pull_rt_task(struct rq *this_rq)
1268static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) 1453static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1269{ 1454{
1270 /* Try to pull RT tasks here if we lower this rq's prio */ 1455 /* Try to pull RT tasks here if we lower this rq's prio */
1271 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) 1456 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1272 pull_rt_task(rq); 1457 pull_rt_task(rq);
1273} 1458}
1274 1459
1460/*
1461 * assumes rq->lock is held
1462 */
1463static int needs_post_schedule_rt(struct rq *rq)
1464{
1465 return has_pushable_tasks(rq);
1466}
1467
1275static void post_schedule_rt(struct rq *rq) 1468static void post_schedule_rt(struct rq *rq)
1276{ 1469{
1277 /* 1470 /*
1278 * If we have more than one rt_task queued, then 1471 * This is only called if needs_post_schedule_rt() indicates that
1279 * see if we can push the other rt_tasks off to other CPUS. 1472 * we need to push tasks away
1280 * Note we may release the rq lock, and since
1281 * the lock was owned by prev, we need to release it
1282 * first via finish_lock_switch and then reaquire it here.
1283 */ 1473 */
1284 if (unlikely(rq->rt.overloaded)) { 1474 spin_lock_irq(&rq->lock);
1285 spin_lock_irq(&rq->lock); 1475 push_rt_tasks(rq);
1286 push_rt_tasks(rq); 1476 spin_unlock_irq(&rq->lock);
1287 spin_unlock_irq(&rq->lock);
1288 }
1289} 1477}
1290 1478
1291/* 1479/*
@@ -1296,7 +1484,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1296{ 1484{
1297 if (!task_running(rq, p) && 1485 if (!task_running(rq, p) &&
1298 !test_tsk_need_resched(rq->curr) && 1486 !test_tsk_need_resched(rq->curr) &&
1299 rq->rt.overloaded) 1487 has_pushable_tasks(rq) &&
1488 p->rt.nr_cpus_allowed > 1)
1300 push_rt_tasks(rq); 1489 push_rt_tasks(rq);
1301} 1490}
1302 1491
@@ -1332,6 +1521,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1332 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { 1521 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1333 struct rq *rq = task_rq(p); 1522 struct rq *rq = task_rq(p);
1334 1523
1524 if (!task_current(rq, p)) {
1525 /*
1526 * Make sure we dequeue this task from the pushable list
1527 * before going further. It will either remain off of
1528 * the list because we are no longer pushable, or it
1529 * will be requeued.
1530 */
1531 if (p->rt.nr_cpus_allowed > 1)
1532 dequeue_pushable_task(rq, p);
1533
1534 /*
1535 * Requeue if our weight is changing and still > 1
1536 */
1537 if (weight > 1)
1538 enqueue_pushable_task(rq, p);
1539
1540 }
1541
1335 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { 1542 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1336 rq->rt.rt_nr_migratory++; 1543 rq->rt.rt_nr_migratory++;
1337 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { 1544 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1339,7 +1546,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1339 rq->rt.rt_nr_migratory--; 1546 rq->rt.rt_nr_migratory--;
1340 } 1547 }
1341 1548
1342 update_rt_migration(rq); 1549 update_rt_migration(&rq->rt);
1343 } 1550 }
1344 1551
1345 cpumask_copy(&p->cpus_allowed, new_mask); 1552 cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1354,7 +1561,7 @@ static void rq_online_rt(struct rq *rq)
1354 1561
1355 __enable_runtime(rq); 1562 __enable_runtime(rq);
1356 1563
1357 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio); 1564 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1358} 1565}
1359 1566
1360/* Assumes rq->lock is held */ 1567/* Assumes rq->lock is held */
@@ -1446,7 +1653,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1446 * can release the rq lock and p could migrate. 1653 * can release the rq lock and p could migrate.
1447 * Only reschedule if p is still on the same runqueue. 1654 * Only reschedule if p is still on the same runqueue.
1448 */ 1655 */
1449 if (p->prio > rq->rt.highest_prio && rq->curr == p) 1656 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1450 resched_task(p); 1657 resched_task(p);
1451#else 1658#else
1452 /* For UP simply resched on drop of prio */ 1659 /* For UP simply resched on drop of prio */
@@ -1517,6 +1724,9 @@ static void set_curr_task_rt(struct rq *rq)
1517 struct task_struct *p = rq->curr; 1724 struct task_struct *p = rq->curr;
1518 1725
1519 p->se.exec_start = rq->clock; 1726 p->se.exec_start = rq->clock;
1727
1728 /* The running task is never eligible for pushing */
1729 dequeue_pushable_task(rq, p);
1520} 1730}
1521 1731
1522static const struct sched_class rt_sched_class = { 1732static const struct sched_class rt_sched_class = {
@@ -1539,6 +1749,7 @@ static const struct sched_class rt_sched_class = {
1539 .rq_online = rq_online_rt, 1749 .rq_online = rq_online_rt,
1540 .rq_offline = rq_offline_rt, 1750 .rq_offline = rq_offline_rt,
1541 .pre_schedule = pre_schedule_rt, 1751 .pre_schedule = pre_schedule_rt,
1752 .needs_post_schedule = needs_post_schedule_rt,
1542 .post_schedule = post_schedule_rt, 1753 .post_schedule = post_schedule_rt,
1543 .task_wake_up = task_wake_up_rt, 1754 .task_wake_up = task_wake_up_rt,
1544 .switched_from = switched_from_rt, 1755 .switched_from = switched_from_rt,
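
Taken together, the sched_rt.c hunks above stop rescanning the priority arrays on every push and instead keep each migratable, non-running RT task on a per-runqueue list sorted by priority (rq->rt.pushable_tasks, a plist keyed by p->prio), so pick_next_pushable_task() is a constant-time head lookup. The enqueue_pushable_task()/dequeue_pushable_task() helpers referenced here are added elsewhere in the file; the sketch below is only a rough user-space stand-in for that bookkeeping, not their actual implementation:

/*
 * Illustrative model of the "pushable tasks" list: runnable RT tasks that
 * are not currently running and may run on more than one CPU, kept sorted
 * so the best push candidate sits at the head. All names are made up.
 */
#include <stdio.h>

struct demo_task {
	const char *name;
	int prio;			/* lower value = higher RT priority */
	int nr_cpus_allowed;
	struct demo_task *next;
};

static struct demo_task *pushable;	/* models rq->rt.pushable_tasks */

static void enqueue_pushable(struct demo_task *p)
{
	struct demo_task **pp = &pushable;

	if (p->nr_cpus_allowed <= 1)	/* pinned tasks are never pushable */
		return;
	while (*pp && (*pp)->prio <= p->prio)
		pp = &(*pp)->next;	/* keep the list sorted by priority */
	p->next = *pp;
	*pp = p;
}

static struct demo_task *pick_next_pushable(void)
{
	return pushable;		/* O(1), like pick_next_pushable_task() */
}

int main(void)
{
	struct demo_task a = { "a", 10, 4 }, b = { "b", 5, 4 }, c = { "c", 1, 1 };

	enqueue_pushable(&a);
	enqueue_pushable(&b);
	enqueue_pushable(&c);		/* skipped: only one allowed CPU */
	printf("best push candidate: %s (prio %d)\n",
	       pick_next_pushable()->name, pick_next_pushable()->prio);
	return 0;
}
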
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a8f93dd374e1..32d2bd4061b0 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -4,7 +4,7 @@
4 * bump this up when changing the output format or the meaning of an existing 4 * bump this up when changing the output format or the meaning of an existing
5 * format, so that tools can adapt (or abort) 5 * format, so that tools can adapt (or abort)
6 */ 6 */
7#define SCHEDSTAT_VERSION 14 7#define SCHEDSTAT_VERSION 15
8 8
9static int show_schedstat(struct seq_file *seq, void *v) 9static int show_schedstat(struct seq_file *seq, void *v)
10{ 10{
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
26 26
27 /* runqueue-specific stats */ 27 /* runqueue-specific stats */
28 seq_printf(seq, 28 seq_printf(seq,
29 "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu", 29 "cpu%d %u %u %u %u %u %u %llu %llu %lu",
30 cpu, rq->yld_both_empty, 30 cpu, rq->yld_count,
31 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
32 rq->sched_switch, rq->sched_count, rq->sched_goidle, 31 rq->sched_switch, rq->sched_count, rq->sched_goidle,
33 rq->ttwu_count, rq->ttwu_local, 32 rq->ttwu_count, rq->ttwu_local,
34 rq->rq_cpu_time, 33 rq->rq_cpu_time,
diff --git a/kernel/signal.c b/kernel/signal.c
index 2a74fe87c0dd..1c8814481a11 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1575,7 +1575,15 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1575 read_lock(&tasklist_lock); 1575 read_lock(&tasklist_lock);
1576 if (may_ptrace_stop()) { 1576 if (may_ptrace_stop()) {
1577 do_notify_parent_cldstop(current, CLD_TRAPPED); 1577 do_notify_parent_cldstop(current, CLD_TRAPPED);
1578 /*
1579 * Don't want to allow preemption here, because
1580 * sys_ptrace() needs this task to be inactive.
1581 *
1582 * XXX: implement read_unlock_no_resched().
1583 */
1584 preempt_disable();
1578 read_unlock(&tasklist_lock); 1585 read_unlock(&tasklist_lock);
1586 preempt_enable_no_resched();
1579 schedule(); 1587 schedule();
1580 } else { 1588 } else {
1581 /* 1589 /*
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0365b4899a3d..57d3f67f6f38 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -626,6 +626,7 @@ static int ksoftirqd(void * __bind_cpu)
626 preempt_enable_no_resched(); 626 preempt_enable_no_resched();
627 cond_resched(); 627 cond_resched();
628 preempt_disable(); 628 preempt_disable();
629 rcu_qsctr_inc((long)__bind_cpu);
629 } 630 }
630 preempt_enable(); 631 preempt_enable();
631 set_current_state(TASK_INTERRUPTIBLE); 632 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/sys.c b/kernel/sys.c
index f145c415bc16..37f458e6882a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -559,7 +559,7 @@ error:
559 abort_creds(new); 559 abort_creds(new);
560 return retval; 560 return retval;
561} 561}
562 562
563/* 563/*
564 * change the user struct in a credentials set to match the new UID 564 * change the user struct in a credentials set to match the new UID
565 */ 565 */
@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
571 if (!new_user) 571 if (!new_user)
572 return -EAGAIN; 572 return -EAGAIN;
573 573
574 if (!task_can_switch_user(new_user, current)) {
575 free_uid(new_user);
576 return -EINVAL;
577 }
578
574 if (atomic_read(&new_user->processes) >= 579 if (atomic_read(&new_user->processes) >=
575 current->signal->rlim[RLIMIT_NPROC].rlim_cur && 580 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
576 new_user != INIT_USER) { 581 new_user != INIT_USER) {
@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
631 goto error; 636 goto error;
632 } 637 }
633 638
634 retval = -EAGAIN; 639 if (new->uid != old->uid) {
635 if (new->uid != old->uid && set_user(new) < 0) 640 retval = set_user(new);
636 goto error; 641 if (retval < 0)
637 642 goto error;
643 }
638 if (ruid != (uid_t) -1 || 644 if (ruid != (uid_t) -1 ||
639 (euid != (uid_t) -1 && euid != old->uid)) 645 (euid != (uid_t) -1 && euid != old->uid))
640 new->suid = new->euid; 646 new->suid = new->euid;
@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
680 retval = -EPERM; 686 retval = -EPERM;
681 if (capable(CAP_SETUID)) { 687 if (capable(CAP_SETUID)) {
682 new->suid = new->uid = uid; 688 new->suid = new->uid = uid;
683 if (uid != old->uid && set_user(new) < 0) { 689 if (uid != old->uid) {
684 retval = -EAGAIN; 690 retval = set_user(new);
685 goto error; 691 if (retval < 0)
692 goto error;
686 } 693 }
687 } else if (uid != old->uid && uid != new->suid) { 694 } else if (uid != old->uid && uid != new->suid) {
688 goto error; 695 goto error;
@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
734 goto error; 741 goto error;
735 } 742 }
736 743
737 retval = -EAGAIN;
738 if (ruid != (uid_t) -1) { 744 if (ruid != (uid_t) -1) {
739 new->uid = ruid; 745 new->uid = ruid;
740 if (ruid != old->uid && set_user(new) < 0) 746 if (ruid != old->uid) {
741 goto error; 747 retval = set_user(new);
748 if (retval < 0)
749 goto error;
750 }
742 } 751 }
743 if (euid != (uid_t) -1) 752 if (euid != (uid_t) -1)
744 new->euid = euid; 753 new->euid = euid;
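
One user-visible consequence of the set_user() change above: a refused switch (task_can_switch_user() saying no) now surfaces as -EINVAL from set_user(), while -EAGAIN stays reserved for the RLIMIT_NPROC check. A small caller-side sketch; the target UID is an arbitrary example:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uid_t target = 1000;		/* example UID, adjust as needed */

	if (setuid(target) == 0) {
		printf("now running as uid %d\n", (int)getuid());
	} else if (errno == EAGAIN) {
		fprintf(stderr, "uid %d is over RLIMIT_NPROC: %s\n",
			(int)target, strerror(errno));
	} else {
		/* e.g. EPERM (no CAP_SETUID) or, with this patch, EINVAL */
		fprintf(stderr, "setuid failed: %s\n", strerror(errno));
	}
	return 0;
}
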
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index fafeb48f27c0..b38423ca711a 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -219,6 +219,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, 219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, 220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, 221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
222 { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
222 {} 223 {}
223}; 224};
224 225
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 905b0b50792d..0b0a6366c9d4 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,4 @@
1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o 1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
2 2
3obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o 3obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o 4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index ea2f48af83cf..d13be216a790 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -68,6 +68,17 @@ void clockevents_set_mode(struct clock_event_device *dev,
68 if (dev->mode != mode) { 68 if (dev->mode != mode) {
69 dev->set_mode(mode, dev); 69 dev->set_mode(mode, dev);
70 dev->mode = mode; 70 dev->mode = mode;
71
72 /*
73 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
74 * on it, so fix it up and emit a warning:
75 */
76 if (mode == CLOCK_EVT_MODE_ONESHOT) {
77 if (unlikely(!dev->mult)) {
78 dev->mult = 1;
79 WARN_ON(1);
80 }
81 }
71 } 82 }
72} 83}
73 84
@@ -168,15 +179,6 @@ void clockevents_register_device(struct clock_event_device *dev)
168 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 179 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
169 BUG_ON(!dev->cpumask); 180 BUG_ON(!dev->cpumask);
170 181
171 /*
172 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
173 * on it, so fix it up and emit a warning:
174 */
175 if (unlikely(!dev->mult)) {
176 dev->mult = 1;
177 WARN_ON(1);
178 }
179
180 spin_lock(&clockevents_lock); 182 spin_lock(&clockevents_lock);
181 183
182 list_add(&dev->list, &clockevent_devices); 184 list_add(&dev->list, &clockevent_devices);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ca89e1593f08..c46c931a7fe7 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -31,6 +31,82 @@
31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ 31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
32#include <linux/tick.h> 32#include <linux/tick.h>
33 33
34void timecounter_init(struct timecounter *tc,
35 const struct cyclecounter *cc,
36 u64 start_tstamp)
37{
38 tc->cc = cc;
39 tc->cycle_last = cc->read(cc);
40 tc->nsec = start_tstamp;
41}
42EXPORT_SYMBOL(timecounter_init);
43
44/**
45 * timecounter_read_delta - get nanoseconds since last call of this function
46 * @tc: Pointer to time counter
47 *
48 * When the underlying cycle counter runs over, this will be handled
49 * correctly as long as it does not run over more than once between
50 * calls.
51 *
52 * The first call to this function for a new time counter initializes
53 * the time tracking and returns an undefined result.
54 */
55static u64 timecounter_read_delta(struct timecounter *tc)
56{
57 cycle_t cycle_now, cycle_delta;
58 u64 ns_offset;
59
60 /* read cycle counter: */
61 cycle_now = tc->cc->read(tc->cc);
62
63 /* calculate the delta since the last timecounter_read_delta(): */
64 cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
65
66 /* convert to nanoseconds: */
67 ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
68
69 /* update time stamp of timecounter_read_delta() call: */
70 tc->cycle_last = cycle_now;
71
72 return ns_offset;
73}
74
75u64 timecounter_read(struct timecounter *tc)
76{
77 u64 nsec;
78
79 /* increment time by nanoseconds since last call */
80 nsec = timecounter_read_delta(tc);
81 nsec += tc->nsec;
82 tc->nsec = nsec;
83
84 return nsec;
85}
86EXPORT_SYMBOL(timecounter_read);
87
88u64 timecounter_cyc2time(struct timecounter *tc,
89 cycle_t cycle_tstamp)
90{
91 u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
92 u64 nsec;
93
94 /*
95 * Instead of always treating cycle_tstamp as more recent
96 * than tc->cycle_last, detect when it is too far in the
97 * future and treat it as old time stamp instead.
98 */
99 if (cycle_delta > tc->cc->mask / 2) {
100 cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
101 nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
102 } else {
103 nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
104 }
105
106 return nsec;
107}
108EXPORT_SYMBOL(timecounter_cyc2time);
109
34/* XXX - Would like a better way for initializing curr_clocksource */ 110/* XXX - Would like a better way for initializing curr_clocksource */
35extern struct clocksource clocksource_jiffies; 111extern struct clocksource clocksource_jiffies;
36 112
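
The timecounter/cyclecounter API added to clocksource.c above turns a free-running hardware counter into a monotonically accumulating nanosecond clock, handling at most one counter wrap between reads. A sketch of how a driver might wire it up; the hardware read helper, the mask/mult/shift values and all my_* names are placeholders, and the cyclecounter field layout follows the header introduced by the same series rather than anything shown in this hunk:

#include <linux/clocksource.h>

extern u64 my_hw_read_counter(void);	/* assumed driver-specific helper */

static cycle_t my_cc_read(const struct cyclecounter *cc)
{
	return my_hw_read_counter();
}

static struct cyclecounter my_cc = {
	.read	= my_cc_read,
	.mask	= CLOCKSOURCE_MASK(48),	/* counter wraps at 48 bits */
	.mult	= 1 << 8,		/* example scaling: 1 tick == 1 ns */
	.shift	= 8,
};

static struct timecounter my_tc;

static void my_init_timestamping(u64 boot_time_ns)
{
	/* nanosecond clock starts at boot_time_ns, cycle_last at "now" */
	timecounter_init(&my_tc, &my_cc, boot_time_ns);
}

static u64 my_timestamp_now(void)
{
	/* call at least once per counter wrap so the delta stays unambiguous */
	return timecounter_read(&my_tc);
}

static u64 my_timestamp_of(cycle_t hw_stamp)
{
	/* convert a raw counter value captured earlier (e.g. by a NIC) */
	return timecounter_cyc2time(&my_tc, hw_stamp);
}
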
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f5f793d92415..7fc64375ff43 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -1,71 +1,129 @@
1/* 1/*
2 * linux/kernel/time/ntp.c
3 *
4 * NTP state machine interfaces and logic. 2 * NTP state machine interfaces and logic.
5 * 3 *
6 * This code was mainly moved from kernel/timer.c and kernel/time.c 4 * This code was mainly moved from kernel/timer.c and kernel/time.c
7 * Please see those files for relevant copyright info and historical 5 * Please see those files for relevant copyright info and historical
8 * changelogs. 6 * changelogs.
9 */ 7 */
10
11#include <linux/mm.h>
12#include <linux/time.h>
13#include <linux/timex.h>
14#include <linux/jiffies.h>
15#include <linux/hrtimer.h>
16#include <linux/capability.h> 8#include <linux/capability.h>
17#include <linux/math64.h>
18#include <linux/clocksource.h> 9#include <linux/clocksource.h>
19#include <linux/workqueue.h> 10#include <linux/workqueue.h>
20#include <asm/timex.h> 11#include <linux/hrtimer.h>
12#include <linux/jiffies.h>
13#include <linux/math64.h>
14#include <linux/timex.h>
15#include <linux/time.h>
16#include <linux/mm.h>
21 17
22/* 18/*
23 * Timekeeping variables 19 * NTP timekeeping variables:
24 */ 20 */
25unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */
26unsigned long tick_nsec; /* ACTHZ period (nsec) */
27u64 tick_length;
28static u64 tick_length_base;
29 21
30static struct hrtimer leap_timer; 22/* USER_HZ period (usecs): */
23unsigned long tick_usec = TICK_USEC;
31 24
32#define MAX_TICKADJ 500 /* microsecs */ 25/* ACTHZ period (nsecs): */
33#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \ 26unsigned long tick_nsec;
34 NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ) 27
28u64 tick_length;
29static u64 tick_length_base;
30
31static struct hrtimer leap_timer;
32
33#define MAX_TICKADJ 500LL /* usecs */
34#define MAX_TICKADJ_SCALED \
35 (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
35 36
36/* 37/*
37 * phase-lock loop variables 38 * phase-lock loop variables
38 */ 39 */
39/* TIME_ERROR prevents overwriting the CMOS clock */
40static int time_state = TIME_OK; /* clock synchronization status */
41int time_status = STA_UNSYNC; /* clock status bits */
42static long time_tai; /* TAI offset (s) */
43static s64 time_offset; /* time adjustment (ns) */
44static long time_constant = 2; /* pll time constant */
45long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
46long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
47static s64 time_freq; /* frequency offset (scaled ns/s)*/
48static long time_reftime; /* time at last adjustment (s) */
49long time_adjust;
50static long ntp_tick_adj;
51 40
41/*
42 * clock synchronization status
43 *
44 * (TIME_ERROR prevents overwriting the CMOS clock)
45 */
46static int time_state = TIME_OK;
47
48/* clock status bits: */
49int time_status = STA_UNSYNC;
50
51/* TAI offset (secs): */
52static long time_tai;
53
54/* time adjustment (nsecs): */
55static s64 time_offset;
56
57/* pll time constant: */
58static long time_constant = 2;
59
60/* maximum error (usecs): */
61long time_maxerror = NTP_PHASE_LIMIT;
62
63/* estimated error (usecs): */
64long time_esterror = NTP_PHASE_LIMIT;
65
66/* frequency offset (scaled nsecs/secs): */
67static s64 time_freq;
68
69/* time at last adjustment (secs): */
70static long time_reftime;
71
72long time_adjust;
73
74/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
75static s64 ntp_tick_adj;
76
77/*
78 * NTP methods:
79 */
80
81/*
82 * Update (tick_length, tick_length_base, tick_nsec), based
83 * on (tick_usec, ntp_tick_adj, time_freq):
84 */
52static void ntp_update_frequency(void) 85static void ntp_update_frequency(void)
53{ 86{
54 u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) 87 u64 second_length;
55 << NTP_SCALE_SHIFT; 88 u64 new_base;
56 second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT; 89
57 second_length += time_freq; 90 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
91 << NTP_SCALE_SHIFT;
92
93 second_length += ntp_tick_adj;
94 second_length += time_freq;
58 95
59 tick_length_base = second_length; 96 tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
97 new_base = div_u64(second_length, NTP_INTERVAL_FREQ);
60 98
61 tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT; 99 /*
62 tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ); 100 * Don't wait for the next second_overflow, apply
101 * the change to the tick length immediately:
102 */
103 tick_length += new_base - tick_length_base;
104 tick_length_base = new_base;
105}
106
107static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
108{
109 time_status &= ~STA_MODE;
110
111 if (secs < MINSEC)
112 return 0;
113
114 if (!(time_status & STA_FLL) && (secs <= MAXSEC))
115 return 0;
116
117 time_status |= STA_MODE;
118
119 return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
63} 120}
64 121
65static void ntp_update_offset(long offset) 122static void ntp_update_offset(long offset)
66{ 123{
67 long mtemp;
68 s64 freq_adj; 124 s64 freq_adj;
125 s64 offset64;
126 long secs;
69 127
70 if (!(time_status & STA_PLL)) 128 if (!(time_status & STA_PLL))
71 return; 129 return;
@@ -84,24 +142,23 @@ static void ntp_update_offset(long offset)
84 * Select how the frequency is to be controlled 142 * Select how the frequency is to be controlled
85 * and in which mode (PLL or FLL). 143 * and in which mode (PLL or FLL).
86 */ 144 */
87 if (time_status & STA_FREQHOLD || time_reftime == 0) 145 secs = xtime.tv_sec - time_reftime;
88 time_reftime = xtime.tv_sec; 146 if (unlikely(time_status & STA_FREQHOLD))
89 mtemp = xtime.tv_sec - time_reftime; 147 secs = 0;
148
90 time_reftime = xtime.tv_sec; 149 time_reftime = xtime.tv_sec;
91 150
92 freq_adj = (s64)offset * mtemp; 151 offset64 = offset;
93 freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant); 152 freq_adj = (offset64 * secs) <<
94 time_status &= ~STA_MODE; 153 (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
95 if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
96 freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
97 mtemp);
98 time_status |= STA_MODE;
99 }
100 freq_adj += time_freq;
101 freq_adj = min(freq_adj, MAXFREQ_SCALED);
102 time_freq = max(freq_adj, -MAXFREQ_SCALED);
103 154
104 time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ); 155 freq_adj += ntp_update_offset_fll(offset64, secs);
156
157 freq_adj = min(freq_adj + time_freq, MAXFREQ_SCALED);
158
159 time_freq = max(freq_adj, -MAXFREQ_SCALED);
160
161 time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
105} 162}
106 163
107/** 164/**
@@ -111,15 +168,15 @@ static void ntp_update_offset(long offset)
111 */ 168 */
112void ntp_clear(void) 169void ntp_clear(void)
113{ 170{
114 time_adjust = 0; /* stop active adjtime() */ 171 time_adjust = 0; /* stop active adjtime() */
115 time_status |= STA_UNSYNC; 172 time_status |= STA_UNSYNC;
116 time_maxerror = NTP_PHASE_LIMIT; 173 time_maxerror = NTP_PHASE_LIMIT;
117 time_esterror = NTP_PHASE_LIMIT; 174 time_esterror = NTP_PHASE_LIMIT;
118 175
119 ntp_update_frequency(); 176 ntp_update_frequency();
120 177
121 tick_length = tick_length_base; 178 tick_length = tick_length_base;
122 time_offset = 0; 179 time_offset = 0;
123} 180}
124 181
125/* 182/*
@@ -140,8 +197,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
140 xtime.tv_sec--; 197 xtime.tv_sec--;
141 wall_to_monotonic.tv_sec++; 198 wall_to_monotonic.tv_sec++;
142 time_state = TIME_OOP; 199 time_state = TIME_OOP;
143 printk(KERN_NOTICE "Clock: " 200 printk(KERN_NOTICE
144 "inserting leap second 23:59:60 UTC\n"); 201 "Clock: inserting leap second 23:59:60 UTC\n");
145 hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC); 202 hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
146 res = HRTIMER_RESTART; 203 res = HRTIMER_RESTART;
147 break; 204 break;
@@ -150,8 +207,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
150 time_tai--; 207 time_tai--;
151 wall_to_monotonic.tv_sec--; 208 wall_to_monotonic.tv_sec--;
152 time_state = TIME_WAIT; 209 time_state = TIME_WAIT;
153 printk(KERN_NOTICE "Clock: " 210 printk(KERN_NOTICE
154 "deleting leap second 23:59:59 UTC\n"); 211 "Clock: deleting leap second 23:59:59 UTC\n");
155 break; 212 break;
156 case TIME_OOP: 213 case TIME_OOP:
157 time_tai++; 214 time_tai++;
@@ -179,7 +236,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
179 */ 236 */
180void second_overflow(void) 237void second_overflow(void)
181{ 238{
182 s64 time_adj; 239 s64 delta;
183 240
184 /* Bump the maxerror field */ 241 /* Bump the maxerror field */
185 time_maxerror += MAXFREQ / NSEC_PER_USEC; 242 time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -192,24 +249,30 @@ void second_overflow(void)
192 * Compute the phase adjustment for the next second. The offset is 249 * Compute the phase adjustment for the next second. The offset is
193 * reduced by a fixed factor times the time constant. 250 * reduced by a fixed factor times the time constant.
194 */ 251 */
195 tick_length = tick_length_base; 252 tick_length = tick_length_base;
196 time_adj = shift_right(time_offset, SHIFT_PLL + time_constant); 253
197 time_offset -= time_adj; 254 delta = shift_right(time_offset, SHIFT_PLL + time_constant);
198 tick_length += time_adj; 255 time_offset -= delta;
199 256 tick_length += delta;
200 if (unlikely(time_adjust)) { 257
201 if (time_adjust > MAX_TICKADJ) { 258 if (!time_adjust)
202 time_adjust -= MAX_TICKADJ; 259 return;
203 tick_length += MAX_TICKADJ_SCALED; 260
204 } else if (time_adjust < -MAX_TICKADJ) { 261 if (time_adjust > MAX_TICKADJ) {
205 time_adjust += MAX_TICKADJ; 262 time_adjust -= MAX_TICKADJ;
206 tick_length -= MAX_TICKADJ_SCALED; 263 tick_length += MAX_TICKADJ_SCALED;
207 } else { 264 return;
208 tick_length += (s64)(time_adjust * NSEC_PER_USEC /
209 NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
210 time_adjust = 0;
211 }
212 } 265 }
266
267 if (time_adjust < -MAX_TICKADJ) {
268 time_adjust += MAX_TICKADJ;
269 tick_length -= MAX_TICKADJ_SCALED;
270 return;
271 }
272
273 tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
274 << NTP_SCALE_SHIFT;
275 time_adjust = 0;
213} 276}
214 277
215#ifdef CONFIG_GENERIC_CMOS_UPDATE 278#ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -233,12 +296,13 @@ static void sync_cmos_clock(struct work_struct *work)
233 * This code is run on a timer. If the clock is set, that timer 296 * This code is run on a timer. If the clock is set, that timer
234 * may not expire at the correct time. Thus, we adjust... 297 * may not expire at the correct time. Thus, we adjust...
235 */ 298 */
236 if (!ntp_synced()) 299 if (!ntp_synced()) {
237 /* 300 /*
238 * Not synced, exit, do not restart a timer (if one is 301 * Not synced, exit, do not restart a timer (if one is
239 * running, let it run out). 302 * running, let it run out).
240 */ 303 */
241 return; 304 return;
305 }
242 306
243 getnstimeofday(&now); 307 getnstimeofday(&now);
244 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) 308 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
@@ -270,7 +334,116 @@ static void notify_cmos_timer(void)
270static inline void notify_cmos_timer(void) { } 334static inline void notify_cmos_timer(void) { }
271#endif 335#endif
272 336
273/* adjtimex mainly allows reading (and writing, if superuser) of 337/*
338 * Start the leap seconds timer:
339 */
340static inline void ntp_start_leap_timer(struct timespec *ts)
341{
342 long now = ts->tv_sec;
343
344 if (time_status & STA_INS) {
345 time_state = TIME_INS;
346 now += 86400 - now % 86400;
347 hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
348
349 return;
350 }
351
352 if (time_status & STA_DEL) {
353 time_state = TIME_DEL;
354 now += 86400 - (now + 1) % 86400;
355 hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
356 }
357}
358
359/*
360 * Propagate a new txc->status value into the NTP state:
361 */
362static inline void process_adj_status(struct timex *txc, struct timespec *ts)
363{
364 if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
365 time_state = TIME_OK;
366 time_status = STA_UNSYNC;
367 }
368
369 /*
370 * If we turn on PLL adjustments then reset the
371 * reference time to current time.
372 */
373 if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
374 time_reftime = xtime.tv_sec;
375
376 /* only set allowed bits */
377 time_status &= STA_RONLY;
378 time_status |= txc->status & ~STA_RONLY;
379
380 switch (time_state) {
381 case TIME_OK:
382 ntp_start_leap_timer(ts);
383 break;
384 case TIME_INS:
385 case TIME_DEL:
386 time_state = TIME_OK;
387 ntp_start_leap_timer(ts);
388 case TIME_WAIT:
389 if (!(time_status & (STA_INS | STA_DEL)))
390 time_state = TIME_OK;
391 break;
392 case TIME_OOP:
393 hrtimer_restart(&leap_timer);
394 break;
395 }
396}
397/*
398 * Called with the xtime lock held, so we can access and modify
399 * all the global NTP state:
400 */
401static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
402{
403 if (txc->modes & ADJ_STATUS)
404 process_adj_status(txc, ts);
405
406 if (txc->modes & ADJ_NANO)
407 time_status |= STA_NANO;
408
409 if (txc->modes & ADJ_MICRO)
410 time_status &= ~STA_NANO;
411
412 if (txc->modes & ADJ_FREQUENCY) {
413 time_freq = txc->freq * PPM_SCALE;
414 time_freq = min(time_freq, MAXFREQ_SCALED);
415 time_freq = max(time_freq, -MAXFREQ_SCALED);
416 }
417
418 if (txc->modes & ADJ_MAXERROR)
419 time_maxerror = txc->maxerror;
420
421 if (txc->modes & ADJ_ESTERROR)
422 time_esterror = txc->esterror;
423
424 if (txc->modes & ADJ_TIMECONST) {
425 time_constant = txc->constant;
426 if (!(time_status & STA_NANO))
427 time_constant += 4;
428 time_constant = min(time_constant, (long)MAXTC);
429 time_constant = max(time_constant, 0l);
430 }
431
432 if (txc->modes & ADJ_TAI && txc->constant > 0)
433 time_tai = txc->constant;
434
435 if (txc->modes & ADJ_OFFSET)
436 ntp_update_offset(txc->offset);
437
438 if (txc->modes & ADJ_TICK)
439 tick_usec = txc->tick;
440
441 if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
442 ntp_update_frequency();
443}
444
445/*
446 * adjtimex mainly allows reading (and writing, if superuser) of
274 * kernel time-keeping variables. used by xntpd. 447 * kernel time-keeping variables. used by xntpd.
275 */ 448 */
276int do_adjtimex(struct timex *txc) 449int do_adjtimex(struct timex *txc)
@@ -291,11 +464,14 @@ int do_adjtimex(struct timex *txc)
291 if (txc->modes && !capable(CAP_SYS_TIME)) 464 if (txc->modes && !capable(CAP_SYS_TIME))
292 return -EPERM; 465 return -EPERM;
293 466
294 /* if the quartz is off by more than 10% something is VERY wrong! */ 467 /*
468 * if the quartz is off by more than 10% then
469 * something is VERY wrong!
470 */
295 if (txc->modes & ADJ_TICK && 471 if (txc->modes & ADJ_TICK &&
296 (txc->tick < 900000/USER_HZ || 472 (txc->tick < 900000/USER_HZ ||
297 txc->tick > 1100000/USER_HZ)) 473 txc->tick > 1100000/USER_HZ))
298 return -EINVAL; 474 return -EINVAL;
299 475
300 if (txc->modes & ADJ_STATUS && time_state != TIME_OK) 476 if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
301 hrtimer_cancel(&leap_timer); 477 hrtimer_cancel(&leap_timer);
@@ -305,7 +481,6 @@ int do_adjtimex(struct timex *txc)
305 481
306 write_seqlock_irq(&xtime_lock); 482 write_seqlock_irq(&xtime_lock);
307 483
308 /* If there are input parameters, then process them */
309 if (txc->modes & ADJ_ADJTIME) { 484 if (txc->modes & ADJ_ADJTIME) {
310 long save_adjust = time_adjust; 485 long save_adjust = time_adjust;
311 486
@@ -315,98 +490,24 @@ int do_adjtimex(struct timex *txc)
315 ntp_update_frequency(); 490 ntp_update_frequency();
316 } 491 }
317 txc->offset = save_adjust; 492 txc->offset = save_adjust;
318 goto adj_done; 493 } else {
319 }
320 if (txc->modes) {
321 long sec;
322
323 if (txc->modes & ADJ_STATUS) {
324 if ((time_status & STA_PLL) &&
325 !(txc->status & STA_PLL)) {
326 time_state = TIME_OK;
327 time_status = STA_UNSYNC;
328 }
329 /* only set allowed bits */
330 time_status &= STA_RONLY;
331 time_status |= txc->status & ~STA_RONLY;
332
333 switch (time_state) {
334 case TIME_OK:
335 start_timer:
336 sec = ts.tv_sec;
337 if (time_status & STA_INS) {
338 time_state = TIME_INS;
339 sec += 86400 - sec % 86400;
340 hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
341 } else if (time_status & STA_DEL) {
342 time_state = TIME_DEL;
343 sec += 86400 - (sec + 1) % 86400;
344 hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
345 }
346 break;
347 case TIME_INS:
348 case TIME_DEL:
349 time_state = TIME_OK;
350 goto start_timer;
351 break;
352 case TIME_WAIT:
353 if (!(time_status & (STA_INS | STA_DEL)))
354 time_state = TIME_OK;
355 break;
356 case TIME_OOP:
357 hrtimer_restart(&leap_timer);
358 break;
359 }
360 }
361
362 if (txc->modes & ADJ_NANO)
363 time_status |= STA_NANO;
364 if (txc->modes & ADJ_MICRO)
365 time_status &= ~STA_NANO;
366
367 if (txc->modes & ADJ_FREQUENCY) {
368 time_freq = (s64)txc->freq * PPM_SCALE;
369 time_freq = min(time_freq, MAXFREQ_SCALED);
370 time_freq = max(time_freq, -MAXFREQ_SCALED);
371 }
372
373 if (txc->modes & ADJ_MAXERROR)
374 time_maxerror = txc->maxerror;
375 if (txc->modes & ADJ_ESTERROR)
376 time_esterror = txc->esterror;
377
378 if (txc->modes & ADJ_TIMECONST) {
379 time_constant = txc->constant;
380 if (!(time_status & STA_NANO))
381 time_constant += 4;
382 time_constant = min(time_constant, (long)MAXTC);
383 time_constant = max(time_constant, 0l);
384 }
385
386 if (txc->modes & ADJ_TAI && txc->constant > 0)
387 time_tai = txc->constant;
388
389 if (txc->modes & ADJ_OFFSET)
390 ntp_update_offset(txc->offset);
391 if (txc->modes & ADJ_TICK)
392 tick_usec = txc->tick;
393 494
394 if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET)) 495 /* If there are input parameters, then process them: */
395 ntp_update_frequency(); 496 if (txc->modes)
396 } 497 process_adjtimex_modes(txc, &ts);
397 498
398 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, 499 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
399 NTP_SCALE_SHIFT); 500 NTP_SCALE_SHIFT);
400 if (!(time_status & STA_NANO)) 501 if (!(time_status & STA_NANO))
401 txc->offset /= NSEC_PER_USEC; 502 txc->offset /= NSEC_PER_USEC;
503 }
402 504
403adj_done:
404 result = time_state; /* mostly `TIME_OK' */ 505 result = time_state; /* mostly `TIME_OK' */
405 if (time_status & (STA_UNSYNC|STA_CLOCKERR)) 506 if (time_status & (STA_UNSYNC|STA_CLOCKERR))
406 result = TIME_ERROR; 507 result = TIME_ERROR;
407 508
408 txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) * 509 txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
409 (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT); 510 PPM_SCALE_INV, NTP_SCALE_SHIFT);
410 txc->maxerror = time_maxerror; 511 txc->maxerror = time_maxerror;
411 txc->esterror = time_esterror; 512 txc->esterror = time_esterror;
412 txc->status = time_status; 513 txc->status = time_status;
@@ -425,6 +526,7 @@ adj_done:
425 txc->calcnt = 0; 526 txc->calcnt = 0;
426 txc->errcnt = 0; 527 txc->errcnt = 0;
427 txc->stbcnt = 0; 528 txc->stbcnt = 0;
529
428 write_sequnlock_irq(&xtime_lock); 530 write_sequnlock_irq(&xtime_lock);
429 531
430 txc->time.tv_sec = ts.tv_sec; 532 txc->time.tv_sec = ts.tv_sec;
@@ -440,6 +542,8 @@ adj_done:
440static int __init ntp_tick_adj_setup(char *str) 542static int __init ntp_tick_adj_setup(char *str)
441{ 543{
442 ntp_tick_adj = simple_strtol(str, NULL, 0); 544 ntp_tick_adj = simple_strtol(str, NULL, 0);
545 ntp_tick_adj <<= NTP_SCALE_SHIFT;
546
443 return 1; 547 return 1;
444} 548}
445 549
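
The reworked ntp_update_frequency() above builds one NTP-corrected second as a fixed-point value (shifted left by NTP_SCALE_SHIFT), then derives both tick_nsec and the per-tick tick_length_base from it, folding any change into tick_length immediately instead of waiting for the next second_overflow(). A stand-alone arithmetic check under common assumptions (HZ=1000, USER_HZ=100, NTP_SCALE_SHIFT=32, NTP_INTERVAL_FREQ=HZ) with no corrections pending:

#include <inttypes.h>
#include <stdio.h>

#define HZ			1000
#define USER_HZ			100
#define NSEC_PER_USEC		1000ULL
#define NTP_SCALE_SHIFT		32
#define NTP_INTERVAL_FREQ	HZ

int main(void)
{
	uint64_t tick_usec = 1000000 / USER_HZ;		/* TICK_USEC: 10000 */
	int64_t ntp_tick_adj = 0, time_freq = 0;	/* no NTP corrections */
	uint64_t second_length, tick_nsec, tick_length_base;

	second_length  = (tick_usec * NSEC_PER_USEC * USER_HZ) << NTP_SCALE_SHIFT;
	second_length += ntp_tick_adj;			/* already upscaled by setup */
	second_length += time_freq;

	tick_nsec        = (second_length / HZ) >> NTP_SCALE_SHIFT;
	tick_length_base =  second_length / NTP_INTERVAL_FREQ;

	printf("tick_nsec        = %" PRIu64 " ns\n", tick_nsec);  /* 1000000 */
	printf("tick_length_base = %" PRIu64 " (ns << %d)\n",
	       tick_length_base, NTP_SCALE_SHIFT);
	return 0;
}
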
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
new file mode 100644
index 000000000000..71e7f1a19156
--- /dev/null
+++ b/kernel/time/timecompare.c
@@ -0,0 +1,191 @@
1/*
2 * Copyright (C) 2009 Intel Corporation.
3 * Author: Patrick Ohly <patrick.ohly@intel.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/timecompare.h>
21#include <linux/module.h>
22#include <linux/math64.h>
23
24/*
25 * fixed point arithmetic scale factor for skew
26 *
27 * Usually one would measure skew in ppb (parts per billion, 1e9), but
28 * using a factor of 2 simplifies the math.
29 */
30#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
31
32ktime_t timecompare_transform(struct timecompare *sync,
33 u64 source_tstamp)
34{
35 u64 nsec;
36
37 nsec = source_tstamp + sync->offset;
38 nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
39 TIMECOMPARE_SKEW_RESOLUTION;
40
41 return ns_to_ktime(nsec);
42}
43EXPORT_SYMBOL(timecompare_transform);
44
45int timecompare_offset(struct timecompare *sync,
46 s64 *offset,
47 u64 *source_tstamp)
48{
49 u64 start_source = 0, end_source = 0;
50 struct {
51 s64 offset;
52 s64 duration_target;
53 } buffer[10], sample, *samples;
54 int counter = 0, i;
55 int used;
56 int index;
57 int num_samples = sync->num_samples;
58
59 if (num_samples > sizeof(buffer)/sizeof(buffer[0])) {
60 samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
61 if (!samples) {
62 samples = buffer;
63 num_samples = sizeof(buffer)/sizeof(buffer[0]);
64 }
65 } else {
66 samples = buffer;
67 }
68
69 /* run until we have enough valid samples, but do not try forever */
70 i = 0;
71 counter = 0;
72 while (1) {
73 u64 ts;
74 ktime_t start, end;
75
76 start = sync->target();
77 ts = timecounter_read(sync->source);
78 end = sync->target();
79
80 if (!i)
81 start_source = ts;
82
83 /* ignore negative durations */
84 sample.duration_target = ktime_to_ns(ktime_sub(end, start));
85 if (sample.duration_target >= 0) {
86 /*
87 * assume symmetric delay to and from source:
88 * average target time corresponds to measured
89 * source time
90 */
91 sample.offset =
92 ktime_to_ns(ktime_add(end, start)) / 2 -
93 ts;
94
95 /* simple insertion sort based on duration */
96 index = counter - 1;
97 while (index >= 0) {
98 if (samples[index].duration_target <
99 sample.duration_target)
100 break;
101 samples[index + 1] = samples[index];
102 index--;
103 }
104 samples[index + 1] = sample;
105 counter++;
106 }
107
108 i++;
109 if (counter >= num_samples || i >= 100000) {
110 end_source = ts;
111 break;
112 }
113 }
114
115 *source_tstamp = (end_source + start_source) / 2;
116
117 /* remove outliers by only using 75% of the samples */
118 used = counter * 3 / 4;
119 if (!used)
120 used = counter;
121 if (used) {
122 /* calculate average */
123 s64 off = 0;
124 for (index = 0; index < used; index++)
125 off += samples[index].offset;
126 *offset = div_s64(off, used);
127 }
128
129 if (samples && samples != buffer)
130 kfree(samples);
131
132 return used;
133}
134EXPORT_SYMBOL(timecompare_offset);
135
136void __timecompare_update(struct timecompare *sync,
137 u64 source_tstamp)
138{
139 s64 offset;
140 u64 average_time;
141
142 if (!timecompare_offset(sync, &offset, &average_time))
143 return;
144
145 if (!sync->last_update) {
146 sync->last_update = average_time;
147 sync->offset = offset;
148 sync->skew = 0;
149 } else {
150 s64 delta_nsec = average_time - sync->last_update;
151
152 /* avoid division by negative or small deltas */
153 if (delta_nsec >= 10000) {
154 s64 delta_offset_nsec = offset - sync->offset;
155 s64 skew; /* delta_offset_nsec *
156 TIMECOMPARE_SKEW_RESOLUTION /
157 delta_nsec */
158 u64 divisor;
159
160 /* div_s64() is limited to 32 bit divisor */
161 skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
162 divisor = delta_nsec;
163 while (unlikely(divisor >= ((s64)1) << 32)) {
164 /* divide both by 2; beware, right shift
165 of negative value has undefined
166 behavior and can only be used for
167 the positive divisor */
168 skew = div_s64(skew, 2);
169 divisor >>= 1;
170 }
171 skew = div_s64(skew, divisor);
172
173 /*
174 * Calculate new overall skew as 4/16 the
175 * old value and 12/16 the new one. This is
176 * a rather arbitrary tradeoff between
177 * only using the latest measurement (0/16 and
178 * 16/16) and even more weight on past measurements.
179 */
180#define TIMECOMPARE_NEW_SKEW_PER_16 12
181 sync->skew =
182 div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
183 sync->skew +
184 TIMECOMPARE_NEW_SKEW_PER_16 * skew,
185 16);
186 sync->last_update = average_time;
187 sync->offset = offset;
188 }
189 }
190}
191EXPORT_SYMBOL(__timecompare_update);
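
The skew maintained by __timecompare_update() above is a dimensionless ratio stored in fixed point: (change in offset) * 2^30 / (elapsed target nanoseconds), blended as 4/16 of the old estimate plus 12/16 of the new sample, and later applied per-nanosecond by timecompare_transform(). The kernel additionally halves numerator and divisor until the divisor fits in 32 bits for div_s64(); this plain-arithmetic walk-through skips that and uses made-up numbers:

#include <inttypes.h>
#include <stdio.h>

#define SKEW_RES	(((int64_t)1) << 30)	/* TIMECOMPARE_SKEW_RESOLUTION */

int main(void)
{
	int64_t old_skew = 0;
	int64_t delta_offset_ns = 50000;	/* offset drifted by 50 us ... */
	int64_t delta_ns = 1000000000;		/* ... over one second of target time */
	int64_t skew, blended;

	/* 50 us/s == 50 ppm; in units of 2^-30 that is roughly 53687 */
	skew = delta_offset_ns * SKEW_RES / delta_ns;

	/* keep 4/16 of the old estimate, take 12/16 of the new sample */
	blended = (4 * old_skew + 12 * skew) / 16;

	/* extrapolating 2 s past last_update adds skew * 2e9 / 2^30 ns */
	printf("skew=%" PRId64 " blended=%" PRId64 " extra_ns(2s)=%" PRId64 "\n",
	       skew, blended, blended * 2000000000LL / SKEW_RES);
	return 0;
}
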
diff --git a/kernel/timer.c b/kernel/timer.c
index 13dd64fe143d..9b77fc9a9ac8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -589,11 +589,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
589 } 589 }
590} 590}
591 591
592int __mod_timer(struct timer_list *timer, unsigned long expires) 592static inline int
593__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
593{ 594{
594 struct tvec_base *base, *new_base; 595 struct tvec_base *base, *new_base;
595 unsigned long flags; 596 unsigned long flags;
596 int ret = 0; 597 int ret;
598
599 ret = 0;
597 600
598 timer_stats_timer_set_start_info(timer); 601 timer_stats_timer_set_start_info(timer);
599 BUG_ON(!timer->function); 602 BUG_ON(!timer->function);
@@ -603,6 +606,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
603 if (timer_pending(timer)) { 606 if (timer_pending(timer)) {
604 detach_timer(timer, 0); 607 detach_timer(timer, 0);
605 ret = 1; 608 ret = 1;
609 } else {
610 if (pending_only)
611 goto out_unlock;
606 } 612 }
607 613
608 debug_timer_activate(timer); 614 debug_timer_activate(timer);
@@ -629,42 +635,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
629 635
630 timer->expires = expires; 636 timer->expires = expires;
631 internal_add_timer(base, timer); 637 internal_add_timer(base, timer);
638
639out_unlock:
632 spin_unlock_irqrestore(&base->lock, flags); 640 spin_unlock_irqrestore(&base->lock, flags);
633 641
634 return ret; 642 return ret;
635} 643}
636 644
637EXPORT_SYMBOL(__mod_timer);
638
639/** 645/**
640 * add_timer_on - start a timer on a particular CPU 646 * mod_timer_pending - modify a pending timer's timeout
641 * @timer: the timer to be added 647 * @timer: the pending timer to be modified
642 * @cpu: the CPU to start it on 648 * @expires: new timeout in jiffies
643 * 649 *
644 * This is not very scalable on SMP. Double adds are not possible. 650 * mod_timer_pending() is the same for pending timers as mod_timer(),
651 * but will not re-activate and modify already deleted timers.
652 *
653 * It is useful for unserialized use of timers.
645 */ 654 */
646void add_timer_on(struct timer_list *timer, int cpu) 655int mod_timer_pending(struct timer_list *timer, unsigned long expires)
647{ 656{
648 struct tvec_base *base = per_cpu(tvec_bases, cpu); 657 return __mod_timer(timer, expires, true);
649 unsigned long flags;
650
651 timer_stats_timer_set_start_info(timer);
652 BUG_ON(timer_pending(timer) || !timer->function);
653 spin_lock_irqsave(&base->lock, flags);
654 timer_set_base(timer, base);
655 debug_timer_activate(timer);
656 internal_add_timer(base, timer);
657 /*
658 * Check whether the other CPU is idle and needs to be
659 * triggered to reevaluate the timer wheel when nohz is
660 * active. We are protected against the other CPU fiddling
661 * with the timer by holding the timer base lock. This also
662 * makes sure that a CPU on the way to idle can not evaluate
663 * the timer wheel.
664 */
665 wake_up_idle_cpu(cpu);
666 spin_unlock_irqrestore(&base->lock, flags);
667} 658}
659EXPORT_SYMBOL(mod_timer_pending);
668 660
669/** 661/**
670 * mod_timer - modify a timer's timeout 662 * mod_timer - modify a timer's timeout
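
As documented above, mod_timer_pending() only pushes out a timer that is still armed and never re-activates one that has already been deleted. A driver-style sketch of where that matters; struct my_dev, my_rx_packet() and the 5-second watchdog interval are illustrative, not part of this patch:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list watchdog;	/* illustrative field name */
};

static void my_rx_packet(struct my_dev *dev)
{
	/* no-op if the teardown path already ran del_timer_sync() */
	mod_timer_pending(&dev->watchdog, jiffies + 5 * HZ);
}

static void my_teardown(struct my_dev *dev)
{
	del_timer_sync(&dev->watchdog);
	/* a racing my_rx_packet() cannot re-arm the watchdog now */
}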
@@ -688,9 +680,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
688 */ 680 */
689int mod_timer(struct timer_list *timer, unsigned long expires) 681int mod_timer(struct timer_list *timer, unsigned long expires)
690{ 682{
691 BUG_ON(!timer->function);
692
693 timer_stats_timer_set_start_info(timer);
694 /* 683 /*
695 * This is a common optimization triggered by the 684 * This is a common optimization triggered by the
696 * networking code - if the timer is re-modified 685 * networking code - if the timer is re-modified
@@ -699,12 +688,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
699 if (timer->expires == expires && timer_pending(timer)) 688 if (timer->expires == expires && timer_pending(timer))
700 return 1; 689 return 1;
701 690
702 return __mod_timer(timer, expires); 691 return __mod_timer(timer, expires, false);
703} 692}
704
705EXPORT_SYMBOL(mod_timer); 693EXPORT_SYMBOL(mod_timer);
706 694
707/** 695/**
696 * add_timer - start a timer
697 * @timer: the timer to be added
698 *
699 * The kernel will do a ->function(->data) callback from the
700 * timer interrupt at the ->expires point in the future. The
701 * current time is 'jiffies'.
702 *
703 * The timer's ->expires, ->function (and if the handler uses it, ->data)
704 * fields must be set prior to calling this function.
705 *
706 * Timers with an ->expires field in the past will be executed in the next
707 * timer tick.
708 */
709void add_timer(struct timer_list *timer)
710{
711 BUG_ON(timer_pending(timer));
712 mod_timer(timer, timer->expires);
713}
714EXPORT_SYMBOL(add_timer);
715
716/**
717 * add_timer_on - start a timer on a particular CPU
718 * @timer: the timer to be added
719 * @cpu: the CPU to start it on
720 *
721 * This is not very scalable on SMP. Double adds are not possible.
722 */
723void add_timer_on(struct timer_list *timer, int cpu)
724{
725 struct tvec_base *base = per_cpu(tvec_bases, cpu);
726 unsigned long flags;
727
728 timer_stats_timer_set_start_info(timer);
729 BUG_ON(timer_pending(timer) || !timer->function);
730 spin_lock_irqsave(&base->lock, flags);
731 timer_set_base(timer, base);
732 debug_timer_activate(timer);
733 internal_add_timer(base, timer);
734 /*
735 * Check whether the other CPU is idle and needs to be
736 * triggered to reevaluate the timer wheel when nohz is
737 * active. We are protected against the other CPU fiddling
738 * with the timer by holding the timer base lock. This also
739 * makes sure that a CPU on the way to idle can not evaluate
740 * the timer wheel.
741 */
742 wake_up_idle_cpu(cpu);
743 spin_unlock_irqrestore(&base->lock, flags);
744}
745
746/**
708 * del_timer - deactivate a timer. 747 * del_timer - deactivate a timer.
709 * @timer: the timer to be deactivated 748 * @timer: the timer to be deactivated
710 * 749 *
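
add_timer() and add_timer_on(), re-documented and relocated in the hunk above, both expect ->expires and ->function to be filled in beforehand; add_timer_on() additionally pins the timer to one CPU's base and nudges that CPU out of nohz idle via wake_up_idle_cpu(). A minimal sketch assuming the 2.6.29-era callback signature; my_poll_fn, my_start_polling and the 2*HZ interval are invented:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_poll_timer;

static void my_poll_fn(unsigned long data)
{
	/* ... periodic work, possibly re-armed later with mod_timer() ... */
}

static void my_start_polling(int cpu)
{
	setup_timer(&my_poll_timer, my_poll_fn, 0);
	my_poll_timer.expires = jiffies + 2 * HZ;

	if (cpu >= 0)
		add_timer_on(&my_poll_timer, cpu);	/* pin to one CPU */
	else
		add_timer(&my_poll_timer);		/* current CPU's base */
}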
@@ -733,7 +772,6 @@ int del_timer(struct timer_list *timer)
733 772
734 return ret; 773 return ret;
735} 774}
736
737EXPORT_SYMBOL(del_timer); 775EXPORT_SYMBOL(del_timer);
738 776
739#ifdef CONFIG_SMP 777#ifdef CONFIG_SMP
@@ -767,7 +805,6 @@ out:
767 805
768 return ret; 806 return ret;
769} 807}
770
771EXPORT_SYMBOL(try_to_del_timer_sync); 808EXPORT_SYMBOL(try_to_del_timer_sync);
772 809
773/** 810/**
@@ -796,7 +833,6 @@ int del_timer_sync(struct timer_list *timer)
796 cpu_relax(); 833 cpu_relax();
797 } 834 }
798} 835}
799
800EXPORT_SYMBOL(del_timer_sync); 836EXPORT_SYMBOL(del_timer_sync);
801#endif 837#endif
802 838
@@ -1268,7 +1304,7 @@ signed long __sched schedule_timeout(signed long timeout)
1268 expire = timeout + jiffies; 1304 expire = timeout + jiffies;
1269 1305
1270 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); 1306 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1271 __mod_timer(&timer, expire); 1307 __mod_timer(&timer, expire, false);
1272 schedule(); 1308 schedule();
1273 del_singleshot_timer_sync(&timer); 1309 del_singleshot_timer_sync(&timer);
1274 1310
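
The schedule_timeout() hunk only adapts the call to the three-argument __mod_timer(); its external contract is unchanged. A small usage sketch of that contract (the function name is illustrative): the caller sets the task state first, and the return value is the number of jiffies left if something woke the task early.

#include <linux/sched.h>
#include <linux/jiffies.h>

static long my_wait_up_to_one_second(void)
{
	/* the state must be set before schedule_timeout(), or it won't sleep */
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(HZ);	/* 0 on timeout, else jiffies left */
}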
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 43f891b05a4b..00d59d048edf 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -122,8 +122,10 @@ void acct_update_integrals(struct task_struct *tsk)
122 if (likely(tsk->mm)) { 122 if (likely(tsk->mm)) {
123 cputime_t time, dtime; 123 cputime_t time, dtime;
124 struct timeval value; 124 struct timeval value;
125 unsigned long flags;
125 u64 delta; 126 u64 delta;
126 127
128 local_irq_save(flags);
127 time = tsk->stime + tsk->utime; 129 time = tsk->stime + tsk->utime;
128 dtime = cputime_sub(time, tsk->acct_timexpd); 130 dtime = cputime_sub(time, tsk->acct_timexpd);
129 jiffies_to_timeval(cputime_to_jiffies(dtime), &value); 131 jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
@@ -131,10 +133,12 @@ void acct_update_integrals(struct task_struct *tsk)
131 delta = delta * USEC_PER_SEC + value.tv_usec; 133 delta = delta * USEC_PER_SEC + value.tv_usec;
132 134
133 if (delta == 0) 135 if (delta == 0)
134 return; 136 goto out;
135 tsk->acct_timexpd = time; 137 tsk->acct_timexpd = time;
136 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); 138 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
137 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; 139 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
140 out:
141 local_irq_restore(flags);
138 } 142 }
139} 143}
140 144
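
The tsacct hunk brackets the accounting update with local_irq_save()/local_irq_restore(), which is why the early "return" becomes "goto out": every exit now has to pass through the restore, otherwise interrupts would stay disabled. A generic sketch of that pattern; struct my_counters and my_update() are stand-ins, not kernel symbols:

#include <linux/irqflags.h>
#include <linux/types.h>

struct my_counters {			/* illustrative type */
	u64 total;
};

static void my_update(struct my_counters *c, u64 delta)
{
	unsigned long flags;

	local_irq_save(flags);
	if (delta == 0)
		goto out;	/* a bare return would skip the restore below */
	c->total += delta;
out:
	local_irq_restore(flags);
}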
diff --git a/kernel/user.c b/kernel/user.c
index 3551ac742395..850e0ba41c1e 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -20,7 +20,7 @@
20 20
21struct user_namespace init_user_ns = { 21struct user_namespace init_user_ns = {
22 .kref = { 22 .kref = {
23 .refcount = ATOMIC_INIT(1), 23 .refcount = ATOMIC_INIT(2),
24 }, 24 },
25 .creator = &root_user, 25 .creator = &root_user,
26}; 26};
@@ -286,14 +286,12 @@ int __init uids_sysfs_init(void)
286/* work function to remove sysfs directory for a user and free up 286/* work function to remove sysfs directory for a user and free up
287 * corresponding structures. 287 * corresponding structures.
288 */ 288 */
289static void remove_user_sysfs_dir(struct work_struct *w) 289static void cleanup_user_struct(struct work_struct *w)
290{ 290{
291 struct user_struct *up = container_of(w, struct user_struct, work); 291 struct user_struct *up = container_of(w, struct user_struct, work);
292 unsigned long flags; 292 unsigned long flags;
293 int remove_user = 0; 293 int remove_user = 0;
294 294
295 if (up->user_ns != &init_user_ns)
296 return;
297 /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del() 295 /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
298 * atomic. 296 * atomic.
299 */ 297 */
@@ -312,9 +310,11 @@ static void remove_user_sysfs_dir(struct work_struct *w)
312 if (!remove_user) 310 if (!remove_user)
313 goto done; 311 goto done;
314 312
315 kobject_uevent(&up->kobj, KOBJ_REMOVE); 313 if (up->user_ns == &init_user_ns) {
316 kobject_del(&up->kobj); 314 kobject_uevent(&up->kobj, KOBJ_REMOVE);
317 kobject_put(&up->kobj); 315 kobject_del(&up->kobj);
316 kobject_put(&up->kobj);
317 }
318 318
319 sched_destroy_user(up); 319 sched_destroy_user(up);
320 key_put(up->uid_keyring); 320 key_put(up->uid_keyring);
@@ -335,7 +335,7 @@ static void free_user(struct user_struct *up, unsigned long flags)
335 atomic_inc(&up->__count); 335 atomic_inc(&up->__count);
336 spin_unlock_irqrestore(&uidhash_lock, flags); 336 spin_unlock_irqrestore(&uidhash_lock, flags);
337 337
338 INIT_WORK(&up->work, remove_user_sysfs_dir); 338 INIT_WORK(&up->work, cleanup_user_struct);
339 schedule_work(&up->work); 339 schedule_work(&up->work);
340} 340}
341 341
@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
362 362
363#endif 363#endif
364 364
365#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
366/*
367 * We need to check if a setuid can take place. This function should be called
368 * before successfully completing the setuid.
369 */
370int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
371{
372
373 return sched_rt_can_attach(up->tg, tsk);
374
375}
376#else
377int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
378{
379 return 1;
380}
381#endif
382
365/* 383/*
366 * Locate the user_struct for the passed UID. If found, take a ref on it. The 384 * Locate the user_struct for the passed UID. If found, take a ref on it. The
367 * caller must undo that ref with free_uid(). 385 * caller must undo that ref with free_uid().
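
task_can_switch_user(), added above, is meant to be consulted before a setuid is committed, so a task cannot be handed to a user whose RT task group cannot accept it. A hedged sketch of such a call site; my_set_user() and the -EINVAL choice are illustrative, not the patch's actual wiring:

#include <linux/sched.h>
#include <linux/errno.h>

static int my_set_user(struct user_struct *new_user)
{
	/*
	 * With per-user RT group scheduling, refuse the switch if the
	 * target user's task group cannot take this task's RT class.
	 */
	if (!task_can_switch_user(new_user, current))
		return -EINVAL;

	/* ... commit the uid switch here ... */
	return 0;
}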