Diffstat (limited to 'kernel')

-rw-r--r--  kernel/hrtimer.c        |  1
-rw-r--r--  kernel/irq/Makefile     |  3
-rw-r--r--  kernel/irq/migration.c  |  5
-rw-r--r--  kernel/panic.c          |  1
-rw-r--r--  kernel/sched.c          | 62
-rw-r--r--  kernel/sys_ni.c         | 12
-rw-r--r--  kernel/timer.c          | 29

7 files changed, 79 insertions(+), 34 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f181ff4dd32e..d2a7296c8251 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,7 @@ int hrtimer_cancel(struct hrtimer *timer)
 
 		if (ret >= 0)
 			return ret;
+		cpu_relax();
 	}
 }
 
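
The change above closes a busy-wait gap in hrtimer_cancel(): hrtimer_try_to_cancel() returns -1 while the timer callback is running, so the caller loops until cancellation succeeds, and cpu_relax() marks the spin (on x86 it emits the PAUSE instruction). A sketch of the whole function after the patch, reconstructed from the hunk rather than copied from the tree:

int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)		/* 0: not active, 1: cancelled */
			return ret;
		/* callback is running: spin politely until it finishes */
		cpu_relax();
	}
}
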
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 2b33f852be3e..9f77f50d8143 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,4 +1,5 @@
 
-obj-y := handle.o manage.o spurious.o migration.o
+obj-y := handle.o manage.o spurious.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 52a8655fa080..134f9f2e0e39 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,6 +1,5 @@
-#include <linux/irq.h>
 
-#if defined(CONFIG_GENERIC_PENDING_IRQ)
+#include <linux/irq.h>
 
 void set_pending_irq(unsigned int irq, cpumask_t mask)
 {
@@ -61,5 +60,3 @@ void move_native_irq(int irq)
 	}
 	cpus_clear(pending_irq_cpumask[irq]);
 }
-
-#endif
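
Together with the Makefile hunk above, this removes doubled conditional logic: the file used to be compiled always and emptied out by the preprocessor; now kbuild skips it entirely when the option is unset. A sketch of the idiom:

/*
 * Old style: compile unconditionally, guard the body.
 *
 *	#if defined(CONFIG_GENERIC_PENDING_IRQ)
 *	... whole file ...
 *	#endif
 *
 * New style: no guard in the source; the Makefile line
 *
 *	obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
 *
 * expands to "obj-y += migration.o" when the option is y, and to the
 * ignored "obj- += migration.o" otherwise, so the object is never
 * even compiled on configurations that don't use IRQ migration.
 */
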
diff --git a/kernel/panic.c b/kernel/panic.c
index f895c7c01d5b..cc2a4c9c36ac 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,7 +27,6 @@ static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
 int panic_timeout;
-EXPORT_SYMBOL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
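
The single deleted line un-exports panic_timeout: built-in code can still reference it, but loadable modules no longer can. For illustration only, a hypothetical module fragment that would now fail to load with an "unknown symbol" error:

/* hypothetical out-of-tree module code */
extern int panic_timeout;

static int __init example_init(void)
{
	panic_timeout = 30;	/* module reference, now unresolvable */
	return 0;
}
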
diff --git a/kernel/sched.c b/kernel/sched.c
index dd153d6f8a04..365f0b90b4de 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -665,13 +665,55 @@ static int effective_prio(task_t *p)
 }
 
 /*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switched decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired, and switch periodically
+ * regardless, to ensure that highly interactive tasks do not starve
+ * the less fortunate for unreasonably long periods.
+ */
+static inline int expired_starving(runqueue_t *rq)
+{
+	int limit;
+
+	/*
+	 * Arrays were recently switched, all is well
+	 */
+	if (!rq->expired_timestamp)
+		return 0;
+
+	limit = STARVATION_LIMIT * rq->nr_running;
+
+	/*
+	 * It's time to switch arrays
+	 */
+	if (jiffies - rq->expired_timestamp >= limit)
+		return 1;
+
+	/*
+	 * There's a better selection in the expired array
+	 */
+	if (rq->curr->static_prio > rq->best_expired_prio)
+		return 1;
+
+	/*
+	 * All is well
+	 */
+	return 0;
+}
+
+/*
  * __activate_task - move a task to the runqueue.
  */
 static void __activate_task(task_t *p, runqueue_t *rq)
 {
 	prio_array_t *target = rq->active;
 
-	if (batch_task(p))
+	if (unlikely(batch_task(p) || (expired_starving(rq) && !rt_task(p))))
 		target = rq->expired;
 	enqueue_task(p, target);
 	rq->nr_running++;
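
A note on the timeout test in expired_starving() above: "jiffies - rq->expired_timestamp >= limit" is unsigned arithmetic, so it stays correct when jiffies wraps around. The jiffies helpers can express the same thing; an equivalent formulation (a sketch, not what the patch uses) is:

#include <linux/jiffies.h>

	/* same test as "jiffies - rq->expired_timestamp >= limit" */
	if (time_after_eq(jiffies, rq->expired_timestamp + limit))
		return 1;
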
@@ -2490,22 +2532,6 @@ unsigned long long current_sched_time(const task_t *tsk)
 }
 
 /*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
-	((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-		(jiffies - (rq)->expired_timestamp >= \
-			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-			((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2640,7 +2666,7 @@ void scheduler_tick(void)
 
 		if (!rq->expired_timestamp)
 			rq->expired_timestamp = jiffies;
-		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+		if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
 			enqueue_task(p, rq->expired);
 			if (p->static_prio < rq->best_expired_prio)
 				rq->best_expired_prio = p->static_prio;
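
Comparing the removed macro with the new function shows two deliberate simplifications besides readability: the new deadline drops the old "+ 1", and the old "STARVATION_LIMIT &&" short-circuit (which disabled the timeout entirely if that constant were 0) is gone. Side by side, the two timeout conditions are:

/* old (macro): */
jiffies - (rq)->expired_timestamp >= STARVATION_LIMIT * ((rq)->nr_running) + 1

/* new (function): */
jiffies - rq->expired_timestamp >= STARVATION_LIMIT * rq->nr_running
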
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d82864c4a617..5433195040f1 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -120,3 +120,15 @@ cond_syscall(sys32_sysctl);
 cond_syscall(ppc_rtas);
 cond_syscall(sys_spu_run);
 cond_syscall(sys_spu_create);
+
+/* mmu depending weak syscall entries */
+cond_syscall(sys_mprotect);
+cond_syscall(sys_msync);
+cond_syscall(sys_mlock);
+cond_syscall(sys_munlock);
+cond_syscall(sys_mlockall);
+cond_syscall(sys_munlockall);
+cond_syscall(sys_mincore);
+cond_syscall(sys_madvise);
+cond_syscall(sys_mremap);
+cond_syscall(sys_remap_file_pages);
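
These additions lean on cond_syscall(), which gives each listed syscall a weak fallback so that nommu (CONFIG_MMU=n) kernels, where the real implementations are compiled out, still resolve their syscall table entries and return -ENOSYS. The exact definition is per-architecture, but the mechanism is, in effect:

/* the catch-all entry point (defined in this file): */
asmlinkage long sys_ni_syscall(void)
{
	return -ENOSYS;
}

/*
 * cond_syscall(x): declare x as a weak alias of sys_ni_syscall, so
 * that a real (strong) sys_mprotect etc. overrides it when it is
 * compiled in.
 */
#define cond_syscall(x) \
	asmlinkage long x(void) __attribute__((weak, alias("sys_ni_syscall")))
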
diff --git a/kernel/timer.c b/kernel/timer.c
index 471ab8710b8f..883773788836 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -81,9 +81,10 @@ struct tvec_t_base_s {
 } ____cacheline_aligned_in_smp;
 
 typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
 tvec_base_t boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
 
 static inline void set_running_timer(tvec_base_t *base,
 		struct timer_list *timer)
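
Statically pointing every CPU's tvec_bases slot at boot_tvec_bases means the per-CPU pointer is valid from the start, even on CPUs whose init_timers_cpu() has not run yet; the old NULL-then-allocate dance (see the next hunk) depended on the pointer starting out NULL. A sketch of what consumers can now rely on:

/* valid at any point after early boot, for any cpu: */
tvec_base_t *base = per_cpu(tvec_bases, cpu);
/* base is &boot_tvec_bases until init_timers_cpu(cpu) installs a
 * per-node allocation, but it is never NULL */
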
@@ -1224,28 +1225,36 @@ static int __devinit init_timers_cpu(int cpu)
 {
 	int j;
 	tvec_base_t *base;
+	static char __devinitdata tvec_base_done[NR_CPUS];
 
-	base = per_cpu(tvec_bases, cpu);
-	if (!base) {
+	if (!tvec_base_done[cpu]) {
 		static char boot_done;
 
-		/*
-		 * Cannot do allocation in init_timers as that runs before the
-		 * allocator initializes (and would waste memory if there are
-		 * more possible CPUs than will ever be installed/brought up).
-		 */
 		if (boot_done) {
+			/*
+			 * The APs use this path later in boot
+			 */
 			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
 						cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
 			memset(base, 0, sizeof(*base));
+			per_cpu(tvec_bases, cpu) = base;
 		} else {
-			base = &boot_tvec_bases;
+			/*
+			 * This is for the boot CPU - we use compile-time
+			 * static initialisation because per-cpu memory isn't
+			 * ready yet and because the memory allocators are not
+			 * initialised either.
+			 */
 			boot_done = 1;
+			base = &boot_tvec_bases;
 		}
-		per_cpu(tvec_bases, cpu) = base;
+		tvec_base_done[cpu] = 1;
+	} else {
+		base = per_cpu(tvec_bases, cpu);
 	}
+
 	spin_lock_init(&base->lock);
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
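
The tvec_base_done[] flag replaces the old "base == NULL means uninitialised" convention, which the previous hunk broke by giving the pointer a static default. It preserves the old CPU-hotplug semantics: a CPU brought offline and back online reuses the base allocated on its first bring-up rather than going through the allocation path again. Condensed to its branch structure (alloc_node_base() is a hypothetical stand-in for the kmalloc_node() path):

if (!tvec_base_done[cpu]) {			/* first bring-up of this CPU */
	base = boot_done ? alloc_node_base(cpu)	/* APs: per-node kmalloc */
			 : &boot_tvec_bases;	/* boot CPU: static storage */
	tvec_base_done[cpu] = 1;
} else {					/* re-onlined after hotplug */
	base = per_cpu(tvec_bases, cpu);	/* reuse the existing base */
}
/* lock and list-head initialisation then runs in both cases */
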