author    Ingo Molnar <mingo@elte.hu>  2010-11-26 09:03:27 -0500
committer Ingo Molnar <mingo@elte.hu>  2010-11-26 09:05:21 -0500
commit    22a867d81707b0a2720bb5f65255265b95d30526 (patch)
tree      7ec19b155b50b13ae95244c2bfa16aea4920c4f6 /kernel
parent    5bb6b1ea67a73f0665a41726dd7138977b992c6c (diff)
parent    3561d43fd289f590fdae672e5eb831b8d5cf0bf6 (diff)

Merge commit 'v2.6.37-rc3' into sched/core

Merge reason: Pick up latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/debug/kdb/kdb_main.c  21
-rw-r--r--  kernel/futex.c                3
-rw-r--r--  kernel/futex_compat.c         3
-rw-r--r--  kernel/pm_qos_params.c        4
-rw-r--r--  kernel/power/Kconfig          4
-rw-r--r--  kernel/sched.c               39
-rw-r--r--  kernel/sched_fair.c          40
-rw-r--r--  kernel/sched_stoptask.c       4
-rw-r--r--  kernel/sysctl.c               2
-rw-r--r--  kernel/trace/Kconfig          2
-rw-r--r--  kernel/trace/trace.c          1
11 files changed, 84 insertions(+), 39 deletions(-)
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 37755d621924..a6e729766821 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -82,7 +82,7 @@ static kdbtab_t kdb_base_commands[50];
 #define for_each_kdbcmd(cmd, num) \
 	for ((cmd) = kdb_base_commands, (num) = 0; \
 	     num < kdb_max_commands; \
-	     num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++, num++)
+	     num++, num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++)
 
 typedef struct _kdbmsg {
 	int	km_diag;	/* kdb diagnostic */
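
The one-line reordering above fixes an off-by-one walk across the two command tables: with the old comma order, num was compared before being incremented, so the loop dereferenced one slot past kdb_base_commands[] before switching over and reached the dynamic table one iteration late. A minimal userspace sketch (illustrative names and sizes, not kernel code) of the corrected iteration:

#include <stdio.h>

#define BASE_MAX 3				/* stands in for KDB_BASE_CMD_MAX */

static int base[BASE_MAX] = { 0, 1, 2 };	/* static command table */
static int dynamic[2]     = { 100, 101 };	/* dynamically registered commands */

int main(void)
{
	int *commands = dynamic;
	int max_commands = BASE_MAX + 2;
	int *cmd, num;

	/* Fixed order: bump num first, then decide which table cmd points
	 * into, so base[BASE_MAX] is never dereferenced. */
	for (cmd = base, num = 0;
	     num < max_commands;
	     num++, num == BASE_MAX ? cmd = commands : cmd++)
		printf("%d ", *cmd);
	printf("\n");				/* prints: 0 1 2 100 101 */
	return 0;
}

With the pre-fix ordering the same loop would print a garbage value read from base[3] before ever reaching the dynamic table.
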
@@ -646,7 +646,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
 	}
 	if (!s->usable)
 		return KDB_NOTIMP;
-	s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+	s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
 	if (!s->command) {
 		kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
 			   cmdstr);
@@ -2361,7 +2361,7 @@ static int kdb_pid(int argc, const char **argv)
  */
 static int kdb_ll(int argc, const char **argv)
 {
-	int diag;
+	int diag = 0;
 	unsigned long addr;
 	long offset = 0;
 	unsigned long va;
@@ -2400,20 +2400,21 @@ static int kdb_ll(int argc, const char **argv)
 		char buf[80];
 
 		if (KDB_FLAG(CMD_INTERRUPT))
-			return 0;
+			goto out;
 
 		sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
 		diag = kdb_parse(buf);
 		if (diag)
-			return diag;
+			goto out;
 
 		addr = va + linkoffset;
 		if (kdb_getword(&va, addr, sizeof(va)))
-			return 0;
+			goto out;
 	}
-	kfree(command);
 
-	return 0;
+out:
+	kfree(command);
+	return diag;
 }
 
 static int kdb_kgdb(int argc, const char **argv)
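
The kdb_ll() changes above are a leak fix: the command buffer is allocated earlier in the function, and the three early returns skipped the kfree(). Initializing diag and funnelling every exit through a single out: label is the classic single-exit cleanup pattern, sketched generically below (illustrative names, not the kdb code):

#include <stdlib.h>
#include <string.h>

static int process(const char *src)
{
	int err = 0;
	char *buf = malloc(strlen(src) + 1);

	if (!buf)
		return -1;	/* nothing allocated yet: plain return is fine */

	strcpy(buf, src);
	if (buf[0] == '\0') {
		err = -2;	/* early failure ...            */
		goto out;	/* ... still releases buf       */
	}
	/* ... main work on buf ... */
out:
	free(buf);		/* every later exit frees exactly once */
	return err;
}

int main(void)
{
	return process("example") ? 1 : 0;
}
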
@@ -2739,13 +2740,13 @@ int kdb_register_repeat(char *cmd,
 	}
 	if (kdb_commands) {
 		memcpy(new, kdb_commands,
-		       kdb_max_commands * sizeof(*new));
+		       (kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new));
 		kfree(kdb_commands);
 	}
 	memset(new + kdb_max_commands, 0,
 	       kdb_command_extend * sizeof(*new));
 	kdb_commands = new;
-	kp = kdb_commands + kdb_max_commands;
+	kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX;
 	kdb_max_commands += kdb_command_extend;
 }
 
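The kdb_register_repeat() change repairs table bookkeeping: kdb_max_commands counts the static entries too, while the kdb_commands array holds only the dynamically registered ones. Copying kdb_max_commands * sizeof(*new) therefore read past the old allocation, and the insertion pointer kp landed past the new one; both offsets must subtract KDB_BASE_CMD_MAX. A userspace sketch of the invariant (illustrative names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BASE_MAX 50			/* stands in for KDB_BASE_CMD_MAX */

int main(void)
{
	int max_commands = BASE_MAX + 4;	/* combined static + dynamic count */
	int extend = 50;
	int *commands = calloc(max_commands - BASE_MAX, sizeof(*commands));
	int *new = calloc(max_commands - BASE_MAX + extend, sizeof(*new));

	/* Copy only the dynamic entries that actually exist ... */
	memcpy(new, commands, (max_commands - BASE_MAX) * sizeof(*new));
	free(commands);
	commands = new;

	/* ... and offset the first-free-slot pointer the same way. */
	int *kp = commands + max_commands - BASE_MAX;
	printf("first free dynamic slot: %td\n", kp - commands);	/* 4 */

	free(commands);
	return 0;
}
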
diff --git a/kernel/futex.c b/kernel/futex.c
index 6c683b37f2ce..40a8777a27d0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2489,7 +2489,8 @@ void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
 	struct robust_list __user *entry, *next_entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	unsigned int uninitialized_var(next_pi);
 	unsigned long futex_offset;
 	int rc;
 
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 06da4dfc339b..a7934ac75e5b 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -49,7 +49,8 @@ void compat_exit_robust_list(struct task_struct *curr)
 {
 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
 	struct robust_list __user *entry, *next_entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	unsigned int uninitialized_var(next_pi);
 	compat_uptr_t uentry, next_uentry, upending;
 	compat_long_t futex_offset;
 	int rc;
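
Both futex changes silence the same false-positive "may be used uninitialized" warning: next_pi is only consumed on iterations where it has already been fetched. For reference, in kernels of this era the GCC flavour of the annotation was a self-assignment that generates no code:

/* From include/linux/compiler-gcc.h of this era: */
#define uninitialized_var(x) x = x

/* so the new declaration expands to a self-initialization: */
unsigned int next_pi = next_pi;
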
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index c7a8f453919e..aeaa7f846821 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -121,10 +121,10 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
 
 	switch (o->type) {
 	case PM_QOS_MIN:
-		return plist_last(&o->requests)->prio;
+		return plist_first(&o->requests)->prio;
 
 	case PM_QOS_MAX:
-		return plist_first(&o->requests)->prio;
+		return plist_last(&o->requests)->prio;
 
 	default:
 		/* runtime check for not using enum */
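
The pm_qos swap follows directly from plist ordering: a plist keeps its nodes sorted by ascending ->prio, so plist_first() yields the smallest request and plist_last() the largest. PM_QOS_MIN (the aggregate is the smallest request) must therefore read the head and PM_QOS_MAX the tail; the old code had the two backwards. A sketch with a sorted array standing in for the plist:

#include <stdio.h>

int main(void)
{
	/* Requests kept sorted ascending, as a plist would keep them. */
	int requests[] = { 10, 25, 100 };
	int n = sizeof(requests) / sizeof(requests[0]);

	printf("PM_QOS_MIN -> plist_first()->prio == %d\n", requests[0]);	/* 10  */
	printf("PM_QOS_MAX -> plist_last()->prio  == %d\n", requests[n - 1]);	/* 100 */
	return 0;
}
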
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 29bff6117abc..a5aff3ebad38 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -246,9 +246,13 @@ config PM_OPS
 	depends on PM_SLEEP || PM_RUNTIME
 	default y
 
+config ARCH_HAS_OPP
+	bool
+
 config PM_OPP
 	bool "Operating Performance Point (OPP) Layer library"
 	depends on PM
+	depends on ARCH_HAS_OPP
 	---help---
 	  SOCs have a standard set of tuples consisting of frequency and
 	  voltage pairs that the device will support per voltage domain. This
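
ARCH_HAS_OPP is a non-interactive bool, so the only way it becomes true is an architecture selecting it; PM_OPP is now offered only on platforms that have wired up OPP tables. A hypothetical platform entry would advertise support like this (illustrative symbol name, not a real arch):

config ARCH_EXAMPLE_SOC
	bool "Example SoC support"
	select ARCH_HAS_OPP
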
diff --git a/kernel/sched.c b/kernel/sched.c
index 324afce0e223..3e8a7db951a6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -557,18 +557,8 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
-	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 
-	/*
-	 * A queue event has occurred, and we're going to schedule.  In
-	 * this case, we can save a useless back to back clock update.
-	 */
-	if (test_tsk_need_resched(p))
-		rq->skip_clock_update = 1;
-}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -1980,6 +1970,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 	p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+	const struct sched_class *class;
+
+	if (p->sched_class == rq->curr->sched_class) {
+		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+	} else {
+		for_each_class(class) {
+			if (class == rq->curr->sched_class)
+				break;
+			if (class == p->sched_class) {
+				resched_task(rq->curr);
+				break;
+			}
+		}
+	}
+
+	/*
+	 * A queue event has occurred, and we're going to schedule.  In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(rq->curr))
+		rq->skip_clock_update = 1;
+}
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
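
The rewritten check_preempt_curr() makes cross-class wakeup preemption generic instead of leaving it to per-class hooks (note the matching removal of the rt_prio()/fair_sched_class special cases in sched_fair.c below). It relies on for_each_class() walking the classes from highest to lowest priority — stop, rt, fair, idle in this kernel — so whichever of the two classes is met first wins. A toy model of that walk (illustrative; the kernel uses linked struct sched_class objects):

#include <stdio.h>
#include <string.h>

/* Highest-to-lowest priority order, as for_each_class() walks it. */
static const char *classes[] = { "stop", "rt", "fair", "idle" };

static int preempts(const char *p_class, const char *curr_class)
{
	for (int i = 0; i < 4; i++) {
		if (!strcmp(classes[i], curr_class))
			return 0;	/* curr's class met first: no cross-class preempt */
		if (!strcmp(classes[i], p_class))
			return 1;	/* p's class met first: resched curr */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", preempts("rt", "fair"));	/* 1: an rt wakeup preempts fair */
	printf("%d\n", preempts("fair", "rt"));	/* 0: fair never preempts rt    */
	return 0;
}
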
@@ -6737,6 +6752,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	if (cpu != group_first_cpu(sd->groups))
 		return;
 
+	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+
 	child = sd->child;
 
 	sd->groups->cpu_power = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 82fd884b4e33..fdbdb5084c49 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1799,12 +1799,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	if (unlikely(rt_prio(p->prio)))
-		goto preempt;
-
-	if (unlikely(p->sched_class != &fair_sched_class))
-		return;
-
 	if (unlikely(se == pse))
 		return;
 
@@ -2226,13 +2220,16 @@ struct sd_lb_stats {
 	unsigned long this_load_per_task;
 	unsigned long this_nr_running;
 	unsigned long this_has_capacity;
+	unsigned int  this_idle_cpus;
 
 	/* Statistics of the busiest group */
+	unsigned int  busiest_idle_cpus;
 	unsigned long max_load;
 	unsigned long busiest_load_per_task;
 	unsigned long busiest_nr_running;
 	unsigned long busiest_group_capacity;
 	unsigned long busiest_has_capacity;
+	unsigned int  busiest_group_weight;
 
 	int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2254,6 +2251,8 @@ struct sg_lb_stats {
 	unsigned long sum_nr_running; /* Nr tasks running in the group */
 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
 	unsigned long group_capacity;
+	unsigned long idle_cpus;
+	unsigned long group_weight;
 	int group_imb; /* Is there an imbalance in the group ? */
 	int group_has_capacity; /* Is there extra capacity in the group? */
 };
@@ -2622,7 +2621,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 		sgs->group_load += load;
 		sgs->sum_nr_running += rq->nr_running;
 		sgs->sum_weighted_load += weighted_cpuload(i);
-
+		if (idle_cpu(i))
+			sgs->idle_cpus++;
 	}
 
 	/*
@@ -2660,6 +2660,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
+	sgs->group_weight = group->group_weight;
 
 	if (sgs->group_capacity > sgs->sum_nr_running)
 		sgs->group_has_capacity = 1;
@@ -2767,13 +2768,16 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		sds->this_nr_running = sgs.sum_nr_running;
 		sds->this_load_per_task = sgs.sum_weighted_load;
 		sds->this_has_capacity = sgs.group_has_capacity;
+		sds->this_idle_cpus = sgs.idle_cpus;
 	} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
 		sds->max_load = sgs.avg_load;
 		sds->busiest = sg;
 		sds->busiest_nr_running = sgs.sum_nr_running;
+		sds->busiest_idle_cpus = sgs.idle_cpus;
 		sds->busiest_group_capacity = sgs.group_capacity;
 		sds->busiest_load_per_task = sgs.sum_weighted_load;
 		sds->busiest_has_capacity = sgs.group_has_capacity;
+		sds->busiest_group_weight = sgs.group_weight;
 		sds->group_imb = sgs.group_imb;
 	}
 
@@ -3051,8 +3055,26 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
-	if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
-		goto out_balanced;
+	/*
+	 * In the CPU_NEWLY_IDLE case, use imbalance_pct to be conservative.
+	 * And to check for busy balance use !idle_cpu instead of
+	 * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
+	 * even when they are idle.
+	 */
+	if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
+		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+			goto out_balanced;
+	} else {
+		/*
+		 * This cpu is idle. If the busiest group doesn't carry
+		 * more tasks than the number of available cpu's and
+		 * there is no imbalance between this and the busiest
+		 * group wrt idle cpu's, it is balanced.
+		 */
+		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
+		    sds.busiest_nr_running <= sds.busiest_group_weight)
+			goto out_balanced;
+	}
 
 force_balance:
 	/* Looks like there is an imbalance. Compute it */
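
Worked numbers for the busy path may help: with an imbalance_pct of 125, a common default for these domains, the test 100 * max_load <= 125 * this_load keeps things balanced until the busiest group exceeds this group's load by more than 25%; the new else branch instead compares idle-CPU counts and task counts when this CPU is genuinely idle. A quick check of the threshold arithmetic (illustrative values):

#include <stdio.h>

int main(void)
{
	unsigned long this_load = 1000, imbalance_pct = 125;

	for (unsigned long max_load = 1200; max_load <= 1300; max_load += 50)
		printf("max_load=%lu balanced=%d\n", max_load,
		       100 * max_load <= imbalance_pct * this_load);
	/* prints 1, 1, 0: only a busiest load above 1250 (i.e. +25%)
	 * forces a balance on the busy path. */
	return 0;
}
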
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 45bddc0c1048..2bf6b47058c1 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -19,14 +19,14 @@ select_task_rq_stop(struct rq *rq, struct task_struct *p,
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 {
-	resched_task(rq->curr); /* we preempt everything */
+	/* we're never preempted */
 }
 
 static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
 	struct task_struct *stop = rq->stop;
 
-	if (stop && stop->state == TASK_RUNNING)
+	if (stop && stop->se.on_rq)
 		return stop;
 
 	return NULL;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9b520d74f052..a00fdefd24ce 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -690,7 +690,6 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &ten_thousand,
 	},
-#endif
 	{
 		.procname	= "dmesg_restrict",
 		.data		= &dmesg_restrict,
@@ -700,6 +699,7 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+#endif
 	{
 		.procname	= "ngroups_max",
 		.data		= &ngroups_max,
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e04b8bcdef88..ea37e2ff4164 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -126,7 +126,7 @@ if FTRACE
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
-	select FRAME_POINTER if (!ARM_UNWIND)
+	select FRAME_POINTER if !ARM_UNWIND && !S390
 	select KALLSYMS
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82d9b8106cd0..042084157980 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,7 +17,6 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>