Diffstat (limited to 'kernel')

 kernel/printk.c              |  5 +++++
 kernel/sched.c               | 18 +++++++++---------
 kernel/time/tick-broadcast.c |  6 ++++--
 3 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/kernel/printk.c b/kernel/printk.c
index e2129e83fd75..625d240d7ada 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -75,6 +75,8 @@ EXPORT_SYMBOL(oops_in_progress);
 static DECLARE_MUTEX(console_sem);
 static DECLARE_MUTEX(secondary_console_sem);
 struct console *console_drivers;
+EXPORT_SYMBOL_GPL(console_drivers);
+
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -121,6 +123,8 @@ struct console_cmdline
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
 static int selected_console = -1;
 static int preferred_console = -1;
+int console_set_on_cmdline;
+EXPORT_SYMBOL(console_set_on_cmdline);
 
 /* Flag: console code may call schedule() */
 static int console_may_schedule;
@@ -890,6 +894,7 @@ static int __init console_setup(char *str)
 	*s = 0;
 
 	__add_preferred_console(buf, idx, options, brl_options);
+	console_set_on_cmdline = 1;
 	return 1;
 }
 __setup("console=", console_setup);
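
The printk.c hunks export console_drivers and add a console_set_on_cmdline flag that console_setup() raises whenever a console= argument is parsed, so later boot code can tell whether the user explicitly chose a console. A minimal sketch of a consumer, assuming a hypothetical platform-setup call site (the "ttyS"/baud values are illustrative; add_preferred_console() is the existing kernel API):

	/* Hypothetical platform setup: register a fallback serial console
	 * only when no console= argument was given on the command line. */
	if (!console_set_on_cmdline)
		add_preferred_console("ttyS", 0, "115200");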
diff --git a/kernel/sched.c b/kernel/sched.c
index 4e2f60335656..8402944f715b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6539,9 +6539,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6735,7 +6735,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -6928,7 +6928,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7067,7 +7067,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7091,7 +7091,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7130,9 +7130,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7185,7 +7185,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {
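
Every sched.c hunk makes the same substitution: the node loops now stop at nr_node_ids (the runtime number of possible node IDs) instead of the compile-time MAX_NUMNODES ceiling set by CONFIG_NODES_SHIFT, so NUMA domain setup no longer scans node slots that cannot exist on the running machine. A standalone userspace model of the wrap-around scan in find_next_best_node(), with nr_node_ids fixed at 4 purely for illustration:

	#include <stdio.h>

	/* Models nr_node_ids: the number of possible node IDs at runtime,
	 * usually far below the compile-time MAX_NUMNODES ceiling. */
	static const int nr_node_ids = 4;

	int main(void)
	{
		int node = 2, i;

		/* Visit each possible node once, starting at @node and
		 * wrapping, mirroring the rewritten loop above. */
		for (i = 0; i < nr_node_ids; i++) {
			int n = (node + i) % nr_node_ids;
			printf("visit node %d\n", n);	/* prints 2 3 0 1 */
		}
		return 0;
	}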
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 57a1f02e5ec0..67f80c261709 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -30,6 +30,7 @@
 struct tick_device tick_broadcast_device;
 static cpumask_t tick_broadcast_mask;
 static DEFINE_SPINLOCK(tick_broadcast_lock);
+static int tick_broadcast_force;
 
 #ifdef CONFIG_TICK_ONESHOT
 static void tick_broadcast_clear_oneshot(int cpu);
@@ -232,10 +233,11 @@ static void tick_do_broadcast_on_off(void *why)
 				      CLOCK_EVT_MODE_SHUTDOWN);
 		}
 		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
-			dev->features |= CLOCK_EVT_FEAT_DUMMY;
+			tick_broadcast_force = 1;
 		break;
 	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-		if (cpu_isset(cpu, tick_broadcast_mask)) {
+		if (!tick_broadcast_force &&
+		    cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_clear(cpu, tick_broadcast_mask);
 			if (td->mode == TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
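
The tick-broadcast.c hunks replace the old trick of flagging the per-cpu device CLOCK_EVT_FEAT_DUMMY with a sticky tick_broadcast_force flag: once a BROADCAST_FORCE notification has been seen, later BROADCAST_OFF requests no longer pull the cpu out of tick_broadcast_mask. A standalone model of that state machine (the names and the single-cpu mask are illustrative simplifications, not the kernel's):

	#include <stdio.h>

	enum notify { BROADCAST_ON, BROADCAST_FORCE, BROADCAST_OFF };

	static int broadcast_force;	/* sticky, mirrors tick_broadcast_force */
	static int in_broadcast_mask;	/* this cpu's bit in tick_broadcast_mask */

	static void do_broadcast_on_off(enum notify reason)
	{
		switch (reason) {
		case BROADCAST_ON:
		case BROADCAST_FORCE:
			in_broadcast_mask = 1;
			if (reason == BROADCAST_FORCE)
				broadcast_force = 1;	/* never cleared again */
			break;
		case BROADCAST_OFF:
			/* After a FORCE, OFF requests are ignored. */
			if (!broadcast_force && in_broadcast_mask)
				in_broadcast_mask = 0;
			break;
		}
	}

	int main(void)
	{
		do_broadcast_on_off(BROADCAST_FORCE);
		do_broadcast_on_off(BROADCAST_OFF);
		printf("still broadcasting: %d\n", in_broadcast_mask);	/* 1 */
		return 0;
	}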
