Diffstat (limited to 'arch/i386/kernel')
32 files changed, 548 insertions(+), 245 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 65656c033d70..5b9ed21216cf 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y	:= process.o semaphore.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
 		pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
-		quirks.o i8237.o topology.o
+		quirks.o i8237.o topology.o alternative.o
 
 obj-y				+= cpu/
 obj-y				+= timers/
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
new file mode 100644
index 000000000000..5cbd6f99fb2a
--- /dev/null
+++ b/arch/i386/kernel/alternative.c
@@ -0,0 +1,321 @@
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <asm/alternative.h>
+#include <asm/sections.h>
+
+#define DEBUG 0
+#if DEBUG
+# define DPRINTK(fmt, args...) printk(fmt, args)
+#else
+# define DPRINTK(fmt, args...)
+#endif
+
+/* Use inline assembly to define this because the nops are defined
+   as inline assembly strings in the include files and we cannot
+   get them easily into strings. */
+asm("\t.data\nintelnops: "
+	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
+	GENERIC_NOP7 GENERIC_NOP8);
+asm("\t.data\nk8nops: "
+	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+	K8_NOP7 K8_NOP8);
+asm("\t.data\nk7nops: "
+	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+	K7_NOP7 K7_NOP8);
+
+extern unsigned char intelnops[], k8nops[], k7nops[];
+static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
+	NULL,
+	intelnops,
+	intelnops + 1,
+	intelnops + 1 + 2,
+	intelnops + 1 + 2 + 3,
+	intelnops + 1 + 2 + 3 + 4,
+	intelnops + 1 + 2 + 3 + 4 + 5,
+	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
+	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+	NULL,
+	k8nops,
+	k8nops + 1,
+	k8nops + 1 + 2,
+	k8nops + 1 + 2 + 3,
+	k8nops + 1 + 2 + 3 + 4,
+	k8nops + 1 + 2 + 3 + 4 + 5,
+	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
+	NULL,
+	k7nops,
+	k7nops + 1,
+	k7nops + 1 + 2,
+	k7nops + 1 + 2 + 3,
+	k7nops + 1 + 2 + 3 + 4,
+	k7nops + 1 + 2 + 3 + 4 + 5,
+	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
+	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static struct nop {
+	int cpuid;
+	unsigned char **noptable;
+} noptypes[] = {
+	{ X86_FEATURE_K8, k8_nops },
+	{ X86_FEATURE_K7, k7_nops },
+	{ -1, NULL }
+};
+
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
+extern u8 *__smp_locks[], *__smp_locks_end[];
+
+extern u8 __smp_alt_begin[], __smp_alt_end[];
+
+
+static unsigned char** find_nop_table(void)
+{
+	unsigned char **noptable = intel_nops;
+	int i;
+
+	for (i = 0; noptypes[i].cpuid >= 0; i++) {
+		if (boot_cpu_has(noptypes[i].cpuid)) {
+			noptable = noptypes[i].noptable;
+			break;
+		}
+	}
+	return noptable;
+}
+
+/* Replace instructions with better alternatives for this CPU type.
+   This runs before SMP is initialized to avoid SMP problems with
+   self modifying code. This implies that assymetric systems where
+   APs have less capabilities than the boot processor are not handled.
+   Tough. Make sure you disable such features by hand. */
+
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+	unsigned char **noptable = find_nop_table();
+	struct alt_instr *a;
+	int diff, i, k;
+
+	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
+	for (a = start; a < end; a++) {
+		BUG_ON(a->replacementlen > a->instrlen);
+		if (!boot_cpu_has(a->cpuid))
+			continue;
+		memcpy(a->instr, a->replacement, a->replacementlen);
+		diff = a->instrlen - a->replacementlen;
+		/* Pad the rest with nops */
+		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+			k = diff;
+			if (k > ASM_NOP_MAX)
+				k = ASM_NOP_MAX;
+			memcpy(a->instr + i, noptable[k], k);
+		}
+	}
+}
+
+static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
+{
+	struct alt_instr *a;
+
+	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
+	for (a = start; a < end; a++) {
+		memcpy(a->replacement + a->replacementlen,
+		       a->instr,
+		       a->instrlen);
+	}
+}
+
+static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
+{
+	struct alt_instr *a;
+
+	for (a = start; a < end; a++) {
+		memcpy(a->instr,
+		       a->replacement + a->replacementlen,
+		       a->instrlen);
+	}
+}
+
+static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+{
+	u8 **ptr;
+
+	for (ptr = start; ptr < end; ptr++) {
+		if (*ptr < text)
+			continue;
+		if (*ptr > text_end)
+			continue;
+		**ptr = 0xf0; /* lock prefix */
+	};
+}
+
+static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+{
+	unsigned char **noptable = find_nop_table();
+	u8 **ptr;
+
+	for (ptr = start; ptr < end; ptr++) {
+		if (*ptr < text)
+			continue;
+		if (*ptr > text_end)
+			continue;
+		**ptr = noptable[1][0];
+	};
+}
+
+struct smp_alt_module {
+	/* what is this ??? */
+	struct module	*mod;
+	char		*name;
+
+	/* ptrs to lock prefixes */
+	u8		**locks;
+	u8		**locks_end;
+
+	/* .text segment, needed to avoid patching init code ;) */
+	u8		*text;
+	u8		*text_end;
+
+	struct list_head next;
+};
+static LIST_HEAD(smp_alt_modules);
+static DEFINE_SPINLOCK(smp_alt);
+
+static int smp_alt_once = 0;
+static int __init bootonly(char *str)
+{
+	smp_alt_once = 1;
+	return 1;
+}
+__setup("smp-alt-boot", bootonly);
+
+void alternatives_smp_module_add(struct module *mod, char *name,
+				 void *locks, void *locks_end,
+				 void *text,  void *text_end)
+{
+	struct smp_alt_module *smp;
+	unsigned long flags;
+
+	if (smp_alt_once) {
+		if (boot_cpu_has(X86_FEATURE_UP))
+			alternatives_smp_unlock(locks, locks_end,
+						text, text_end);
+		return;
+	}
+
+	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+	if (NULL == smp)
+		return; /* we'll run the (safe but slow) SMP code then ... */
+
+	smp->mod	= mod;
+	smp->name	= name;
+	smp->locks	= locks;
+	smp->locks_end	= locks_end;
+	smp->text	= text;
+	smp->text_end	= text_end;
+	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
+		__FUNCTION__, smp->locks, smp->locks_end,
+		smp->text, smp->text_end, smp->name);
+
+	spin_lock_irqsave(&smp_alt, flags);
+	list_add_tail(&smp->next, &smp_alt_modules);
+	if (boot_cpu_has(X86_FEATURE_UP))
+		alternatives_smp_unlock(smp->locks, smp->locks_end,
+					smp->text, smp->text_end);
+	spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void alternatives_smp_module_del(struct module *mod)
+{
+	struct smp_alt_module *item;
+	unsigned long flags;
+
+	if (smp_alt_once)
+		return;
+
+	spin_lock_irqsave(&smp_alt, flags);
+	list_for_each_entry(item, &smp_alt_modules, next) {
+		if (mod != item->mod)
+			continue;
+		list_del(&item->next);
+		spin_unlock_irqrestore(&smp_alt, flags);
+		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
+		kfree(item);
+		return;
+	}
+	spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void alternatives_smp_switch(int smp)
+{
+	struct smp_alt_module *mod;
+	unsigned long flags;
+
+	if (smp_alt_once)
+		return;
+	BUG_ON(!smp && (num_online_cpus() > 1));
+
+	spin_lock_irqsave(&smp_alt, flags);
+	if (smp) {
+		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
+		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		alternatives_smp_apply(__smp_alt_instructions,
+				       __smp_alt_instructions_end);
+		list_for_each_entry(mod, &smp_alt_modules, next)
+			alternatives_smp_lock(mod->locks, mod->locks_end,
+					      mod->text, mod->text_end);
+	} else {
+		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		apply_alternatives(__smp_alt_instructions,
+				   __smp_alt_instructions_end);
+		list_for_each_entry(mod, &smp_alt_modules, next)
+			alternatives_smp_unlock(mod->locks, mod->locks_end,
+						mod->text, mod->text_end);
+	}
+	spin_unlock_irqrestore(&smp_alt, flags);
+}
+
+void __init alternative_instructions(void)
+{
+	apply_alternatives(__alt_instructions, __alt_instructions_end);
+
+	/* switch to patch-once-at-boottime-only mode and free the
+	 * tables in case we know the number of CPUs will never ever
+	 * change */
+#ifdef CONFIG_HOTPLUG_CPU
+	if (num_possible_cpus() < 2)
+		smp_alt_once = 1;
+#else
+	smp_alt_once = 1;
+#endif
+
+	if (smp_alt_once) {
+		if (1 == num_possible_cpus()) {
+			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
+			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+			apply_alternatives(__smp_alt_instructions,
+					   __smp_alt_instructions_end);
+			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
+						_text, _etext);
+		}
+		free_init_pages("SMP alternatives",
+				(unsigned long)__smp_alt_begin,
+				(unsigned long)__smp_alt_end);
+	} else {
+		alternatives_smp_save(__smp_alt_instructions,
+				      __smp_alt_instructions_end);
+		alternatives_smp_module_add(NULL, "core kernel",
+					    __smp_locks, __smp_locks_end,
+					    _text, _etext);
+		alternatives_smp_switch(0);
+	}
+}
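
Background for the new file above: apply_alternatives() walks an array of alt_instr records that the alternative() macro family emits into the .altinstructions section, copying the replacement over the original instruction and padding any remainder with CPU-appropriate nops. The record layout and the call site below are a sketch reconstructed from the include/asm-i386/alternative.h header added elsewhere in this commit (not shown in this section), so field order and the exact macro body should be treated as approximate:

	/* Sketch of one .altinstructions record (reconstructed, approximate). */
	struct alt_instr {
		u8 *instr;		/* original instruction */
		u8 *replacement;
		u8  cpuid;		/* cpuid bit set for replacement */
		u8  instrlen;		/* length of original instruction */
		u8  replacementlen;	/* length of new instruction, <= instrlen */
		u8  pad;
	};

	/* A typical caller of the era: patch the memory barrier to 'mfence'
	 * where X86_FEATURE_XMM2 is set, keep the locked add elsewhere. */
	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
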
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 776c90989e06..eb5279d23b7f 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -38,6 +38,7 @@
 #include <asm/i8253.h>
 
 #include <mach_apic.h>
+#include <mach_apicdef.h>
 #include <mach_ipi.h>
 
 #include "io_ports.h"
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index f52669ecb93f..bd75629dd262 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -4,6 +4,7 @@
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/e820.h>
+#include <asm/mtrr.h>
 #include "cpu.h"
 
 #ifdef CONFIG_X86_OOSTORE
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index e6bd095ae108..7e3d6b6a4e96 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -25,9 +25,10 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
 
-static int cachesize_override __devinitdata = -1;
-static int disable_x86_fxsr __devinitdata = 0;
-static int disable_x86_serial_nr __devinitdata = 1;
+static int cachesize_override __cpuinitdata = -1;
+static int disable_x86_fxsr __cpuinitdata;
+static int disable_x86_serial_nr __cpuinitdata = 1;
+static int disable_x86_sep __cpuinitdata;
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
@@ -59,7 +60,7 @@ static int __init cachesize_setup(char *str)
 }
 __setup("cachesize=", cachesize_setup);
 
-int __devinit get_model_name(struct cpuinfo_x86 *c)
+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
@@ -89,7 +90,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c)
 }
 
 
-void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ecx, edx, l2size;
 
@@ -130,7 +131,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
 /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
 
 /* Look up CPU names by table lookup. */
-static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	struct cpu_model_info *info;
 
@@ -151,7 +152,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
 
-static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -187,6 +188,14 @@ static int __init x86_fxsr_setup(char * s)
 __setup("nofxsr", x86_fxsr_setup);
 
 
+static int __init x86_sep_setup(char * s)
+{
+	disable_x86_sep = 1;
+	return 1;
+}
+__setup("nosep", x86_sep_setup);
+
+
 /* Standard macro to see if a specific flag is changeable */
 static inline int flag_is_changeable_p(u32 flag)
 {
@@ -210,7 +219,7 @@ static inline int flag_is_changeable_p(u32 flag)
 
 
 /* Probe for the CPUID instruction */
-static int __devinit have_cpuid_p(void)
+static int __cpuinit have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
@@ -254,7 +263,7 @@ static void __init early_cpu_detect(void)
 	}
 }
 
-void __devinit generic_identify(struct cpuinfo_x86 * c)
+void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
 	int junk;
@@ -307,7 +316,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
 #endif
 }
 
-static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
 		/* Disable processor serial number */
@@ -335,7 +344,7 @@ __setup("serialnumber", x86_serial_nr_setup);
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-void __devinit identify_cpu(struct cpuinfo_x86 *c)
+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
@@ -405,6 +414,10 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
 		clear_bit(X86_FEATURE_XMM, c->x86_capability);
 	}
 
+	/* SEP disabled? */
+	if (disable_x86_sep)
+		clear_bit(X86_FEATURE_SEP, c->x86_capability);
+
 	if (disable_pse)
 		clear_bit(X86_FEATURE_PSE, c->x86_capability);
 
@@ -417,7 +430,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
 		else
 			/* Last resort... */
 			sprintf(c->x86_model_id, "%02x/%02x",
-				c->x86_vendor, c->x86_model);
+				c->x86, c->x86_model);
 	}
 
 	/* Now the feature flags better reflect actual CPU features! */
@@ -453,7 +466,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
 }
 
 #ifdef CONFIG_X86_HT
-void __devinit detect_ht(struct cpuinfo_x86 *c)
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
@@ -500,7 +513,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c)
 }
 #endif
 
-void __devinit print_cpu_info(struct cpuinfo_x86 *c)
+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	char *vendor = NULL;
 
@@ -523,7 +536,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c)
 	printk("\n");
 }
 
-cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
 /* This is hacky. :)
  * We're emulating future behavior.
@@ -570,7 +583,7 @@ void __init early_cpu_init(void)
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
  */
-void __devinit cpu_init(void)
+void __cpuinit cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
@@ -670,7 +683,7 @@ void __devinit cpu_init(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void __devinit cpu_uninit(void)
+void __cpuinit cpu_uninit(void)
 {
 	int cpu = raw_smp_processor_id();
 	cpu_clear(cpu, cpu_initialized);
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e11a09207ec8..3d5110b65cc3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0;
 
-	for (i=0; i<NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i) {
 		if (check_supported_cpu(i))
 			supported_cpus++;
 	}
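
This hunk is the first of several in the commit that replace open-coded NR_CPUS scans with cpumask iterators. In this kernel generation, for_each_cpu(i) walks cpu_possible_map (the macro was later renamed for_each_possible_cpu) and for_each_online_cpu(i) walks cpu_online_map, so a sketch of the substitution is:

	/* Old pattern: probe every slot up to the compile-time maximum. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		/* ... per-cpu work ... */
	}

	/* New pattern: iterate only CPUs present in the relevant cpumask.
	 * Note that for_each_cpu() visits *possible* CPUs, a slightly
	 * wider set than the online check it replaces in this hunk. */
	for_each_cpu(i) {
		/* ... per-cpu work ... */
	}
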
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 8c0120186b9f..5386b29bb5a5 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void);
 struct movsl_mask movsl_mask __read_mostly;
 #endif
 
-void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
+void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
 {
 	if (c->x86_vendor != X86_VENDOR_INTEL)
 		return;
@@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
  *	This is called before we do cpu ident work
  */
 
-int __devinit ppro_with_ram_bug(void)
+int __cpuinit ppro_with_ram_bug(void)
 {
 	/* Uses data from early_cpu_detect now */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void)
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 */
-static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
+static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
@@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 /*
  * find out the number of processor cores on the die
  */
-static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
+static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;
 
@@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-static void __devinit init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
 	char *p = NULL;
@@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 	return size;
 }
 
-static struct cpu_dev intel_cpu_dev __devinitdata = {
+static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_vendor = "Intel",
 	.c_ident = { "GenuineIntel" },
 	.c_models = {
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index ffe58cee0c48..ce61921369e5 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -174,7 +174,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 
-	if (c->cpuid_level > 4) {
+	if (c->cpuid_level > 3) {
 		static int is_initialized;
 
 		if (is_initialized == 0) {
@@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		}
 	}
 }
-static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	int sibling;
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 89a85af33d28..5cfbd8011698 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		/* Other (Linux-defined) */
 		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
 		NULL, NULL, NULL, NULL,
-		"constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		"constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index d49dbe8dc96b..e3c5fca0aa8a 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -105,7 +105,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
 		return 1;
 	local_irq_disable();
 
-	if (!user_mode(regs)) {
+	if (!user_mode_vm(regs)) {
 		crash_fixup_ss_esp(&fixed_regs, regs);
 		regs = &fixed_regs;
 	}
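
The user_mode() to user_mode_vm() switch here (and in process.c below) matters for vm86 tasks: a vm86 frame carries ring-0 segment selectors, so checking only the CS RPL bits misclassifies it as kernel mode. A sketch of the two predicates as defined in include/asm-i386/ptrace.h of this era (quoted from memory, so treat as approximate):

	#define user_mode(regs)    (3 & (regs)->xcs)
	#define user_mode_vm(regs) ((VM_MASK & (regs)->eflags) || user_mode(regs))
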
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index c9cad7ba0d2d..7ec6cfa01fb3 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -115,7 +115,7 @@ static void efi_call_phys_epilog(void)
 	unsigned long cr4;
 	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, 0);
 
-	cpu_gdt_descr->address = __va(cpu_gdt_descr->address);
+	cpu_gdt_descr->address = (unsigned long)__va(cpu_gdt_descr->address);
 	load_gdt(cpu_gdt_descr);
 
 	cr4 = read_cr4();
@@ -543,7 +543,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 		if ((md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >
 		    0x100000000ULL)
 			continue;
-		res = alloc_bootmem_low(sizeof(struct resource));
+		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
 		switch (md->type) {
 		case EFI_RESERVED_TYPE:
 			res->name = "Reserved Memory";
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 4d704724b2f5..cfc683f153b9 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -226,6 +226,10 @@ ENTRY(system_call)
 	pushl %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
+	testl $TF_MASK,EFLAGS(%esp)
+	jz no_singlestep
+	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
+no_singlestep:
 					# system call tracing in operation / emulation
 	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
 	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index e0b7c632efbc..3debc2e26542 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -450,7 +450,6 @@ int_msg:
 
 .globl boot_gdt_descr
 .globl idt_descr
-.globl cpu_gdt_descr
 
 	ALIGN
 # early boot GDT descriptor (must use 1:1 address mapping)
@@ -470,8 +469,6 @@ cpu_gdt_descr:
 	.word GDT_ENTRIES*8-1
 	.long cpu_gdt_table
 
-	.fill NR_CPUS-1,8,0	# space for the other GDT descriptors
-
 /*
  * The boot_gdt_table must mirror the equivalent in setup.S and is
  * used only for booting.
@@ -485,7 +482,7 @@ ENTRY(boot_gdt_table)
 /*
  * The Global Descriptor Table contains 28 quadwords, per-CPU.
  */
-	.align PAGE_SIZE_asm
+	.align L1_CACHE_BYTES
 ENTRY(cpu_gdt_table)
 	.quad 0x0000000000000000	/* NULL descriptor */
 	.quad 0x0000000000000000	/* 0x0b reserved */
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 39d9a5fa907e..311b4e7266f1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 	int i, j;
 	Dprintk("Rotating IRQs among CPUs.\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+	for_each_online_cpu(i) {
+		for (j = 0; j < NR_IRQS; j++) {
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
 		}
 	}
 	/* Find the least loaded processor package */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
 	 */
 	tmp_cpu_irq = 0;
 	tmp_loaded = -1;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
 	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
+		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
+		irq_cpu_data[i].last_irq = NULL;
 	}
 	return 0;
 }
@@ -1761,7 +1757,8 @@ static void __init setup_ioapic_ids_from_mpc(void)
 	 * Don't check I/O APIC IDs for xAPIC systems. They have
 	 * no meaning without the serial APIC bus.
 	 */
-	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15))
+	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
 		return;
 	/*
 	 * This is broken; anything with a real cpu count has to
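
The rewritten condition above (and the matching one in mpparse.c below) stops guessing xAPIC capability from the CPU family and instead asks the APIC version register directly. The predicate it relies on is, to the best of my reading of the era's headers (location and exact form assumed, not shown in this section):

	/* xAPICs report an APIC version of 0x14 or newer; they have no
	 * serial APIC bus, so I/O APIC IDs carry no meaning there. */
	#define APIC_XAPIC(ver)	((ver) >= 0x14)
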
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 694a13997637..7a59050242a7 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -84,9 +84,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 }
 
 static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c
index 5149c8a621f0..470cf97e7cd3 100644
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -104,26 +104,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 		return -ENOEXEC;
 }
 
-extern void apply_alternatives(void *start, void *end);
-
 int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
-	const Elf_Shdr *s;
+	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
-	/* look for .altinstructions to patch */
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
-		void *seg;
-		if (strcmp(".altinstructions", secstrings + s->sh_name))
-			continue;
-		seg = (void *)s->sh_addr;
-		apply_alternatives(seg, seg + s->sh_size);
-	}
+		if (!strcmp(".text", secstrings + s->sh_name))
+			text = s;
+		if (!strcmp(".altinstructions", secstrings + s->sh_name))
+			alt = s;
+		if (!strcmp(".smp_locks", secstrings + s->sh_name))
+			locks= s;
+	}
+
+	if (alt) {
+		/* patch .altinstructions */
+		void *aseg = (void *)alt->sh_addr;
+		apply_alternatives(aseg, aseg + alt->sh_size);
+	}
+	if (locks && text) {
+		void *lseg = (void *)locks->sh_addr;
+		void *tseg = (void *)text->sh_addr;
+		alternatives_smp_module_add(me, me->name,
+					    lseg, lseg + locks->sh_size,
+					    tseg, tseg + text->sh_size);
+	}
 	return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
+	alternatives_smp_module_del(mod);
 }
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index e6e2f43db85e..8d8aa9d1796d 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -828,6 +828,8 @@ void __init find_smp_config (void)
 			smp_scan_config(address, 0x400);
 }
 
+int es7000_plat;
+
 /* --------------------------------------------------------------------------
                             ACPI-based MP Configuration
    -------------------------------------------------------------------------- */
@@ -935,7 +937,8 @@ void __init mp_register_ioapic (
 	mp_ioapics[idx].mpc_apicaddr = address;
 
 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		&& !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
 		tmpid = io_apic_get_unique_id(idx, id);
 	else
 		tmpid = id;
@@ -1011,8 +1014,6 @@ void __init mp_override_legacy_irq (
 		return;
 	}
 
-int es7000_plat;
-
 void __init mp_config_acpi_legacy_irqs (void)
 {
 	struct mpc_config_intsrc intsrc;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index be87c5e2ee95..9074818b9473 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_cpu(cpu) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
 		   after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
 	 * Just reset the alert counters, (other CPUs might be
 	 * spinning on locks we hold):
 	 */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		alert_counter[i] = 0;
 
 	/*
@@ -543,7 +543,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 		/*
 		 * die_nmi will return ONLY if NOTIFY_STOP happens..
 		 */
-		die_nmi(regs, "NMI Watchdog detected LOCKUP");
+		die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
 	} else {
 		last_irq_sums[cpu] = sum;
 		alert_counter[cpu] = 0;
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 0480454ebffa..299e61674084 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -295,7 +295,7 @@ void show_regs(struct pt_regs * regs)
 	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->eip);
 
-	if (user_mode(regs))
+	if (user_mode_vm(regs))
 		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
 	printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
 	       regs->eflags, print_tainted(), system_utsname.release,
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 5c1fb6aada5b..506462ef36a0 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -34,10 +34,10 @@
 
 /*
  * Determines which flags the user has access to [1 = access, 0 = no access].
- * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+ * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9).
  * Also masks reserved bits (31-22, 15, 5, 3, 1).
  */
-#define FLAG_MASK 0x00054dd5
+#define FLAG_MASK 0x00050dd5
 
 /* set's the trap flag. */
 #define TRAP_FLAG 0x100
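
A quick check of the new constant: the comment gains NT(14), and bit 14 is 0x4000, so the mask drops exactly that bit:

	#define X86_EFLAGS_NT	0x00004000	/* 1 << 14 */
	/* 0x00054dd5 & ~0x00004000 == 0x00050dd5, the new FLAG_MASK:
	 * ptrace can no longer let a traced task set NT in EFLAGS. */
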
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
index 7455ab643943..967dc74df9ee 100644
--- a/arch/i386/kernel/semaphore.c
+++ b/arch/i386/kernel/semaphore.c
@@ -110,11 +110,11 @@ asm(
 ".align 4\n"
 ".globl __write_lock_failed\n"
 "__write_lock_failed:\n\t"
-	LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
+	LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
 "1:	rep; nop\n\t"
 	"cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
 	"jne 1b\n\t"
-	LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
+	LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
 	"jnz __write_lock_failed\n\t"
 	"ret"
 );
@@ -124,11 +124,11 @@ asm(
 ".align 4\n"
 ".globl __read_lock_failed\n"
 "__read_lock_failed:\n\t"
-	LOCK "incl (%eax)\n"
+	LOCK_PREFIX "incl (%eax)\n"
 "1:	rep; nop\n\t"
 	"cmpl $1,(%eax)\n\t"
 	"js 1b\n\t"
-	LOCK "decl (%eax)\n\t"
+	LOCK_PREFIX "decl (%eax)\n\t"
 	"js __read_lock_failed\n\t"
 	"ret"
 );
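
LOCK_PREFIX, which replaces the bare LOCK macro in this file, is defined in the include/asm-i386/alternative.h header this commit adds (not shown in this section). Reconstructed from memory, so treat the exact assembly as approximate: it emits the lock prefix and records the prefix byte's address in .smp_locks, which is precisely the table that alternatives_smp_lock()/alternatives_smp_unlock() in alternative.c patch between 0xf0 and a one-byte nop:

	#ifdef CONFIG_SMP
	#define LOCK_PREFIX \
			".section .smp_locks,\"a\"\n"	\
			"  .align 4\n"			\
			"  .long 661f\n" /* address of the prefix byte */ \
			".previous\n"			\
			"661:\n\tlock; "
	#else
	#define LOCK_PREFIX ""
	#endif
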
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index ab62a9f4701e..d313a11acafa 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1288,7 +1288,7 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
 		struct resource *res;
 		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
 			continue;
-		res = alloc_bootmem_low(sizeof(struct resource));
+		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
 		switch (e820.map[i].type) {
 		case E820_RAM:	res->name = "System RAM"; break;
 		case E820_ACPI:	res->name = "ACPI Tables"; break;
@@ -1316,13 +1316,15 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
 
 /*
  * Request address space for all standard resources
+ *
+ * This is called just before pcibios_assign_resources(), which is also
+ * an fs_initcall, but is linked in later (in arch/i386/pci/i386.c).
  */
-static void __init register_memory(void)
+static int __init request_standard_resources(void)
 {
-	unsigned long gapstart, gapsize, round;
-	unsigned long long last;
-	int i;
+	int i;
 
+	printk("Setting up standard PCI resources\n");
 	if (efi_enabled)
 		efi_initialize_iomem_resources(&code_resource, &data_resource);
 	else
@@ -1334,6 +1336,16 @@ static void __init register_memory(void)
 	/* request I/O space for devices used on all i[345]86 PCs */
 	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
 		request_resource(&ioport_resource, &standard_io_resources[i]);
+	return 0;
+}
+
+fs_initcall(request_standard_resources);
+
+static void __init register_memory(void)
+{
+	unsigned long gapstart, gapsize, round;
+	unsigned long long last;
+	int i;
 
 	/*
 	 * Search for the bigest gap in the low 32 bits of the e820
@@ -1377,101 +1389,6 @@ static void __init register_memory(void)
 			pci_mem_start, gapstart, gapsize);
 }
 
-/* Use inline assembly to define this because the nops are defined
-   as inline assembly strings in the include files and we cannot
-   get them easily into strings. */
-asm("\t.data\nintelnops: "
-	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
-	GENERIC_NOP7 GENERIC_NOP8);
-asm("\t.data\nk8nops: "
-	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-	K8_NOP7 K8_NOP8);
-asm("\t.data\nk7nops: "
-	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-	K7_NOP7 K7_NOP8);
-
-extern unsigned char intelnops[], k8nops[], k7nops[];
-static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
-	NULL,
-	intelnops,
-	intelnops + 1,
-	intelnops + 1 + 2,
-	intelnops + 1 + 2 + 3,
-	intelnops + 1 + 2 + 3 + 4,
-	intelnops + 1 + 2 + 3 + 4 + 5,
-	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
-	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
-	NULL,
-	k8nops,
-	k8nops + 1,
-	k8nops + 1 + 2,
-	k8nops + 1 + 2 + 3,
-	k8nops + 1 + 2 + 3 + 4,
-	k8nops + 1 + 2 + 3 + 4 + 5,
-	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
-	NULL,
-	k7nops,
-	k7nops + 1,
-	k7nops + 1 + 2,
-	k7nops + 1 + 2 + 3,
-	k7nops + 1 + 2 + 3 + 4,
-	k7nops + 1 + 2 + 3 + 4 + 5,
-	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
-	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-};
-static struct nop {
-	int cpuid;
-	unsigned char **noptable;
-} noptypes[] = {
-	{ X86_FEATURE_K8, k8_nops },
-	{ X86_FEATURE_K7, k7_nops },
-	{ -1, NULL }
-};
-
-/* Replace instructions with better alternatives for this CPU type.
-
-   This runs before SMP is initialized to avoid SMP problems with
-   self modifying code. This implies that assymetric systems where
-   APs have less capabilities than the boot processor are not handled.
-   Tough. Make sure you disable such features by hand. */
-void apply_alternatives(void *start, void *end)
-{
-	struct alt_instr *a;
-	int diff, i, k;
-	unsigned char **noptable = intel_nops;
-	for (i = 0; noptypes[i].cpuid >= 0; i++) {
-		if (boot_cpu_has(noptypes[i].cpuid)) {
-			noptable = noptypes[i].noptable;
-			break;
-		}
-	}
-	for (a = start; (void *)a < end; a++) {
-		if (!boot_cpu_has(a->cpuid))
-			continue;
-		BUG_ON(a->replacementlen > a->instrlen);
-		memcpy(a->instr, a->replacement, a->replacementlen);
-		diff = a->instrlen - a->replacementlen;
-		/* Pad the rest with nops */
-		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-			k = diff;
-			if (k > ASM_NOP_MAX)
-				k = ASM_NOP_MAX;
-			memcpy(a->instr + i, noptable[k], k);
-		}
-	}
-}
-
-void __init alternative_instructions(void)
-{
-	extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-	apply_alternatives(__alt_instructions, __alt_instructions_end);
-}
-
 static char * __init machine_specific_memory_setup(void);
 
 #ifdef CONFIG_MCA
@@ -1554,6 +1471,16 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_cmdline_early(cmdline_p);
 
+#ifdef CONFIG_EARLY_PRINTK
+	{
+		char *s = strstr(*cmdline_p, "earlyprintk=");
+		if (s) {
+			setup_early_printk(strchr(s, '=') + 1);
+			printk("early console enabled\n");
+		}
+	}
+#endif
+
 	max_low_pfn = setup_memory();
 
 	/*
@@ -1578,19 +1505,6 @@ void __init setup_arch(char **cmdline_p)
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */
 
-#ifdef CONFIG_EARLY_PRINTK
-	{
-		char *s = strstr(*cmdline_p, "earlyprintk=");
-		if (s) {
-			extern void setup_early_printk(char *);
-
-			setup_early_printk(strchr(s, '=') + 1);
-			printk("early console enabled\n");
-		}
-	}
-#endif
-
-
 	dmi_scan_machine();
 
 #ifdef CONFIG_X86_GENERICARCH
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 963616d364ec..5c352c3a9e7f 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c | |||
| @@ -123,7 +123,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax | |||
| 123 | err |= __get_user(tmp, &sc->seg); \ | 123 | err |= __get_user(tmp, &sc->seg); \ |
| 124 | loadsegment(seg,tmp); } | 124 | loadsegment(seg,tmp); } |
| 125 | 125 | ||
| 126 | #define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \ | 126 | #define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \ |
| 127 | X86_EFLAGS_OF | X86_EFLAGS_DF | \ | ||
| 127 | X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ | 128 | X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ |
| 128 | X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) | 129 | X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) |
| 129 | 130 | ||
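The new mask additionally preserves X86_EFLAGS_RF across sigreturn. The statement that consumes FIX_EFLAGS is not part of this hunk; its usual shape, shown here only as a sketch, is to take just the whitelisted bits from the user-supplied value:

	/* Sketch: tmp holds eflags read back from the user sigcontext;
	 * only bits in FIX_EFLAGS may come from userspace. */
	regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmp & FIX_EFLAGS);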
| @@ -582,9 +583,6 @@ static void fastcall do_signal(struct pt_regs *regs) | |||
| 582 | if (!user_mode(regs)) | 583 | if (!user_mode(regs)) |
| 583 | return; | 584 | return; |
| 584 | 585 | ||
| 585 | if (try_to_freeze()) | ||
| 586 | goto no_signal; | ||
| 587 | |||
| 588 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 586 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
| 589 | oldset = ¤t->saved_sigmask; | 587 | oldset = ¤t->saved_sigmask; |
| 590 | else | 588 | else |
| @@ -613,7 +611,6 @@ static void fastcall do_signal(struct pt_regs *regs) | |||
| 613 | return; | 611 | return; |
| 614 | } | 612 | } |
| 615 | 613 | ||
| 616 | no_signal: | ||
| 617 | /* Did we come from a system call? */ | 614 | /* Did we come from a system call? */ |
| 618 | if (regs->orig_eax >= 0) { | 615 | if (regs->orig_eax >= 0) { |
| 619 | /* Restart the system call - no handlers present */ | 616 | /* Restart the system call - no handlers present */ |
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index 218d725a5a1e..d134e9643a58 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c | |||
| @@ -504,27 +504,23 @@ void unlock_ipi_call_lock(void) | |||
| 504 | spin_unlock_irq(&call_lock); | 504 | spin_unlock_irq(&call_lock); |
| 505 | } | 505 | } |
| 506 | 506 | ||
| 507 | static struct call_data_struct * call_data; | 507 | static struct call_data_struct *call_data; |
| 508 | 508 | ||
| 509 | /* | 509 | /** |
| 510 | * this function sends a 'generic call function' IPI to all other CPUs | 510 | * smp_call_function(): Run a function on all other CPUs. |
| 511 | * in the system. | 511 | * @func: The function to run. This must be fast and non-blocking. |
| 512 | */ | 512 | * @info: An arbitrary pointer to pass to the function. |
| 513 | 513 | * @nonatomic: currently unused. | |
| 514 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | 514 | * @wait: If true, wait (atomically) until function has completed on other CPUs. |
| 515 | int wait) | 515 | * |
| 516 | /* | 516 | * Returns 0 on success, else a negative status code. Does not return until |
| 517 | * [SUMMARY] Run a function on all other CPUs. | ||
| 518 | * <func> The function to run. This must be fast and non-blocking. | ||
| 519 | * <info> An arbitrary pointer to pass to the function. | ||
| 520 | * <nonatomic> currently unused. | ||
| 521 | * <wait> If true, wait (atomically) until function has completed on other CPUs. | ||
| 522 | * [RETURNS] 0 on success, else a negative status code. Does not return until | ||
| 523 | * remote CPUs are nearly ready to execute <<func>> or have already executed it. | 517 | * remote CPUs are nearly ready to execute <<func>> or have already executed it. |
| 524 | * | 518 | * |
| 525 | * You must not call this function with disabled interrupts or from a | 519 | * You must not call this function with disabled interrupts or from a |
| 526 | * hardware interrupt handler or from a bottom half handler. | 520 | * hardware interrupt handler or from a bottom half handler. |
| 527 | */ | 521 | */ |
| 522 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | ||
| 523 | int wait) | ||
| 528 | { | 524 | { |
| 529 | struct call_data_struct data; | 525 | struct call_data_struct data; |
| 530 | int cpus; | 526 | int cpus; |
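The kerneldoc now states the constraints explicitly; a usage sketch under those constraints follows (the callback and caller below are hypothetical, only smp_call_function() itself comes from this file):

	#include <linux/smp.h>

	/* Hypothetical callback: runs on each remote CPU from IPI
	 * context, so it must be fast and must not sleep. */
	static void poke_cpu(void *info)
	{
		/* per-CPU work goes here */
	}

	static void poke_all_others(void)
	{
		/* Process context, interrupts enabled: run poke_cpu() on
		 * every other CPU and wait for all of them to finish. */
		smp_call_function(poke_cpu, NULL, 0 /* nonatomic: unused */,
				  1 /* wait */);
	}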
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 7007e1783797..4c470e99a742 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
| @@ -899,6 +899,7 @@ static int __devinit do_boot_cpu(int apicid, int cpu) | |||
| 899 | unsigned short nmi_high = 0, nmi_low = 0; | 899 | unsigned short nmi_high = 0, nmi_low = 0; |
| 900 | 900 | ||
| 901 | ++cpucount; | 901 | ++cpucount; |
| 902 | alternatives_smp_switch(1); | ||
| 902 | 903 | ||
| 903 | /* | 904 | /* |
| 904 | * We can't use kernel_thread since we must avoid to | 905 | * We can't use kernel_thread since we must avoid to |
| @@ -1368,6 +1369,8 @@ void __cpu_die(unsigned int cpu) | |||
| 1368 | /* They ack this in play_dead by setting CPU_DEAD */ | 1369 | /* They ack this in play_dead by setting CPU_DEAD */ |
| 1369 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { | 1370 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { |
| 1370 | printk ("CPU %d is now offline\n", cpu); | 1371 | printk ("CPU %d is now offline\n", cpu); |
| 1372 | if (1 == num_online_cpus()) | ||
| 1373 | alternatives_smp_switch(0); | ||
| 1371 | return; | 1374 | return; |
| 1372 | } | 1375 | } |
| 1373 | msleep(100); | 1376 | msleep(100); |
diff --git a/arch/i386/kernel/sys_i386.c b/arch/i386/kernel/sys_i386.c index a4a61976ecb9..8fdb1fb17a5f 100644 --- a/arch/i386/kernel/sys_i386.c +++ b/arch/i386/kernel/sys_i386.c | |||
| @@ -40,14 +40,13 @@ asmlinkage int sys_pipe(unsigned long __user * fildes) | |||
| 40 | return error; | 40 | return error; |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | /* common code for old and new mmaps */ | 43 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, |
| 44 | static inline long do_mmap2( | 44 | unsigned long prot, unsigned long flags, |
| 45 | unsigned long addr, unsigned long len, | 45 | unsigned long fd, unsigned long pgoff) |
| 46 | unsigned long prot, unsigned long flags, | ||
| 47 | unsigned long fd, unsigned long pgoff) | ||
| 48 | { | 46 | { |
| 49 | int error = -EBADF; | 47 | int error = -EBADF; |
| 50 | struct file * file = NULL; | 48 | struct file *file = NULL; |
| 49 | struct mm_struct *mm = current->mm; | ||
| 51 | 50 | ||
| 52 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | 51 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); |
| 53 | if (!(flags & MAP_ANONYMOUS)) { | 52 | if (!(flags & MAP_ANONYMOUS)) { |
| @@ -56,9 +55,9 @@ static inline long do_mmap2( | |||
| 56 | goto out; | 55 | goto out; |
| 57 | } | 56 | } |
| 58 | 57 | ||
| 59 | down_write(¤t->mm->mmap_sem); | 58 | down_write(&mm->mmap_sem); |
| 60 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | 59 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); |
| 61 | up_write(¤t->mm->mmap_sem); | 60 | up_write(&mm->mmap_sem); |
| 62 | 61 | ||
| 63 | if (file) | 62 | if (file) |
| 64 | fput(file); | 63 | fput(file); |
| @@ -66,13 +65,6 @@ out: | |||
| 66 | return error; | 65 | return error; |
| 67 | } | 66 | } |
| 68 | 67 | ||
| 69 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
| 70 | unsigned long prot, unsigned long flags, | ||
| 71 | unsigned long fd, unsigned long pgoff) | ||
| 72 | { | ||
| 73 | return do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
| 74 | } | ||
| 75 | |||
| 76 | /* | 68 | /* |
| 77 | * Perform the select(nd, in, out, ex, tv) and mmap() system | 69 | * Perform the select(nd, in, out, ex, tv) and mmap() system |
| 78 | * calls. Linux/i386 didn't use to be able to handle more than | 70 | * calls. Linux/i386 didn't use to be able to handle more than |
| @@ -101,7 +93,8 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) | |||
| 101 | if (a.offset & ~PAGE_MASK) | 93 | if (a.offset & ~PAGE_MASK) |
| 102 | goto out; | 94 | goto out; |
| 103 | 95 | ||
| 104 | err = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | 96 | err = sys_mmap2(a.addr, a.len, a.prot, a.flags, |
| 97 | a.fd, a.offset >> PAGE_SHIFT); | ||
| 105 | out: | 98 | out: |
| 106 | return err; | 99 | return err; |
| 107 | } | 100 | } |
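old_mmap() keeps the legacy calling convention where all six arguments arrive in one user-space struct. Its layout follows from the a.* dereferences above; the sketch below reconstructs it (the real definition sits earlier in sys_i386.c, outside this hunk):

	/* Sketch: field names match the a.addr ... a.offset uses above. */
	struct mmap_arg_struct {
		unsigned long addr;
		unsigned long len;
		unsigned long prot;
		unsigned long flags;
		unsigned long fd;
		unsigned long offset;	/* bytes; sys_mmap2() wants pages,
					 * hence the >> PAGE_SHIFT above */
	};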
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c index be242723c339..17a6fe7166e7 100644 --- a/arch/i386/kernel/timers/timer_hpet.c +++ b/arch/i386/kernel/timers/timer_hpet.c | |||
| @@ -46,7 +46,7 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED; | |||
| 46 | * | 46 | * |
| 47 | * -johnstul@us.ibm.com "math is hard, let's go shopping!" | 47 | * -johnstul@us.ibm.com "math is hard, let's go shopping!" |
| 48 | */ | 48 | */ |
| 49 | static unsigned long cyc2ns_scale; | 49 | static unsigned long cyc2ns_scale __read_mostly; |
| 50 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 50 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
| 51 | 51 | ||
| 52 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) | 52 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) |
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c index a7f5a2aceba2..5e41ee29c8cf 100644 --- a/arch/i386/kernel/timers/timer_tsc.c +++ b/arch/i386/kernel/timers/timer_tsc.c | |||
| @@ -74,7 +74,7 @@ late_initcall(start_lost_tick_compensation); | |||
| 74 | * | 74 | * |
| 75 | * -johnstul@us.ibm.com "math is hard, let's go shopping!" | 75 | * -johnstul@us.ibm.com "math is hard, let's go shopping!" |
| 76 | */ | 76 | */ |
| 77 | static unsigned long cyc2ns_scale; | 77 | static unsigned long cyc2ns_scale __read_mostly; |
| 78 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 78 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
| 79 | 79 | ||
| 80 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) | 80 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) |
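Both timers annotate the same variable: cyc2ns_scale is read on every timestamp conversion but written only at calibration time, so __read_mostly groups it with other rarely-written data and keeps it off hot, frequently-dirtied cache lines. The fixed-point scheme the CYC2NS_SCALE_FACTOR comment refers to looks like the following sketch; set_cyc2ns_scale() appears in the context lines above, but its body and the converse helper here are assumptions:

	/* Sketch: scale = 10^6 * 2^10 / cpu_khz, so that
	 * ns = cycles * scale / 2^10, with no division per call. */
	static inline void set_cyc2ns_scale(unsigned long cpu_khz)
	{
		cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
	}

	static inline unsigned long long cycles_2_ns(unsigned long long cyc)
	{
		return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
	}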
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c index 67a0e1baa28b..296355292c7c 100644 --- a/arch/i386/kernel/topology.c +++ b/arch/i386/kernel/topology.c | |||
| @@ -41,6 +41,15 @@ int arch_register_cpu(int num){ | |||
| 41 | parent = &node_devices[node].node; | 41 | parent = &node_devices[node].node; |
| 42 | #endif /* CONFIG_NUMA */ | 42 | #endif /* CONFIG_NUMA */ |
| 43 | 43 | ||
| 44 | /* | ||
| 45 | * CPU0 cannot be offlined due to several | ||
| 46 | * restrictions and assumptions in the kernel. This basically | ||
| 47 | * doesn't add a control file; one cannot attempt to offline | ||
| 48 | * the BSP. | ||
| 49 | */ | ||
| 50 | if (!num) | ||
| 51 | cpu_devices[num].cpu.no_control = 1; | ||
| 52 | |||
| 44 | return register_cpu(&cpu_devices[num].cpu, num, parent); | 53 | return register_cpu(&cpu_devices[num].cpu, num, parent); |
| 45 | } | 54 | } |
| 46 | 55 | ||
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index b814dbdcc91e..de5386b01d38 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
| @@ -99,6 +99,8 @@ int register_die_notifier(struct notifier_block *nb) | |||
| 99 | { | 99 | { |
| 100 | int err = 0; | 100 | int err = 0; |
| 101 | unsigned long flags; | 101 | unsigned long flags; |
| 102 | |||
| 103 | vmalloc_sync_all(); | ||
| 102 | spin_lock_irqsave(&die_notifier_lock, flags); | 104 | spin_lock_irqsave(&die_notifier_lock, flags); |
| 103 | err = notifier_chain_register(&i386die_chain, nb); | 105 | err = notifier_chain_register(&i386die_chain, nb); |
| 104 | spin_unlock_irqrestore(&die_notifier_lock, flags); | 106 | spin_unlock_irqrestore(&die_notifier_lock, flags); |
| @@ -112,12 +114,30 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | |||
| 112 | p < (void *)tinfo + THREAD_SIZE - 3; | 114 | p < (void *)tinfo + THREAD_SIZE - 3; |
| 113 | } | 115 | } |
| 114 | 116 | ||
| 115 | static void print_addr_and_symbol(unsigned long addr, char *log_lvl) | 117 | /* |
| 118 | * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line. | ||
| 119 | */ | ||
| 120 | static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl, | ||
| 121 | int printed) | ||
| 116 | { | 122 | { |
| 117 | printk(log_lvl); | 123 | if (!printed) |
| 124 | printk(log_lvl); | ||
| 125 | |||
| 126 | #if CONFIG_STACK_BACKTRACE_COLS == 1 | ||
| 118 | printk(" [<%08lx>] ", addr); | 127 | printk(" [<%08lx>] ", addr); |
| 128 | #else | ||
| 129 | printk(" <%08lx> ", addr); | ||
| 130 | #endif | ||
| 119 | print_symbol("%s", addr); | 131 | print_symbol("%s", addr); |
| 120 | printk("\n"); | 132 | |
| 133 | printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS; | ||
| 134 | |||
| 135 | if (printed) | ||
| 136 | printk(" "); | ||
| 137 | else | ||
| 138 | printk("\n"); | ||
| 139 | |||
| 140 | return printed; | ||
| 121 | } | 141 | } |
| 122 | 142 | ||
| 123 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | 143 | static inline unsigned long print_context_stack(struct thread_info *tinfo, |
| @@ -125,20 +145,24 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, | |||
| 125 | char *log_lvl) | 145 | char *log_lvl) |
| 126 | { | 146 | { |
| 127 | unsigned long addr; | 147 | unsigned long addr; |
| 148 | int printed = 0; /* nr of entries already printed on current line */ | ||
| 128 | 149 | ||
| 129 | #ifdef CONFIG_FRAME_POINTER | 150 | #ifdef CONFIG_FRAME_POINTER |
| 130 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | 151 | while (valid_stack_ptr(tinfo, (void *)ebp)) { |
| 131 | addr = *(unsigned long *)(ebp + 4); | 152 | addr = *(unsigned long *)(ebp + 4); |
| 132 | print_addr_and_symbol(addr, log_lvl); | 153 | printed = print_addr_and_symbol(addr, log_lvl, printed); |
| 133 | ebp = *(unsigned long *)ebp; | 154 | ebp = *(unsigned long *)ebp; |
| 134 | } | 155 | } |
| 135 | #else | 156 | #else |
| 136 | while (valid_stack_ptr(tinfo, stack)) { | 157 | while (valid_stack_ptr(tinfo, stack)) { |
| 137 | addr = *stack++; | 158 | addr = *stack++; |
| 138 | if (__kernel_text_address(addr)) | 159 | if (__kernel_text_address(addr)) |
| 139 | print_addr_and_symbol(addr, log_lvl); | 160 | printed = print_addr_and_symbol(addr, log_lvl, printed); |
| 140 | } | 161 | } |
| 141 | #endif | 162 | #endif |
| 163 | if (printed) | ||
| 164 | printk("\n"); | ||
| 165 | |||
| 142 | return ebp; | 166 | return ebp; |
| 143 | } | 167 | } |
| 144 | 168 | ||
| @@ -166,8 +190,7 @@ static void show_trace_log_lvl(struct task_struct *task, | |||
| 166 | stack = (unsigned long*)context->previous_esp; | 190 | stack = (unsigned long*)context->previous_esp; |
| 167 | if (!stack) | 191 | if (!stack) |
| 168 | break; | 192 | break; |
| 169 | printk(log_lvl); | 193 | printk("%s =======================\n", log_lvl); |
| 170 | printk(" =======================\n"); | ||
| 171 | } | 194 | } |
| 172 | } | 195 | } |
| 173 | 196 | ||
| @@ -194,21 +217,17 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, | |||
| 194 | for(i = 0; i < kstack_depth_to_print; i++) { | 217 | for(i = 0; i < kstack_depth_to_print; i++) { |
| 195 | if (kstack_end(stack)) | 218 | if (kstack_end(stack)) |
| 196 | break; | 219 | break; |
| 197 | if (i && ((i % 8) == 0)) { | 220 | if (i && ((i % 8) == 0)) |
| 198 | printk("\n"); | 221 | printk("\n%s ", log_lvl); |
| 199 | printk(log_lvl); | ||
| 200 | printk(" "); | ||
| 201 | } | ||
| 202 | printk("%08lx ", *stack++); | 222 | printk("%08lx ", *stack++); |
| 203 | } | 223 | } |
| 204 | printk("\n"); | 224 | printk("\n%sCall Trace:\n", log_lvl); |
| 205 | printk(log_lvl); | ||
| 206 | printk("Call Trace:\n"); | ||
| 207 | show_trace_log_lvl(task, esp, log_lvl); | 225 | show_trace_log_lvl(task, esp, log_lvl); |
| 208 | } | 226 | } |
| 209 | 227 | ||
| 210 | void show_stack(struct task_struct *task, unsigned long *esp) | 228 | void show_stack(struct task_struct *task, unsigned long *esp) |
| 211 | { | 229 | { |
| 230 | printk(" "); | ||
| 212 | show_stack_log_lvl(task, esp, ""); | 231 | show_stack_log_lvl(task, esp, ""); |
| 213 | } | 232 | } |
| 214 | 233 | ||
| @@ -233,7 +252,7 @@ void show_registers(struct pt_regs *regs) | |||
| 233 | 252 | ||
| 234 | esp = (unsigned long) (®s->esp); | 253 | esp = (unsigned long) (®s->esp); |
| 235 | savesegment(ss, ss); | 254 | savesegment(ss, ss); |
| 236 | if (user_mode(regs)) { | 255 | if (user_mode_vm(regs)) { |
| 237 | in_kernel = 0; | 256 | in_kernel = 0; |
| 238 | esp = regs->esp; | 257 | esp = regs->esp; |
| 239 | ss = regs->xss & 0xffff; | 258 | ss = regs->xss & 0xffff; |
| @@ -333,6 +352,8 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
| 333 | static int die_counter; | 352 | static int die_counter; |
| 334 | unsigned long flags; | 353 | unsigned long flags; |
| 335 | 354 | ||
| 355 | oops_enter(); | ||
| 356 | |||
| 336 | if (die.lock_owner != raw_smp_processor_id()) { | 357 | if (die.lock_owner != raw_smp_processor_id()) { |
| 337 | console_verbose(); | 358 | console_verbose(); |
| 338 | spin_lock_irqsave(&die.lock, flags); | 359 | spin_lock_irqsave(&die.lock, flags); |
| @@ -385,6 +406,7 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
| 385 | ssleep(5); | 406 | ssleep(5); |
| 386 | panic("Fatal exception"); | 407 | panic("Fatal exception"); |
| 387 | } | 408 | } |
| 409 | oops_exit(); | ||
| 388 | do_exit(SIGSEGV); | 410 | do_exit(SIGSEGV); |
| 389 | } | 411 | } |
| 390 | 412 | ||
| @@ -623,7 +645,7 @@ void die_nmi (struct pt_regs *regs, const char *msg) | |||
| 623 | /* If we are in the kernel we are probably nested up pretty bad | 645 | /* If we are in the kernel we are probably nested up pretty bad
| 624 | * and might as well get out now while we still can. | 646 | * and might as well get out now while we still can.
| 625 | */ | 647 | */ |
| 626 | if (!user_mode(regs)) { | 648 | if (!user_mode_vm(regs)) { |
| 627 | current->thread.trap_no = 2; | 649 | current->thread.trap_no = 2; |
| 628 | crash_kexec(regs); | 650 | crash_kexec(regs); |
| 629 | } | 651 | } |
| @@ -694,6 +716,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code) | |||
| 694 | 716 | ||
| 695 | void set_nmi_callback(nmi_callback_t callback) | 717 | void set_nmi_callback(nmi_callback_t callback) |
| 696 | { | 718 | { |
| 719 | vmalloc_sync_all(); | ||
| 697 | rcu_assign_pointer(nmi_callback, callback); | 720 | rcu_assign_pointer(nmi_callback, callback); |
| 698 | } | 721 | } |
| 699 | EXPORT_SYMBOL_GPL(set_nmi_callback); | 722 | EXPORT_SYMBOL_GPL(set_nmi_callback); |
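Both additions of vmalloc_sync_all() in this file serve the same purpose: a die notifier or NMI callback may live in module (vmalloc) space, and a lazy vmalloc page-table fault cannot be taken safely from die/NMI context, so the page tables are synced before the pointer is published. A usage sketch for the callback side follows; the handler is hypothetical and the nmi_callback_t signature (regs, cpu) is assumed:

	/* Hypothetical handler; signature assumed from nmi_callback_t. */
	static int my_nmi(struct pt_regs *regs, int cpu)
	{
		/* inspect state; return 1 if this NMI was handled */
		return 0;
	}

	/* e.g. from a module init path: */
	set_nmi_callback(my_nmi);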
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 4710195b6b74..3f21c6f6466d 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S | |||
| @@ -68,6 +68,26 @@ SECTIONS | |||
| 68 | *(.data.init_task) | 68 | *(.data.init_task) |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | /* might get freed after init */ | ||
| 72 | . = ALIGN(4096); | ||
| 73 | __smp_alt_begin = .; | ||
| 74 | __smp_alt_instructions = .; | ||
| 75 | .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) { | ||
| 76 | *(.smp_altinstructions) | ||
| 77 | } | ||
| 78 | __smp_alt_instructions_end = .; | ||
| 79 | . = ALIGN(4); | ||
| 80 | __smp_locks = .; | ||
| 81 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | ||
| 82 | *(.smp_locks) | ||
| 83 | } | ||
| 84 | __smp_locks_end = .; | ||
| 85 | .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) { | ||
| 86 | *(.smp_altinstr_replacement) | ||
| 87 | } | ||
| 88 | . = ALIGN(4096); | ||
| 89 | __smp_alt_end = .; | ||
| 90 | |||
| 71 | /* will be freed after init */ | 91 | /* will be freed after init */ |
| 72 | . = ALIGN(4096); /* Init code and data */ | 92 | . = ALIGN(4096); /* Init code and data */ |
| 73 | __init_begin = .; | 93 | __init_begin = .; |
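These sections give C code begin/end symbols for the SMP alternative instructions and for the recorded lock prefixes, which is what alternatives_smp_switch() in the smpboot.c hunks above operates on. A sketch of how __smp_locks could be consumed follows; the "array of pointers to LOCK-prefix bytes" layout is an assumption (the 4-byte alignment in the script is suggestive, nothing more):

	#include <linux/types.h>

	extern u8 *__smp_locks[], *__smp_locks_end[];

	/* Sketch: on the way to UP, turn LOCK prefixes into nops;
	 * restore them when a second CPU comes online. */
	static void patch_smp_locks(int smp)
	{
		u8 **p;

		for (p = __smp_locks; p < __smp_locks_end; p++)
			**p = smp ? 0xf0 /* lock */ : 0x90 /* nop */;
	}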
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S index 76b728159403..3b62baa6a371 100644 --- a/arch/i386/kernel/vsyscall-sysenter.S +++ b/arch/i386/kernel/vsyscall-sysenter.S | |||
| @@ -21,6 +21,9 @@ | |||
| 21 | * instruction clobbers %esp, the user's %esp won't even survive entry | 21 | * instruction clobbers %esp, the user's %esp won't even survive entry |
| 22 | * into the kernel. We store %esp in %ebp. Code in entry.S must fetch | 22 | * into the kernel. We store %esp in %ebp. Code in entry.S must fetch |
| 23 | * arg6 from the stack. | 23 | * arg6 from the stack. |
| 24 | * | ||
| 25 | * You cannot use this vsyscall for the clone() syscall because the | ||
| 26 | * three dwords on the parent stack do not get copied to the child. | ||
| 24 | */ | 27 | */ |
| 25 | .text | 28 | .text |
| 26 | .globl __kernel_vsyscall | 29 | .globl __kernel_vsyscall |
