author     Ingo Molnar <mingo@elte.hu>   2008-09-06 08:53:20 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-09-06 08:53:20 -0400
commit     f12e6a451aad671a724e61abce2b8b323f209355 (patch)
tree       e4969282d07f7682099291e59545744b3bb5dcac /arch
parent     046fd53773cd87125f799b00422e487bf1428d38 (diff)
parent     dc44e65943169de2d1a1b494876f48a65a9737f1 (diff)
Merge branch 'x86/cleanups' into x86/signal
Conflicts:
	arch/x86/kernel/signal_64.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
31 files changed, 315 insertions, 316 deletions
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index ba7736cf2ec7..29c5fbf08392 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -137,14 +137,15 @@ relocated: | |||
137 | */ | 137 | */ |
138 | movl output_len(%ebx), %eax | 138 | movl output_len(%ebx), %eax |
139 | pushl %eax | 139 | pushl %eax |
140 | # push arguments for decompress_kernel: | ||
140 | pushl %ebp # output address | 141 | pushl %ebp # output address |
141 | movl input_len(%ebx), %eax | 142 | movl input_len(%ebx), %eax |
142 | pushl %eax # input_len | 143 | pushl %eax # input_len |
143 | leal input_data(%ebx), %eax | 144 | leal input_data(%ebx), %eax |
144 | pushl %eax # input_data | 145 | pushl %eax # input_data |
145 | leal boot_heap(%ebx), %eax | 146 | leal boot_heap(%ebx), %eax |
146 | pushl %eax # heap area as third argument | 147 | pushl %eax # heap area |
147 | pushl %esi # real mode pointer as second arg | 148 | pushl %esi # real mode pointer |
148 | call decompress_kernel | 149 | call decompress_kernel |
149 | addl $20, %esp | 150 | addl $20, %esp |
150 | popl %ecx | 151 | popl %ecx |
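The comment added at new line 140 marks where the argument pushes for decompress_kernel() begin. For orientation, the pushes line up with a 32-bit cdecl call whose prototype in misc.c of this era looks roughly like the sketch below; the names and exact types are approximations, not copied from the tree. output_len is pushed before the marked arguments only so it survives the call: addl $20 pops the five arguments and the following popl %ecx retrieves it.

	/* Approximate prototype the asm above calls into (arguments are
	 * pushed right-to-left, so %esi / the real-mode pointer is
	 * pushed last and becomes the first parameter). */
	asmlinkage void decompress_kernel(void *rmode,             /* %esi */
					  unsigned long heap,       /* boot_heap */
					  unsigned char *input_data,
					  unsigned long input_len,
					  unsigned char *output);   /* %ebp */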
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index aaf5a2131efc..5780d361105b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/linkage.h> | 27 | #include <linux/linkage.h> |
28 | #include <linux/screen_info.h> | 28 | #include <linux/screen_info.h> |
29 | #include <linux/elf.h> | 29 | #include <linux/elf.h> |
30 | #include <asm/io.h> | 30 | #include <linux/io.h> |
31 | #include <asm/page.h> | 31 | #include <asm/page.h> |
32 | #include <asm/boot.h> | 32 | #include <asm/boot.h> |
33 | #include <asm/bootparam.h> | 33 | #include <asm/bootparam.h> |
@@ -251,7 +251,7 @@ static void __putstr(int error, const char *s) | |||
251 | y--; | 251 | y--; |
252 | } | 252 | } |
253 | } else { | 253 | } else { |
254 | vidmem [(x + cols * y) * 2] = c; | 254 | vidmem[(x + cols * y) * 2] = c; |
255 | if (++x >= cols) { | 255 | if (++x >= cols) { |
256 | x = 0; | 256 | x = 0; |
257 | if (++y >= lines) { | 257 | if (++y >= lines) { |
@@ -277,7 +277,8 @@ static void *memset(void *s, int c, unsigned n) | |||
277 | int i; | 277 | int i; |
278 | char *ss = s; | 278 | char *ss = s; |
279 | 279 | ||
280 | for (i = 0; i < n; i++) ss[i] = c; | 280 | for (i = 0; i < n; i++) |
281 | ss[i] = c; | ||
281 | return s; | 282 | return s; |
282 | } | 283 | } |
283 | 284 | ||
@@ -287,7 +288,8 @@ static void *memcpy(void *dest, const void *src, unsigned n) | |||
287 | const char *s = src; | 288 | const char *s = src; |
288 | char *d = dest; | 289 | char *d = dest; |
289 | 290 | ||
290 | for (i = 0; i < n; i++) d[i] = s[i]; | 291 | for (i = 0; i < n; i++) |
292 | d[i] = s[i]; | ||
291 | return dest; | 293 | return dest; |
292 | } | 294 | } |
293 | 295 | ||
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a0e1dbe67dc1..127ec3f07214 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -85,8 +85,10 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump) | |||
85 | dump->regs.ax = regs->ax; | 85 | dump->regs.ax = regs->ax; |
86 | dump->regs.ds = current->thread.ds; | 86 | dump->regs.ds = current->thread.ds; |
87 | dump->regs.es = current->thread.es; | 87 | dump->regs.es = current->thread.es; |
88 | asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs; | 88 | savesegment(fs, fs); |
89 | asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs; | 89 | dump->regs.fs = fs; |
90 | savesegment(gs, gs); | ||
91 | dump->regs.gs = gs; | ||
90 | dump->regs.orig_ax = regs->orig_ax; | 92 | dump->regs.orig_ax = regs->orig_ax; |
91 | dump->regs.ip = regs->ip; | 93 | dump->regs.ip = regs->ip; |
92 | dump->regs.cs = regs->cs; | 94 | dump->regs.cs = regs->cs; |
@@ -430,8 +432,9 @@ beyond_if: | |||
430 | current->mm->start_stack = | 432 | current->mm->start_stack = |
431 | (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); | 433 | (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); |
432 | /* start thread */ | 434 | /* start thread */ |
433 | asm volatile("movl %0,%%fs" :: "r" (0)); \ | 435 | loadsegment(fs, 0); |
434 | asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS)); | 436 | loadsegment(ds, __USER32_DS); |
437 | loadsegment(es, __USER32_DS); | ||
435 | load_gs_index(0); | 438 | load_gs_index(0); |
436 | (regs)->ip = ex.a_entry; | 439 | (regs)->ip = ex.a_entry; |
437 | (regs)->sp = current->mm->start_stack; | 440 | (regs)->sp = current->mm->start_stack; |
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index f25a10124005..8d64c1bc8474 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -207,7 +207,7 @@ struct rt_sigframe | |||
207 | { unsigned int cur; \ | 207 | { unsigned int cur; \ |
208 | unsigned short pre; \ | 208 | unsigned short pre; \ |
209 | err |= __get_user(pre, &sc->seg); \ | 209 | err |= __get_user(pre, &sc->seg); \ |
210 | asm volatile("movl %%" #seg ",%0" : "=r" (cur)); \ | 210 | savesegment(seg, cur); \ |
211 | pre |= mask; \ | 211 | pre |= mask; \ |
212 | if (pre != cur) loadsegment(seg, pre); } | 212 | if (pre != cur) loadsegment(seg, pre); } |
213 | 213 | ||
@@ -236,7 +236,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, | |||
236 | */ | 236 | */ |
237 | err |= __get_user(gs, &sc->gs); | 237 | err |= __get_user(gs, &sc->gs); |
238 | gs |= 3; | 238 | gs |= 3; |
239 | asm("movl %%gs,%0" : "=r" (oldgs)); | 239 | savesegment(gs, oldgs); |
240 | if (gs != oldgs) | 240 | if (gs != oldgs) |
241 | load_gs_index(gs); | 241 | load_gs_index(gs); |
242 | 242 | ||
@@ -342,14 +342,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, | |||
342 | { | 342 | { |
343 | int tmp, err = 0; | 343 | int tmp, err = 0; |
344 | 344 | ||
345 | tmp = 0; | 345 | savesegment(gs, tmp); |
346 | __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp)); | ||
347 | err |= __put_user(tmp, (unsigned int __user *)&sc->gs); | 346 | err |= __put_user(tmp, (unsigned int __user *)&sc->gs); |
348 | __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp)); | 347 | savesegment(fs, tmp); |
349 | err |= __put_user(tmp, (unsigned int __user *)&sc->fs); | 348 | err |= __put_user(tmp, (unsigned int __user *)&sc->fs); |
350 | __asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp)); | 349 | savesegment(ds, tmp); |
351 | err |= __put_user(tmp, (unsigned int __user *)&sc->ds); | 350 | err |= __put_user(tmp, (unsigned int __user *)&sc->ds); |
352 | __asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp)); | 351 | savesegment(es, tmp); |
353 | err |= __put_user(tmp, (unsigned int __user *)&sc->es); | 352 | err |= __put_user(tmp, (unsigned int __user *)&sc->es); |
354 | 353 | ||
355 | err |= __put_user((u32)regs->di, &sc->di); | 354 | err |= __put_user((u32)regs->di, &sc->di); |
@@ -491,8 +490,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, | |||
491 | regs->dx = 0; | 490 | regs->dx = 0; |
492 | regs->cx = 0; | 491 | regs->cx = 0; |
493 | 492 | ||
494 | asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); | 493 | loadsegment(ds, __USER32_DS); |
495 | asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); | 494 | loadsegment(es, __USER32_DS); |
496 | 495 | ||
497 | regs->cs = __USER32_CS; | 496 | regs->cs = __USER32_CS; |
498 | regs->ss = __USER32_DS; | 497 | regs->ss = __USER32_DS; |
@@ -588,8 +587,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
588 | regs->dx = (unsigned long) &frame->info; | 587 | regs->dx = (unsigned long) &frame->info; |
589 | regs->cx = (unsigned long) &frame->uc; | 588 | regs->cx = (unsigned long) &frame->uc; |
590 | 589 | ||
591 | asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); | 590 | loadsegment(ds, __USER32_DS); |
592 | asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); | 591 | loadsegment(es, __USER32_DS); |
593 | 592 | ||
594 | regs->cs = __USER32_CS; | 593 | regs->cs = __USER32_CS; |
595 | regs->ss = __USER32_DS; | 594 | regs->ss = __USER32_DS; |
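The recurring change in the two ia32 files above is replacing open-coded segment-register asm with the savesegment()/loadsegment() helpers. As a simplified sketch of what those helpers boil down to (not the kernel's exact definitions; the real loadsegment() also wires up an exception-table fixup so a faulting selector load falls back to zero):

	/* Simplified sketch, for orientation only. */
	#define savesegment(seg, value) \
		asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

	#define loadsegment(seg, value) \
		asm volatile("mov %0,%%" #seg : : "r" (value) : "memory")

Using the helpers keeps the constraints and clobbers in one place instead of repeating slightly different asm strings at every call site.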
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 27ef365e757d..267e684f33a7 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled); | |||
58 | #ifdef CONFIG_X86_64 | 58 | #ifdef CONFIG_X86_64 |
59 | 59 | ||
60 | #include <asm/proto.h> | 60 | #include <asm/proto.h> |
61 | #include <asm/genapic.h> | ||
62 | 61 | ||
63 | #else /* X86 */ | 62 | #else /* X86 */ |
64 | 63 | ||
@@ -97,8 +96,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | |||
97 | #warning ACPI uses CMPXCHG, i486 and later hardware | 96 | #warning ACPI uses CMPXCHG, i486 and later hardware |
98 | #endif | 97 | #endif |
99 | 98 | ||
100 | static int acpi_mcfg_64bit_base_addr __initdata = FALSE; | ||
101 | |||
102 | /* -------------------------------------------------------------------------- | 99 | /* -------------------------------------------------------------------------- |
103 | Boot-time Configuration | 100 | Boot-time Configuration |
104 | -------------------------------------------------------------------------- */ | 101 | -------------------------------------------------------------------------- */ |
@@ -160,6 +157,8 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size) | |||
160 | struct acpi_mcfg_allocation *pci_mmcfg_config; | 157 | struct acpi_mcfg_allocation *pci_mmcfg_config; |
161 | int pci_mmcfg_config_num; | 158 | int pci_mmcfg_config_num; |
162 | 159 | ||
160 | static int acpi_mcfg_64bit_base_addr __initdata = FALSE; | ||
161 | |||
163 | static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg) | 162 | static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg) |
164 | { | 163 | { |
165 | if (!strcmp(mcfg->header.oem_id, "SGI")) | 164 | if (!strcmp(mcfg->header.oem_id, "SGI")) |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 9ee24e6bc4b0..b93d069aea72 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,7 +228,6 @@ | |||
228 | #include <linux/suspend.h> | 228 | #include <linux/suspend.h> |
229 | #include <linux/kthread.h> | 229 | #include <linux/kthread.h> |
230 | #include <linux/jiffies.h> | 230 | #include <linux/jiffies.h> |
231 | #include <linux/smp_lock.h> | ||
232 | 231 | ||
233 | #include <asm/system.h> | 232 | #include <asm/system.h> |
234 | #include <asm/uaccess.h> | 233 | #include <asm/uaccess.h> |
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index c639bd55391c..fdd585f9c53d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -25,11 +25,11 @@ x86_bios_strerror(long status) | |||
25 | { | 25 | { |
26 | const char *str; | 26 | const char *str; |
27 | switch (status) { | 27 | switch (status) { |
28 | case 0: str = "Call completed without error"; break; | 28 | case 0: str = "Call completed without error"; break; |
29 | case -1: str = "Not implemented"; break; | 29 | case -1: str = "Not implemented"; break; |
30 | case -2: str = "Invalid argument"; break; | 30 | case -2: str = "Invalid argument"; break; |
31 | case -3: str = "Call completed with error"; break; | 31 | case -3: str = "Call completed with error"; break; |
32 | default: str = "Unknown BIOS status code"; break; | 32 | default: str = "Unknown BIOS status code"; break; |
33 | } | 33 | } |
34 | return str; | 34 | return str; |
35 | } | 35 | } |
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8e9cd6a8ec12..6a44d6465991 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/smp_lock.h> | 36 | #include <linux/smp_lock.h> |
37 | #include <linux/major.h> | 37 | #include <linux/major.h> |
38 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
39 | #include <linux/smp_lock.h> | ||
40 | #include <linux/device.h> | 39 | #include <linux/device.h> |
41 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
42 | #include <linux/notifier.h> | 41 | #include <linux/notifier.h> |
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 15e6c6bc4a46..d3e524c84527 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -7,9 +7,8 @@ | |||
7 | 7 | ||
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/crash_dump.h> | 9 | #include <linux/crash_dump.h> |
10 | 10 | #include <linux/uaccess.h> | |
11 | #include <asm/uaccess.h> | 11 | #include <linux/io.h> |
12 | #include <asm/io.h> | ||
13 | 12 | ||
14 | /** | 13 | /** |
15 | * copy_oldmem_page - copy one page from "oldmem" | 14 | * copy_oldmem_page - copy one page from "oldmem" |
@@ -25,7 +24,7 @@ | |||
25 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. | 24 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. |
26 | */ | 25 | */ |
27 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | 26 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
28 | size_t csize, unsigned long offset, int userbuf) | 27 | size_t csize, unsigned long offset, int userbuf) |
29 | { | 28 | { |
30 | void *vaddr; | 29 | void *vaddr; |
31 | 30 | ||
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 1cf8c1fcc088..b71e02d42f4f 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -325,7 +325,7 @@ skip: | |||
325 | for_each_online_cpu(j) | 325 | for_each_online_cpu(j) |
326 | seq_printf(p, "%10u ", | 326 | seq_printf(p, "%10u ", |
327 | per_cpu(irq_stat,j).irq_call_count); | 327 | per_cpu(irq_stat,j).irq_call_count); |
328 | seq_printf(p, " function call interrupts\n"); | 328 | seq_printf(p, " Function call interrupts\n"); |
329 | seq_printf(p, "TLB: "); | 329 | seq_printf(p, "TLB: "); |
330 | for_each_online_cpu(j) | 330 | for_each_online_cpu(j) |
331 | seq_printf(p, "%10u ", | 331 | seq_printf(p, "%10u ", |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1f78b238d8d2..f065fe9071b9 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -129,7 +129,7 @@ skip: | |||
129 | seq_printf(p, "CAL: "); | 129 | seq_printf(p, "CAL: "); |
130 | for_each_online_cpu(j) | 130 | for_each_online_cpu(j) |
131 | seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); | 131 | seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); |
132 | seq_printf(p, " function call interrupts\n"); | 132 | seq_printf(p, " Function call interrupts\n"); |
133 | seq_printf(p, "TLB: "); | 133 | seq_printf(p, "TLB: "); |
134 | for_each_online_cpu(j) | 134 | for_each_online_cpu(j) |
135 | seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); | 135 | seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); |
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 58262218781b..9fe644f4861d 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | |||
23 | start = start_##ops##_##x; \ | 23 | start = start_##ops##_##x; \ |
24 | end = end_##ops##_##x; \ | 24 | end = end_##ops##_##x; \ |
25 | goto patch_site | 25 | goto patch_site |
26 | switch(type) { | 26 | switch (type) { |
27 | PATCH_SITE(pv_irq_ops, irq_disable); | 27 | PATCH_SITE(pv_irq_ops, irq_disable); |
28 | PATCH_SITE(pv_irq_ops, irq_enable); | 28 | PATCH_SITE(pv_irq_ops, irq_enable); |
29 | PATCH_SITE(pv_irq_ops, restore_fl); | 29 | PATCH_SITE(pv_irq_ops, restore_fl); |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 87d4d6964ec2..f704cb51ff82 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -82,7 +82,7 @@ void __init dma32_reserve_bootmem(void) | |||
82 | * using 512M as goal | 82 | * using 512M as goal |
83 | */ | 83 | */ |
84 | align = 64ULL<<20; | 84 | align = 64ULL<<20; |
85 | size = round_up(dma32_bootmem_size, align); | 85 | size = roundup(dma32_bootmem_size, align); |
86 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, | 86 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, |
87 | 512ULL<<20); | 87 | 512ULL<<20); |
88 | if (dma32_bootmem_ptr) | 88 | if (dma32_bootmem_ptr) |
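The round_up() to roundup() switch above replaces the x86-private, power-of-two-only helper with the generic macro from linux/kernel.h. Paraphrased definitions, not copied verbatim:

	/* Generic: correct for any positive multiple. */
	#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

	/* Mask-based variant: only correct when y is a power of two. */
	#define round_up(x, y)  (((x) + (y) - 1) & ~((y) - 1))

Here align is 64ULL << 20, a power of two, so both forms compute the same size; the change only drops the arch-private name.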
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 00263c9e6500..7502508a7664 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,11 +37,11 @@ | |||
37 | #include <linux/kdebug.h> | 37 | #include <linux/kdebug.h> |
38 | #include <linux/tick.h> | 38 | #include <linux/tick.h> |
39 | #include <linux/prctl.h> | 39 | #include <linux/prctl.h> |
40 | #include <linux/uaccess.h> | ||
41 | #include <linux/io.h> | ||
40 | 42 | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
43 | #include <asm/system.h> | 44 | #include <asm/system.h> |
44 | #include <asm/io.h> | ||
45 | #include <asm/processor.h> | 45 | #include <asm/processor.h> |
46 | #include <asm/i387.h> | 46 | #include <asm/i387.h> |
47 | #include <asm/mmu_context.h> | 47 | #include <asm/mmu_context.h> |
@@ -89,7 +89,7 @@ void exit_idle(void) | |||
89 | #ifdef CONFIG_HOTPLUG_CPU | 89 | #ifdef CONFIG_HOTPLUG_CPU |
90 | DECLARE_PER_CPU(int, cpu_state); | 90 | DECLARE_PER_CPU(int, cpu_state); |
91 | 91 | ||
92 | #include <asm/nmi.h> | 92 | #include <linux/nmi.h> |
93 | /* We halt the CPU with physical CPU hotplug */ | 93 | /* We halt the CPU with physical CPU hotplug */ |
94 | static inline void play_dead(void) | 94 | static inline void play_dead(void) |
95 | { | 95 | { |
@@ -152,7 +152,7 @@ void cpu_idle(void) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /* Prints also some state that isn't saved in the pt_regs */ | 154 | /* Prints also some state that isn't saved in the pt_regs */ |
155 | void __show_regs(struct pt_regs * regs) | 155 | void __show_regs(struct pt_regs *regs) |
156 | { | 156 | { |
157 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; | 157 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; |
158 | unsigned long d0, d1, d2, d3, d6, d7; | 158 | unsigned long d0, d1, d2, d3, d6, d7; |
@@ -161,59 +161,61 @@ void __show_regs(struct pt_regs * regs) | |||
161 | 161 | ||
162 | printk("\n"); | 162 | printk("\n"); |
163 | print_modules(); | 163 | print_modules(); |
164 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", | 164 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n", |
165 | current->pid, current->comm, print_tainted(), | 165 | current->pid, current->comm, print_tainted(), |
166 | init_utsname()->release, | 166 | init_utsname()->release, |
167 | (int)strcspn(init_utsname()->version, " "), | 167 | (int)strcspn(init_utsname()->version, " "), |
168 | init_utsname()->version); | 168 | init_utsname()->version); |
169 | printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); | 169 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); |
170 | printk_address(regs->ip, 1); | 170 | printk_address(regs->ip, 1); |
171 | printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, | 171 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, |
172 | regs->flags); | 172 | regs->sp, regs->flags); |
173 | printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", | 173 | printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n", |
174 | regs->ax, regs->bx, regs->cx); | 174 | regs->ax, regs->bx, regs->cx); |
175 | printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", | 175 | printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n", |
176 | regs->dx, regs->si, regs->di); | 176 | regs->dx, regs->si, regs->di); |
177 | printk("RBP: %016lx R08: %016lx R09: %016lx\n", | 177 | printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n", |
178 | regs->bp, regs->r8, regs->r9); | 178 | regs->bp, regs->r8, regs->r9); |
179 | printk("R10: %016lx R11: %016lx R12: %016lx\n", | 179 | printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n", |
180 | regs->r10, regs->r11, regs->r12); | 180 | regs->r10, regs->r11, regs->r12); |
181 | printk("R13: %016lx R14: %016lx R15: %016lx\n", | 181 | printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", |
182 | regs->r13, regs->r14, regs->r15); | 182 | regs->r13, regs->r14, regs->r15); |
183 | 183 | ||
184 | asm("movl %%ds,%0" : "=r" (ds)); | 184 | asm("movl %%ds,%0" : "=r" (ds)); |
185 | asm("movl %%cs,%0" : "=r" (cs)); | 185 | asm("movl %%cs,%0" : "=r" (cs)); |
186 | asm("movl %%es,%0" : "=r" (es)); | 186 | asm("movl %%es,%0" : "=r" (es)); |
187 | asm("movl %%fs,%0" : "=r" (fsindex)); | 187 | asm("movl %%fs,%0" : "=r" (fsindex)); |
188 | asm("movl %%gs,%0" : "=r" (gsindex)); | 188 | asm("movl %%gs,%0" : "=r" (gsindex)); |
189 | 189 | ||
190 | rdmsrl(MSR_FS_BASE, fs); | 190 | rdmsrl(MSR_FS_BASE, fs); |
191 | rdmsrl(MSR_GS_BASE, gs); | 191 | rdmsrl(MSR_GS_BASE, gs); |
192 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); | 192 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); |
193 | 193 | ||
194 | cr0 = read_cr0(); | 194 | cr0 = read_cr0(); |
195 | cr2 = read_cr2(); | 195 | cr2 = read_cr2(); |
196 | cr3 = read_cr3(); | 196 | cr3 = read_cr3(); |
197 | cr4 = read_cr4(); | 197 | cr4 = read_cr4(); |
198 | 198 | ||
199 | printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", | 199 | printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", |
200 | fs,fsindex,gs,gsindex,shadowgs); | 200 | fs, fsindex, gs, gsindex, shadowgs); |
201 | printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); | 201 | printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, |
202 | printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); | 202 | es, cr0); |
203 | printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, | ||
204 | cr4); | ||
203 | 205 | ||
204 | get_debugreg(d0, 0); | 206 | get_debugreg(d0, 0); |
205 | get_debugreg(d1, 1); | 207 | get_debugreg(d1, 1); |
206 | get_debugreg(d2, 2); | 208 | get_debugreg(d2, 2); |
207 | printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); | 209 | printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); |
208 | get_debugreg(d3, 3); | 210 | get_debugreg(d3, 3); |
209 | get_debugreg(d6, 6); | 211 | get_debugreg(d6, 6); |
210 | get_debugreg(d7, 7); | 212 | get_debugreg(d7, 7); |
211 | printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); | 213 | printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); |
212 | } | 214 | } |
213 | 215 | ||
214 | void show_regs(struct pt_regs *regs) | 216 | void show_regs(struct pt_regs *regs) |
215 | { | 217 | { |
216 | printk("CPU %d:", smp_processor_id()); | 218 | printk(KERN_INFO "CPU %d:", smp_processor_id()); |
217 | __show_regs(regs); | 219 | __show_regs(regs); |
218 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); | 220 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); |
219 | } | 221 | } |
@@ -314,10 +316,10 @@ void prepare_to_copy(struct task_struct *tsk) | |||
314 | 316 | ||
315 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 317 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, |
316 | unsigned long unused, | 318 | unsigned long unused, |
317 | struct task_struct * p, struct pt_regs * regs) | 319 | struct task_struct *p, struct pt_regs *regs) |
318 | { | 320 | { |
319 | int err; | 321 | int err; |
320 | struct pt_regs * childregs; | 322 | struct pt_regs *childregs; |
321 | struct task_struct *me = current; | 323 | struct task_struct *me = current; |
322 | 324 | ||
323 | childregs = ((struct pt_regs *) | 325 | childregs = ((struct pt_regs *) |
@@ -362,10 +364,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | |||
362 | if (test_thread_flag(TIF_IA32)) | 364 | if (test_thread_flag(TIF_IA32)) |
363 | err = do_set_thread_area(p, -1, | 365 | err = do_set_thread_area(p, -1, |
364 | (struct user_desc __user *)childregs->si, 0); | 366 | (struct user_desc __user *)childregs->si, 0); |
365 | else | 367 | else |
366 | #endif | 368 | #endif |
367 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); | 369 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); |
368 | if (err) | 370 | if (err) |
369 | goto out; | 371 | goto out; |
370 | } | 372 | } |
371 | err = 0; | 373 | err = 0; |
@@ -544,7 +546,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
544 | unsigned fsindex, gsindex; | 546 | unsigned fsindex, gsindex; |
545 | 547 | ||
546 | /* we're going to use this soon, after a few expensive things */ | 548 | /* we're going to use this soon, after a few expensive things */ |
547 | if (next_p->fpu_counter>5) | 549 | if (next_p->fpu_counter > 5) |
548 | prefetch(next->xstate); | 550 | prefetch(next->xstate); |
549 | 551 | ||
550 | /* | 552 | /* |
@@ -552,13 +554,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
552 | */ | 554 | */ |
553 | load_sp0(tss, next); | 555 | load_sp0(tss, next); |
554 | 556 | ||
555 | /* | 557 | /* |
556 | * Switch DS and ES. | 558 | * Switch DS and ES. |
557 | * This won't pick up thread selector changes, but I guess that is ok. | 559 | * This won't pick up thread selector changes, but I guess that is ok. |
558 | */ | 560 | */ |
559 | savesegment(es, prev->es); | 561 | savesegment(es, prev->es); |
560 | if (unlikely(next->es | prev->es)) | 562 | if (unlikely(next->es | prev->es)) |
561 | loadsegment(es, next->es); | 563 | loadsegment(es, next->es); |
562 | 564 | ||
563 | savesegment(ds, prev->ds); | 565 | savesegment(ds, prev->ds); |
564 | if (unlikely(next->ds | prev->ds)) | 566 | if (unlikely(next->ds | prev->ds)) |
@@ -584,7 +586,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
584 | */ | 586 | */ |
585 | arch_leave_lazy_cpu_mode(); | 587 | arch_leave_lazy_cpu_mode(); |
586 | 588 | ||
587 | /* | 589 | /* |
588 | * Switch FS and GS. | 590 | * Switch FS and GS. |
589 | * | 591 | * |
590 | * Segment register != 0 always requires a reload. Also | 592 | * Segment register != 0 always requires a reload. Also |
@@ -593,13 +595,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
593 | */ | 595 | */ |
594 | if (unlikely(fsindex | next->fsindex | prev->fs)) { | 596 | if (unlikely(fsindex | next->fsindex | prev->fs)) { |
595 | loadsegment(fs, next->fsindex); | 597 | loadsegment(fs, next->fsindex); |
596 | /* | 598 | /* |
597 | * Check if the user used a selector != 0; if yes | 599 | * Check if the user used a selector != 0; if yes |
598 | * clear 64bit base, since overloaded base is always | 600 | * clear 64bit base, since overloaded base is always |
599 | * mapped to the Null selector | 601 | * mapped to the Null selector |
600 | */ | 602 | */ |
601 | if (fsindex) | 603 | if (fsindex) |
602 | prev->fs = 0; | 604 | prev->fs = 0; |
603 | } | 605 | } |
604 | /* when next process has a 64bit base use it */ | 606 | /* when next process has a 64bit base use it */ |
605 | if (next->fs) | 607 | if (next->fs) |
@@ -609,7 +611,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
609 | if (unlikely(gsindex | next->gsindex | prev->gs)) { | 611 | if (unlikely(gsindex | next->gsindex | prev->gs)) { |
610 | load_gs_index(next->gsindex); | 612 | load_gs_index(next->gsindex); |
611 | if (gsindex) | 613 | if (gsindex) |
612 | prev->gs = 0; | 614 | prev->gs = 0; |
613 | } | 615 | } |
614 | if (next->gs) | 616 | if (next->gs) |
615 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | 617 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); |
@@ -618,12 +620,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
618 | /* Must be after DS reload */ | 620 | /* Must be after DS reload */ |
619 | unlazy_fpu(prev_p); | 621 | unlazy_fpu(prev_p); |
620 | 622 | ||
621 | /* | 623 | /* |
622 | * Switch the PDA and FPU contexts. | 624 | * Switch the PDA and FPU contexts. |
623 | */ | 625 | */ |
624 | prev->usersp = read_pda(oldrsp); | 626 | prev->usersp = read_pda(oldrsp); |
625 | write_pda(oldrsp, next->usersp); | 627 | write_pda(oldrsp, next->usersp); |
626 | write_pda(pcurrent, next_p); | 628 | write_pda(pcurrent, next_p); |
627 | 629 | ||
628 | write_pda(kernelstack, | 630 | write_pda(kernelstack, |
629 | (unsigned long)task_stack_page(next_p) + | 631 | (unsigned long)task_stack_page(next_p) + |
@@ -664,7 +666,7 @@ long sys_execve(char __user *name, char __user * __user *argv, | |||
664 | char __user * __user *envp, struct pt_regs *regs) | 666 | char __user * __user *envp, struct pt_regs *regs) |
665 | { | 667 | { |
666 | long error; | 668 | long error; |
667 | char * filename; | 669 | char *filename; |
668 | 670 | ||
669 | filename = getname(name); | 671 | filename = getname(name); |
670 | error = PTR_ERR(filename); | 672 | error = PTR_ERR(filename); |
@@ -722,55 +724,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs) | |||
722 | unsigned long get_wchan(struct task_struct *p) | 724 | unsigned long get_wchan(struct task_struct *p) |
723 | { | 725 | { |
724 | unsigned long stack; | 726 | unsigned long stack; |
725 | u64 fp,ip; | 727 | u64 fp, ip; |
726 | int count = 0; | 728 | int count = 0; |
727 | 729 | ||
728 | if (!p || p == current || p->state==TASK_RUNNING) | 730 | if (!p || p == current || p->state == TASK_RUNNING) |
729 | return 0; | 731 | return 0; |
730 | stack = (unsigned long)task_stack_page(p); | 732 | stack = (unsigned long)task_stack_page(p); |
731 | if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) | 733 | if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) |
732 | return 0; | 734 | return 0; |
733 | fp = *(u64 *)(p->thread.sp); | 735 | fp = *(u64 *)(p->thread.sp); |
734 | do { | 736 | do { |
735 | if (fp < (unsigned long)stack || | 737 | if (fp < (unsigned long)stack || |
736 | fp > (unsigned long)stack+THREAD_SIZE) | 738 | fp > (unsigned long)stack+THREAD_SIZE) |
737 | return 0; | 739 | return 0; |
738 | ip = *(u64 *)(fp+8); | 740 | ip = *(u64 *)(fp+8); |
739 | if (!in_sched_functions(ip)) | 741 | if (!in_sched_functions(ip)) |
740 | return ip; | 742 | return ip; |
741 | fp = *(u64 *)fp; | 743 | fp = *(u64 *)fp; |
742 | } while (count++ < 16); | 744 | } while (count++ < 16); |
743 | return 0; | 745 | return 0; |
744 | } | 746 | } |
745 | 747 | ||
746 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | 748 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) |
747 | { | 749 | { |
748 | int ret = 0; | 750 | int ret = 0; |
749 | int doit = task == current; | 751 | int doit = task == current; |
750 | int cpu; | 752 | int cpu; |
751 | 753 | ||
752 | switch (code) { | 754 | switch (code) { |
753 | case ARCH_SET_GS: | 755 | case ARCH_SET_GS: |
754 | if (addr >= TASK_SIZE_OF(task)) | 756 | if (addr >= TASK_SIZE_OF(task)) |
755 | return -EPERM; | 757 | return -EPERM; |
756 | cpu = get_cpu(); | 758 | cpu = get_cpu(); |
757 | /* handle small bases via the GDT because that's faster to | 759 | /* handle small bases via the GDT because that's faster to |
758 | switch. */ | 760 | switch. */ |
759 | if (addr <= 0xffffffff) { | 761 | if (addr <= 0xffffffff) { |
760 | set_32bit_tls(task, GS_TLS, addr); | 762 | set_32bit_tls(task, GS_TLS, addr); |
761 | if (doit) { | 763 | if (doit) { |
762 | load_TLS(&task->thread, cpu); | 764 | load_TLS(&task->thread, cpu); |
763 | load_gs_index(GS_TLS_SEL); | 765 | load_gs_index(GS_TLS_SEL); |
764 | } | 766 | } |
765 | task->thread.gsindex = GS_TLS_SEL; | 767 | task->thread.gsindex = GS_TLS_SEL; |
766 | task->thread.gs = 0; | 768 | task->thread.gs = 0; |
767 | } else { | 769 | } else { |
768 | task->thread.gsindex = 0; | 770 | task->thread.gsindex = 0; |
769 | task->thread.gs = addr; | 771 | task->thread.gs = addr; |
770 | if (doit) { | 772 | if (doit) { |
771 | load_gs_index(0); | 773 | load_gs_index(0); |
772 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); | 774 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); |
773 | } | 775 | } |
774 | } | 776 | } |
775 | put_cpu(); | 777 | put_cpu(); |
776 | break; | 778 | break; |
@@ -824,8 +826,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | |||
824 | rdmsrl(MSR_KERNEL_GS_BASE, base); | 826 | rdmsrl(MSR_KERNEL_GS_BASE, base); |
825 | else | 827 | else |
826 | base = task->thread.gs; | 828 | base = task->thread.gs; |
827 | } | 829 | } else |
828 | else | ||
829 | base = task->thread.gs; | 830 | base = task->thread.gs; |
830 | ret = put_user(base, (unsigned long __user *)addr); | 831 | ret = put_user(base, (unsigned long __user *)addr); |
831 | break; | 832 | break; |
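Most of the __show_regs() hunk just prepends an explicit log level to each printk(). The KERN_* markers are plain string-literal prefixes that get pasted onto the format string, roughly as follows (value as of this kernel generation):

	#define KERN_INFO "<6>"	/* from linux/kernel.h of this era */

	printk(KERN_INFO "CPU %d:", smp_processor_id());
	/* compiles as printk("<6>CPU %d:", ...); the level travels in
	 * the format string, not as a separate argument. */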
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h
index 6dd7e2b70a4b..cc673aa55ce4 100644
--- a/arch/x86/kernel/sigframe.h
+++ b/arch/x86/kernel/sigframe.h
@@ -34,4 +34,9 @@ struct rt_sigframe { | |||
34 | struct siginfo info; | 34 | struct siginfo info; |
35 | /* fp state follows here */ | 35 | /* fp state follows here */ |
36 | }; | 36 | }; |
37 | |||
38 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
39 | sigset_t *set, struct pt_regs *regs); | ||
40 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | ||
41 | sigset_t *set, struct pt_regs *regs); | ||
37 | #endif | 42 | #endif |
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 2f28252d2d26..823a55bf8c39 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -20,9 +20,10 @@ | |||
20 | #include <linux/stddef.h> | 20 | #include <linux/stddef.h> |
21 | #include <linux/personality.h> | 21 | #include <linux/personality.h> |
22 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
23 | #include <linux/uaccess.h> | ||
24 | |||
23 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
24 | #include <asm/ucontext.h> | 26 | #include <asm/ucontext.h> |
25 | #include <asm/uaccess.h> | ||
26 | #include <asm/i387.h> | 27 | #include <asm/i387.h> |
27 | #include <asm/proto.h> | 28 | #include <asm/proto.h> |
28 | #include <asm/ia32_unistd.h> | 29 | #include <asm/ia32_unistd.h> |
@@ -44,11 +45,6 @@ | |||
44 | # define FIX_EFLAGS __FIX_EFLAGS | 45 | # define FIX_EFLAGS __FIX_EFLAGS |
45 | #endif | 46 | #endif |
46 | 47 | ||
47 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
48 | sigset_t *set, struct pt_regs * regs); | ||
49 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | ||
50 | sigset_t *set, struct pt_regs * regs); | ||
51 | |||
52 | asmlinkage long | 48 | asmlinkage long |
53 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 49 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
54 | struct pt_regs *regs) | 50 | struct pt_regs *regs) |
@@ -68,7 +64,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
68 | /* Always make any pending restarted system calls return -EINTR */ | 64 | /* Always make any pending restarted system calls return -EINTR */ |
69 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 65 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
70 | 66 | ||
71 | #define COPY(x) err |= __get_user(regs->x, &sc->x) | 67 | #define COPY(x) (err |= __get_user(regs->x, &sc->x)) |
72 | 68 | ||
73 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); | 69 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); |
74 | COPY(dx); COPY(cx); COPY(ip); | 70 | COPY(dx); COPY(cx); COPY(ip); |
@@ -98,7 +94,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
98 | } | 94 | } |
99 | 95 | ||
100 | { | 96 | { |
101 | struct _fpstate __user * buf; | 97 | struct _fpstate __user *buf; |
102 | err |= __get_user(buf, &sc->fpstate); | 98 | err |= __get_user(buf, &sc->fpstate); |
103 | err |= restore_i387_xstate(buf); | 99 | err |= restore_i387_xstate(buf); |
104 | } | 100 | } |
@@ -124,7 +120,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | |||
124 | current->blocked = set; | 120 | current->blocked = set; |
125 | recalc_sigpending(); | 121 | recalc_sigpending(); |
126 | spin_unlock_irq(¤t->sighand->siglock); | 122 | spin_unlock_irq(¤t->sighand->siglock); |
127 | 123 | ||
128 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 124 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
129 | goto badframe; | 125 | goto badframe; |
130 | 126 | ||
@@ -134,16 +130,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | |||
134 | return ax; | 130 | return ax; |
135 | 131 | ||
136 | badframe: | 132 | badframe: |
137 | signal_fault(regs,frame,"sigreturn"); | 133 | signal_fault(regs, frame, "sigreturn"); |
138 | return 0; | 134 | return 0; |
139 | } | 135 | } |
140 | 136 | ||
141 | /* | 137 | /* |
142 | * Set up a signal frame. | 138 | * Set up a signal frame. |
143 | */ | 139 | */ |
144 | 140 | ||
145 | static inline int | 141 | static inline int |
146 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me) | 142 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, |
143 | unsigned long mask, struct task_struct *me) | ||
147 | { | 144 | { |
148 | int err = 0; | 145 | int err = 0; |
149 | 146 | ||
@@ -199,7 +196,7 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) | |||
199 | } | 196 | } |
200 | 197 | ||
201 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 198 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
202 | sigset_t *set, struct pt_regs * regs) | 199 | sigset_t *set, struct pt_regs *regs) |
203 | { | 200 | { |
204 | struct rt_sigframe __user *frame; | 201 | struct rt_sigframe __user *frame; |
205 | void __user *fp = NULL; | 202 | void __user *fp = NULL; |
@@ -212,19 +209,19 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
212 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; | 209 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; |
213 | 210 | ||
214 | if (save_i387_xstate(fp) < 0) | 211 | if (save_i387_xstate(fp) < 0) |
215 | err |= -1; | 212 | err |= -1; |
216 | } else | 213 | } else |
217 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; | 214 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; |
218 | 215 | ||
219 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 216 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
220 | goto give_sigsegv; | 217 | goto give_sigsegv; |
221 | 218 | ||
222 | if (ka->sa.sa_flags & SA_SIGINFO) { | 219 | if (ka->sa.sa_flags & SA_SIGINFO) { |
223 | err |= copy_siginfo_to_user(&frame->info, info); | 220 | err |= copy_siginfo_to_user(&frame->info, info); |
224 | if (err) | 221 | if (err) |
225 | goto give_sigsegv; | 222 | goto give_sigsegv; |
226 | } | 223 | } |
227 | 224 | ||
228 | /* Create the ucontext. */ | 225 | /* Create the ucontext. */ |
229 | if (cpu_has_xsave) | 226 | if (cpu_has_xsave) |
230 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); | 227 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); |
@@ -237,9 +234,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
237 | err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); | 234 | err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); |
238 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); | 235 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); |
239 | err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); | 236 | err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); |
240 | if (sizeof(*set) == 16) { | 237 | if (sizeof(*set) == 16) { |
241 | __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); | 238 | __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); |
242 | __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); | 239 | __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); |
243 | } else | 240 | } else |
244 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 241 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
245 | 242 | ||
@@ -250,7 +247,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
250 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); | 247 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); |
251 | } else { | 248 | } else { |
252 | /* could use a vstub here */ | 249 | /* could use a vstub here */ |
253 | goto give_sigsegv; | 250 | goto give_sigsegv; |
254 | } | 251 | } |
255 | 252 | ||
256 | if (err) | 253 | if (err) |
@@ -258,7 +255,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
258 | 255 | ||
259 | /* Set up registers for signal handler */ | 256 | /* Set up registers for signal handler */ |
260 | regs->di = sig; | 257 | regs->di = sig; |
261 | /* In case the signal handler was declared without prototypes */ | 258 | /* In case the signal handler was declared without prototypes */ |
262 | regs->ax = 0; | 259 | regs->ax = 0; |
263 | 260 | ||
264 | /* This also works for non SA_SIGINFO handlers because they expect the | 261 | /* This also works for non SA_SIGINFO handlers because they expect the |
@@ -282,7 +279,7 @@ give_sigsegv: | |||
282 | 279 | ||
283 | /* | 280 | /* |
284 | * OK, we're invoking a handler | 281 | * OK, we're invoking a handler |
285 | */ | 282 | */ |
286 | 283 | ||
287 | static int | 284 | static int |
288 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 285 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
@@ -326,7 +323,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
326 | ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); | 323 | ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); |
327 | else | 324 | else |
328 | ret = ia32_setup_frame(sig, ka, oldset, regs); | 325 | ret = ia32_setup_frame(sig, ka, oldset, regs); |
329 | } else | 326 | } else |
330 | #endif | 327 | #endif |
331 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 328 | ret = setup_rt_frame(sig, ka, info, oldset, regs); |
332 | 329 | ||
@@ -352,9 +349,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
352 | regs->flags &= ~X86_EFLAGS_TF; | 349 | regs->flags &= ~X86_EFLAGS_TF; |
353 | 350 | ||
354 | spin_lock_irq(¤t->sighand->siglock); | 351 | spin_lock_irq(¤t->sighand->siglock); |
355 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | 352 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); |
356 | if (!(ka->sa.sa_flags & SA_NODEFER)) | 353 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
357 | sigaddset(¤t->blocked,sig); | 354 | sigaddset(¤t->blocked, sig); |
358 | recalc_sigpending(); | 355 | recalc_sigpending(); |
359 | spin_unlock_irq(¤t->sighand->siglock); | 356 | spin_unlock_irq(¤t->sighand->siglock); |
360 | 357 | ||
@@ -464,14 +461,15 @@ void do_notify_resume(struct pt_regs *regs, void *unused, | |||
464 | } | 461 | } |
465 | 462 | ||
466 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) | 463 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) |
467 | { | 464 | { |
468 | struct task_struct *me = current; | 465 | struct task_struct *me = current; |
469 | if (show_unhandled_signals && printk_ratelimit()) { | 466 | if (show_unhandled_signals && printk_ratelimit()) { |
470 | printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", | 467 | printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", |
471 | me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax); | 468 | me->comm, me->pid, where, frame, regs->ip, |
469 | regs->sp, regs->orig_ax); | ||
472 | print_vma_addr(" in ", regs->ip); | 470 | print_vma_addr(" in ", regs->ip); |
473 | printk("\n"); | 471 | printk("\n"); |
474 | } | 472 | } |
475 | 473 | ||
476 | force_sig(SIGSEGV, me); | 474 | force_sig(SIGSEGV, me); |
477 | } | 475 | } |
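An easy-to-miss change above is the COPY() macro gaining an outer pair of parentheses. The usual reason for that rule: an unparenthesized expression macro can bind unexpectedly when used inside a larger expression. Hypothetical names, illustrative only:

	#define COPY_BARE(x)   err |= __get_user(regs->x, &sc->x)
	#define COPY_PAREN(x)  (err |= __get_user(regs->x, &sc->x))

	/* "if (COPY_BARE(di) == 0)" expands so that == binds before |=,
	 * i.e. err |= (__get_user(...) == 0), which is not what the
	 * caller meant; the parenthesized form keeps the whole
	 * assignment as a single operand. */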
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index aa804c64b167..2ff0bbcd5bd1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1313,16 +1313,13 @@ __init void prefill_possible_map(void) | |||
1313 | if (!num_processors) | 1313 | if (!num_processors) |
1314 | num_processors = 1; | 1314 | num_processors = 1; |
1315 | 1315 | ||
1316 | #ifdef CONFIG_HOTPLUG_CPU | ||
1317 | if (additional_cpus == -1) { | 1316 | if (additional_cpus == -1) { |
1318 | if (disabled_cpus > 0) | 1317 | if (disabled_cpus > 0) |
1319 | additional_cpus = disabled_cpus; | 1318 | additional_cpus = disabled_cpus; |
1320 | else | 1319 | else |
1321 | additional_cpus = 0; | 1320 | additional_cpus = 0; |
1322 | } | 1321 | } |
1323 | #else | 1322 | |
1324 | additional_cpus = 0; | ||
1325 | #endif | ||
1326 | possible = num_processors + additional_cpus; | 1323 | possible = num_processors + additional_cpus; |
1327 | if (possible > NR_CPUS) | 1324 | if (possible > NR_CPUS) |
1328 | possible = NR_CPUS; | 1325 | possible = NR_CPUS; |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index c9288c883e20..6bc211accf08 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -13,16 +13,17 @@ | |||
13 | #include <linux/utsname.h> | 13 | #include <linux/utsname.h> |
14 | #include <linux/personality.h> | 14 | #include <linux/personality.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/uaccess.h> | ||
16 | 17 | ||
17 | #include <asm/uaccess.h> | ||
18 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
19 | #include <asm/syscalls.h> | 19 | #include <asm/syscalls.h> |
20 | 20 | ||
21 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, | 21 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, |
22 | unsigned long fd, unsigned long off) | 22 | unsigned long prot, unsigned long flags, |
23 | unsigned long fd, unsigned long off) | ||
23 | { | 24 | { |
24 | long error; | 25 | long error; |
25 | struct file * file; | 26 | struct file *file; |
26 | 27 | ||
27 | error = -EINVAL; | 28 | error = -EINVAL; |
28 | if (off & ~PAGE_MASK) | 29 | if (off & ~PAGE_MASK) |
@@ -57,9 +58,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
57 | unmapped base down for this case. This can give | 58 | unmapped base down for this case. This can give |
58 | conflicts with the heap, but we assume that glibc | 59 | conflicts with the heap, but we assume that glibc |
59 | malloc knows how to fall back to mmap. Give it 1GB | 60 | malloc knows how to fall back to mmap. Give it 1GB |
60 | of playground for now. -AK */ | 61 | of playground for now. -AK */ |
61 | *begin = 0x40000000; | 62 | *begin = 0x40000000; |
62 | *end = 0x80000000; | 63 | *end = 0x80000000; |
63 | if (current->flags & PF_RANDOMIZE) { | 64 | if (current->flags & PF_RANDOMIZE) { |
64 | new_begin = randomize_range(*begin, *begin + 0x02000000, 0); | 65 | new_begin = randomize_range(*begin, *begin + 0x02000000, 0); |
65 | if (new_begin) | 66 | if (new_begin) |
@@ -67,9 +68,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
67 | } | 68 | } |
68 | } else { | 69 | } else { |
69 | *begin = TASK_UNMAPPED_BASE; | 70 | *begin = TASK_UNMAPPED_BASE; |
70 | *end = TASK_SIZE; | 71 | *end = TASK_SIZE; |
71 | } | 72 | } |
72 | } | 73 | } |
73 | 74 | ||
74 | unsigned long | 75 | unsigned long |
75 | arch_get_unmapped_area(struct file *filp, unsigned long addr, | 76 | arch_get_unmapped_area(struct file *filp, unsigned long addr, |
@@ -79,11 +80,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
79 | struct vm_area_struct *vma; | 80 | struct vm_area_struct *vma; |
80 | unsigned long start_addr; | 81 | unsigned long start_addr; |
81 | unsigned long begin, end; | 82 | unsigned long begin, end; |
82 | 83 | ||
83 | if (flags & MAP_FIXED) | 84 | if (flags & MAP_FIXED) |
84 | return addr; | 85 | return addr; |
85 | 86 | ||
86 | find_start_end(flags, &begin, &end); | 87 | find_start_end(flags, &begin, &end); |
87 | 88 | ||
88 | if (len > end) | 89 | if (len > end) |
89 | return -ENOMEM; | 90 | return -ENOMEM; |
@@ -97,12 +98,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
97 | } | 98 | } |
98 | if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) | 99 | if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) |
99 | && len <= mm->cached_hole_size) { | 100 | && len <= mm->cached_hole_size) { |
100 | mm->cached_hole_size = 0; | 101 | mm->cached_hole_size = 0; |
101 | mm->free_area_cache = begin; | 102 | mm->free_area_cache = begin; |
102 | } | 103 | } |
103 | addr = mm->free_area_cache; | 104 | addr = mm->free_area_cache; |
104 | if (addr < begin) | 105 | if (addr < begin) |
105 | addr = begin; | 106 | addr = begin; |
106 | start_addr = addr; | 107 | start_addr = addr; |
107 | 108 | ||
108 | full_search: | 109 | full_search: |
@@ -128,7 +129,7 @@ full_search: | |||
128 | return addr; | 129 | return addr; |
129 | } | 130 | } |
130 | if (addr + mm->cached_hole_size < vma->vm_start) | 131 | if (addr + mm->cached_hole_size < vma->vm_start) |
131 | mm->cached_hole_size = vma->vm_start - addr; | 132 | mm->cached_hole_size = vma->vm_start - addr; |
132 | 133 | ||
133 | addr = vma->vm_end; | 134 | addr = vma->vm_end; |
134 | } | 135 | } |
@@ -178,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
178 | vma = find_vma(mm, addr-len); | 179 | vma = find_vma(mm, addr-len); |
179 | if (!vma || addr <= vma->vm_start) | 180 | if (!vma || addr <= vma->vm_start) |
180 | /* remember the address as a hint for next time */ | 181 | /* remember the address as a hint for next time */ |
181 | return (mm->free_area_cache = addr-len); | 182 | return mm->free_area_cache = addr-len; |
182 | } | 183 | } |
183 | 184 | ||
184 | if (mm->mmap_base < len) | 185 | if (mm->mmap_base < len) |
@@ -195,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
195 | vma = find_vma(mm, addr); | 196 | vma = find_vma(mm, addr); |
196 | if (!vma || addr+len <= vma->vm_start) | 197 | if (!vma || addr+len <= vma->vm_start) |
197 | /* remember the address as a hint for next time */ | 198 | /* remember the address as a hint for next time */ |
198 | return (mm->free_area_cache = addr); | 199 | return mm->free_area_cache = addr; |
199 | 200 | ||
200 | /* remember the largest hole we saw so far */ | 201 | /* remember the largest hole we saw so far */ |
201 | if (addr + mm->cached_hole_size < vma->vm_start) | 202 | if (addr + mm->cached_hole_size < vma->vm_start) |
@@ -225,13 +226,13 @@ bottomup: | |||
225 | } | 226 | } |
226 | 227 | ||
227 | 228 | ||
228 | asmlinkage long sys_uname(struct new_utsname __user * name) | 229 | asmlinkage long sys_uname(struct new_utsname __user *name) |
229 | { | 230 | { |
230 | int err; | 231 | int err; |
231 | down_read(&uts_sem); | 232 | down_read(&uts_sem); |
232 | err = copy_to_user(name, utsname(), sizeof (*name)); | 233 | err = copy_to_user(name, utsname(), sizeof(*name)); |
233 | up_read(&uts_sem); | 234 | up_read(&uts_sem); |
234 | if (personality(current->personality) == PER_LINUX32) | 235 | if (personality(current->personality) == PER_LINUX32) |
235 | err |= copy_to_user(&name->machine, "i686", 5); | 236 | err |= copy_to_user(&name->machine, "i686", 5); |
236 | return err ? -EFAULT : 0; | 237 | return err ? -EFAULT : 0; |
237 | } | 238 | } |
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 38eb76156a47..56d6f1147785 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -32,6 +32,8 @@ | |||
32 | #include <linux/bug.h> | 32 | #include <linux/bug.h> |
33 | #include <linux/nmi.h> | 33 | #include <linux/nmi.h> |
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | #include <linux/smp.h> | ||
36 | #include <linux/io.h> | ||
35 | 37 | ||
36 | #if defined(CONFIG_EDAC) | 38 | #if defined(CONFIG_EDAC) |
37 | #include <linux/edac.h> | 39 | #include <linux/edac.h> |
@@ -45,9 +47,6 @@ | |||
45 | #include <asm/unwind.h> | 47 | #include <asm/unwind.h> |
46 | #include <asm/desc.h> | 48 | #include <asm/desc.h> |
47 | #include <asm/i387.h> | 49 | #include <asm/i387.h> |
48 | #include <asm/nmi.h> | ||
49 | #include <asm/smp.h> | ||
50 | #include <asm/io.h> | ||
51 | #include <asm/pgalloc.h> | 50 | #include <asm/pgalloc.h> |
52 | #include <asm/proto.h> | 51 | #include <asm/proto.h> |
53 | #include <asm/pda.h> | 52 | #include <asm/pda.h> |
@@ -85,7 +84,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) | |||
85 | 84 | ||
86 | void printk_address(unsigned long address, int reliable) | 85 | void printk_address(unsigned long address, int reliable) |
87 | { | 86 | { |
88 | printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); | 87 | printk(" [<%016lx>] %s%pS\n", |
88 | address, reliable ? "" : "? ", (void *) address); | ||
89 | } | 89 | } |
90 | 90 | ||
91 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | 91 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, |
@@ -98,7 +98,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
98 | [STACKFAULT_STACK - 1] = "#SS", | 98 | [STACKFAULT_STACK - 1] = "#SS", |
99 | [MCE_STACK - 1] = "#MC", | 99 | [MCE_STACK - 1] = "#MC", |
100 | #if DEBUG_STKSZ > EXCEPTION_STKSZ | 100 | #if DEBUG_STKSZ > EXCEPTION_STKSZ |
101 | [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" | 101 | [N_EXCEPTION_STACKS ... |
102 | N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" | ||
102 | #endif | 103 | #endif |
103 | }; | 104 | }; |
104 | unsigned k; | 105 | unsigned k; |
@@ -163,7 +164,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
163 | } | 164 | } |
164 | 165 | ||
165 | /* | 166 | /* |
166 | * x86-64 can have up to three kernel stacks: | 167 | * x86-64 can have up to three kernel stacks: |
167 | * process stack | 168 | * process stack |
168 | * interrupt stack | 169 | * interrupt stack |
169 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack | 170 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack |
@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
219 | const struct stacktrace_ops *ops, void *data) | 220 | const struct stacktrace_ops *ops, void *data) |
220 | { | 221 | { |
221 | const unsigned cpu = get_cpu(); | 222 | const unsigned cpu = get_cpu(); |
222 | unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; | 223 | unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; |
223 | unsigned used = 0; | 224 | unsigned used = 0; |
224 | struct thread_info *tinfo; | 225 | struct thread_info *tinfo; |
225 | 226 | ||
@@ -237,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
237 | if (!bp) { | 238 | if (!bp) { |
238 | if (task == current) { | 239 | if (task == current) { |
239 | /* Grab bp right from our regs */ | 240 | /* Grab bp right from our regs */ |
240 | asm("movq %%rbp, %0" : "=r" (bp) :); | 241 | asm("movq %%rbp, %0" : "=r" (bp) : ); |
241 | } else { | 242 | } else { |
242 | /* bp is the last reg pushed by switch_to */ | 243 | /* bp is the last reg pushed by switch_to */ |
243 | bp = *(unsigned long *) task->thread.sp; | 244 | bp = *(unsigned long *) task->thread.sp; |
@@ -357,11 +358,15 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
357 | unsigned long *stack; | 358 | unsigned long *stack; |
358 | int i; | 359 | int i; |
359 | const int cpu = smp_processor_id(); | 360 | const int cpu = smp_processor_id(); |
360 | unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); | 361 | unsigned long *irqstack_end = |
361 | unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); | 362 | (unsigned long *) (cpu_pda(cpu)->irqstackptr); |
363 | unsigned long *irqstack = | ||
364 | (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); | ||
362 | 365 | ||
363 | // debugging aid: "show_stack(NULL, NULL);" prints the | 366 | /* |
364 | // back trace for this cpu. | 367 | * debugging aid: "show_stack(NULL, NULL);" prints the |
368 | * back trace for this cpu. | ||
369 | */ | ||
365 | 370 | ||
366 | if (sp == NULL) { | 371 | if (sp == NULL) { |
367 | if (task) | 372 | if (task) |
@@ -404,7 +409,7 @@ void dump_stack(void) | |||
404 | 409 | ||
405 | #ifdef CONFIG_FRAME_POINTER | 410 | #ifdef CONFIG_FRAME_POINTER |
406 | if (!bp) | 411 | if (!bp) |
407 | asm("movq %%rbp, %0" : "=r" (bp):); | 412 | asm("movq %%rbp, %0" : "=r" (bp) : ); |
408 | #endif | 413 | #endif |
409 | 414 | ||
410 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", | 415 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", |
@@ -414,7 +419,6 @@ void dump_stack(void) | |||
414 | init_utsname()->version); | 419 | init_utsname()->version); |
415 | show_trace(NULL, NULL, &stack, bp); | 420 | show_trace(NULL, NULL, &stack, bp); |
416 | } | 421 | } |
417 | |||
418 | EXPORT_SYMBOL(dump_stack); | 422 | EXPORT_SYMBOL(dump_stack); |
419 | 423 | ||
420 | void show_registers(struct pt_regs *regs) | 424 | void show_registers(struct pt_regs *regs) |
@@ -493,7 +497,7 @@ unsigned __kprobes long oops_begin(void) | |||
493 | raw_local_irq_save(flags); | 497 | raw_local_irq_save(flags); |
494 | cpu = smp_processor_id(); | 498 | cpu = smp_processor_id(); |
495 | if (!__raw_spin_trylock(&die_lock)) { | 499 | if (!__raw_spin_trylock(&die_lock)) { |
496 | if (cpu == die_owner) | 500 | if (cpu == die_owner) |
497 | /* nested oops. should stop eventually */; | 501 | /* nested oops. should stop eventually */; |
498 | else | 502 | else |
499 | __raw_spin_lock(&die_lock); | 503 | __raw_spin_lock(&die_lock); |
@@ -638,7 +642,7 @@ kernel_trap: | |||
638 | } | 642 | } |
639 | 643 | ||
640 | #define DO_ERROR(trapnr, signr, str, name) \ | 644 | #define DO_ERROR(trapnr, signr, str, name) \ |
641 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 645 | asmlinkage void do_##name(struct pt_regs *regs, long error_code) \ |
642 | { \ | 646 | { \ |
643 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 647 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
644 | == NOTIFY_STOP) \ | 648 | == NOTIFY_STOP) \ |
@@ -648,7 +652,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | |||
648 | } | 652 | } |
649 | 653 | ||
650 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ | 654 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ |
651 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 655 | asmlinkage void do_##name(struct pt_regs *regs, long error_code) \ |
652 | { \ | 656 | { \ |
653 | siginfo_t info; \ | 657 | siginfo_t info; \ |
654 | info.si_signo = signr; \ | 658 | info.si_signo = signr; \ |
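
For readers who have not met these stamp-out macros, a hand-expanded sketch of one DO_ERROR_INFO() instance may help. The invocation chosen below is illustrative (the actual trap table lives elsewhere in traps_64.c, outside this hunk), and only the prologue visible above is reproduced:

    /* Rough expansion of DO_ERROR_INFO(0, SIGFPE, "divide error",
     * divide_error, FPE_INTDIV, regs->ip) -- illustrative only. */
    asmlinkage void do_divide_error(struct pt_regs *regs, long error_code)
    {
            siginfo_t info;

            info.si_signo = SIGFPE;
            /* ... the remaining siginfo fields and the rest of the handler
             * body follow in the macro, outside the lines shown above ... */
    }
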
@@ -683,7 +687,7 @@ asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code) | |||
683 | preempt_conditional_cli(regs); | 687 | preempt_conditional_cli(regs); |
684 | } | 688 | } |
685 | 689 | ||
686 | asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) | 690 | asmlinkage void do_double_fault(struct pt_regs *regs, long error_code) |
687 | { | 691 | { |
688 | static const char str[] = "double fault"; | 692 | static const char str[] = "double fault"; |
689 | struct task_struct *tsk = current; | 693 | struct task_struct *tsk = current; |
@@ -778,9 +782,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs) | |||
778 | } | 782 | } |
779 | 783 | ||
780 | static notrace __kprobes void | 784 | static notrace __kprobes void |
781 | unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | 785 | unknown_nmi_error(unsigned char reason, struct pt_regs *regs) |
782 | { | 786 | { |
783 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | 787 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == |
788 | NOTIFY_STOP) | ||
784 | return; | 789 | return; |
785 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", | 790 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", |
786 | reason); | 791 | reason); |
@@ -882,7 +887,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | |||
882 | else if (user_mode(eregs)) | 887 | else if (user_mode(eregs)) |
883 | regs = task_pt_regs(current); | 888 | regs = task_pt_regs(current); |
884 | /* Exception from kernel and interrupts are enabled. Move to | 889 | /* Exception from kernel and interrupts are enabled. Move to |
885 | kernel process stack. */ | 890 | kernel process stack. */ |
886 | else if (eregs->flags & X86_EFLAGS_IF) | 891 | else if (eregs->flags & X86_EFLAGS_IF) |
887 | regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); | 892 | regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); |
888 | if (eregs != regs) | 893 | if (eregs != regs) |
@@ -891,7 +896,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | |||
891 | } | 896 | } |
892 | 897 | ||
893 | /* runs on IST stack. */ | 898 | /* runs on IST stack. */ |
894 | asmlinkage void __kprobes do_debug(struct pt_regs * regs, | 899 | asmlinkage void __kprobes do_debug(struct pt_regs *regs, |
895 | unsigned long error_code) | 900 | unsigned long error_code) |
896 | { | 901 | { |
897 | struct task_struct *tsk = current; | 902 | struct task_struct *tsk = current; |
@@ -1035,7 +1040,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs) | |||
1035 | 1040 | ||
1036 | asmlinkage void bad_intr(void) | 1041 | asmlinkage void bad_intr(void) |
1037 | { | 1042 | { |
1038 | printk("bad interrupt"); | 1043 | printk("bad interrupt"); |
1039 | } | 1044 | } |
1040 | 1045 | ||
1041 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | 1046 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) |
@@ -1047,7 +1052,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | |||
1047 | 1052 | ||
1048 | conditional_sti(regs); | 1053 | conditional_sti(regs); |
1049 | if (!user_mode(regs) && | 1054 | if (!user_mode(regs) && |
1050 | kernel_math_error(regs, "kernel simd math error", 19)) | 1055 | kernel_math_error(regs, "kernel simd math error", 19)) |
1051 | return; | 1056 | return; |
1052 | 1057 | ||
1053 | /* | 1058 | /* |
@@ -1092,7 +1097,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | |||
1092 | force_sig_info(SIGFPE, &info, task); | 1097 | force_sig_info(SIGFPE, &info, task); |
1093 | } | 1098 | } |
1094 | 1099 | ||
1095 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs) | 1100 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs) |
1096 | { | 1101 | { |
1097 | } | 1102 | } |
1098 | 1103 | ||
@@ -1149,8 +1154,10 @@ void __init trap_init(void) | |||
1149 | set_intr_gate(0, &divide_error); | 1154 | set_intr_gate(0, &divide_error); |
1150 | set_intr_gate_ist(1, &debug, DEBUG_STACK); | 1155 | set_intr_gate_ist(1, &debug, DEBUG_STACK); |
1151 | set_intr_gate_ist(2, &nmi, NMI_STACK); | 1156 | set_intr_gate_ist(2, &nmi, NMI_STACK); |
1152 | set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ | 1157 | /* int3 can be called from all */ |
1153 | set_system_gate(4, &overflow); /* int4 can be called from all */ | 1158 | set_system_gate_ist(3, &int3, DEBUG_STACK); |
1159 | /* int4 can be called from all */ | ||
1160 | set_system_gate(4, &overflow); | ||
1154 | set_intr_gate(5, &bounds); | 1161 | set_intr_gate(5, &bounds); |
1155 | set_intr_gate(6, &invalid_op); | 1162 | set_intr_gate(6, &invalid_op); |
1156 | set_intr_gate(7, &device_not_available); | 1163 | set_intr_gate(7, &device_not_available); |
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 594ef47f0a63..61a97e616f70 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -25,45 +25,31 @@ | |||
25 | #include <asm/visws/cobalt.h> | 25 | #include <asm/visws/cobalt.h> |
26 | #include <asm/visws/piix4.h> | 26 | #include <asm/visws/piix4.h> |
27 | #include <asm/arch_hooks.h> | 27 | #include <asm/arch_hooks.h> |
28 | #include <asm/io_apic.h> | ||
28 | #include <asm/fixmap.h> | 29 | #include <asm/fixmap.h> |
29 | #include <asm/reboot.h> | 30 | #include <asm/reboot.h> |
30 | #include <asm/setup.h> | 31 | #include <asm/setup.h> |
31 | #include <asm/e820.h> | 32 | #include <asm/e820.h> |
32 | #include <asm/smp.h> | ||
33 | #include <asm/io.h> | 33 | #include <asm/io.h> |
34 | 34 | ||
35 | #include <mach_ipi.h> | 35 | #include <mach_ipi.h> |
36 | 36 | ||
37 | #include "mach_apic.h" | 37 | #include "mach_apic.h" |
38 | 38 | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/smp.h> | ||
41 | |||
42 | #include <linux/kernel_stat.h> | 39 | #include <linux/kernel_stat.h> |
43 | #include <linux/interrupt.h> | ||
44 | #include <linux/init.h> | ||
45 | 40 | ||
46 | #include <asm/io.h> | ||
47 | #include <asm/apic.h> | ||
48 | #include <asm/i8259.h> | 41 | #include <asm/i8259.h> |
49 | #include <asm/irq_vectors.h> | 42 | #include <asm/irq_vectors.h> |
50 | #include <asm/visws/cobalt.h> | ||
51 | #include <asm/visws/lithium.h> | 43 | #include <asm/visws/lithium.h> |
52 | #include <asm/visws/piix4.h> | ||
53 | 44 | ||
54 | #include <linux/sched.h> | 45 | #include <linux/sched.h> |
55 | #include <linux/kernel.h> | 46 | #include <linux/kernel.h> |
56 | #include <linux/init.h> | ||
57 | #include <linux/pci.h> | 47 | #include <linux/pci.h> |
58 | #include <linux/pci_ids.h> | 48 | #include <linux/pci_ids.h> |
59 | 49 | ||
60 | extern int no_broadcast; | 50 | extern int no_broadcast; |
61 | 51 | ||
62 | #include <asm/io.h> | ||
63 | #include <asm/apic.h> | 52 | #include <asm/apic.h> |
64 | #include <asm/arch_hooks.h> | ||
65 | #include <asm/visws/cobalt.h> | ||
66 | #include <asm/visws/lithium.h> | ||
67 | 53 | ||
68 | char visws_board_type = -1; | 54 | char visws_board_type = -1; |
69 | char visws_board_rev = -1; | 55 | char visws_board_rev = -1; |
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index 01b868ba82f8..321cf720dbb6 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c | |||
@@ -16,37 +16,46 @@ static void __rdmsr_on_cpu(void *info) | |||
16 | rdmsr(rv->msr_no, rv->l, rv->h); | 16 | rdmsr(rv->msr_no, rv->l, rv->h); |
17 | } | 17 | } |
18 | 18 | ||
19 | static void __rdmsr_safe_on_cpu(void *info) | 19 | static void __wrmsr_on_cpu(void *info) |
20 | { | 20 | { |
21 | struct msr_info *rv = info; | 21 | struct msr_info *rv = info; |
22 | 22 | ||
23 | rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); | 23 | wrmsr(rv->msr_no, rv->l, rv->h); |
24 | } | 24 | } |
25 | 25 | ||
26 | static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) | 26 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
27 | { | 27 | { |
28 | int err = 0; | 28 | int err; |
29 | struct msr_info rv; | 29 | struct msr_info rv; |
30 | 30 | ||
31 | rv.msr_no = msr_no; | 31 | rv.msr_no = msr_no; |
32 | if (safe) { | 32 | err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); |
33 | err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, | ||
34 | &rv, 1); | ||
35 | err = err ? err : rv.err; | ||
36 | } else { | ||
37 | err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); | ||
38 | } | ||
39 | *l = rv.l; | 33 | *l = rv.l; |
40 | *h = rv.h; | 34 | *h = rv.h; |
41 | 35 | ||
42 | return err; | 36 | return err; |
43 | } | 37 | } |
44 | 38 | ||
45 | static void __wrmsr_on_cpu(void *info) | 39 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) |
40 | { | ||
41 | int err; | ||
42 | struct msr_info rv; | ||
43 | |||
44 | rv.msr_no = msr_no; | ||
45 | rv.l = l; | ||
46 | rv.h = h; | ||
47 | err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); | ||
48 | |||
49 | return err; | ||
50 | } | ||
51 | |||
52 | /* These "safe" variants are slower and should be used when the target MSR | ||
53 | may not actually exist. */ | ||
54 | static void __rdmsr_safe_on_cpu(void *info) | ||
46 | { | 55 | { |
47 | struct msr_info *rv = info; | 56 | struct msr_info *rv = info; |
48 | 57 | ||
49 | wrmsr(rv->msr_no, rv->l, rv->h); | 58 | rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); |
50 | } | 59 | } |
51 | 60 | ||
52 | static void __wrmsr_safe_on_cpu(void *info) | 61 | static void __wrmsr_safe_on_cpu(void *info) |
@@ -56,45 +65,30 @@ static void __wrmsr_safe_on_cpu(void *info) | |||
56 | rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); | 65 | rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); |
57 | } | 66 | } |
58 | 67 | ||
59 | static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) | 68 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
60 | { | 69 | { |
61 | int err = 0; | 70 | int err; |
62 | struct msr_info rv; | 71 | struct msr_info rv; |
63 | 72 | ||
64 | rv.msr_no = msr_no; | 73 | rv.msr_no = msr_no; |
65 | rv.l = l; | 74 | err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); |
66 | rv.h = h; | 75 | *l = rv.l; |
67 | if (safe) { | 76 | *h = rv.h; |
68 | err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, | ||
69 | &rv, 1); | ||
70 | err = err ? err : rv.err; | ||
71 | } else { | ||
72 | err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); | ||
73 | } | ||
74 | |||
75 | return err; | ||
76 | } | ||
77 | 77 | ||
78 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | 78 | return err ? err : rv.err; |
79 | { | ||
80 | return _wrmsr_on_cpu(cpu, msr_no, l, h, 0); | ||
81 | } | 79 | } |
82 | 80 | ||
83 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | ||
84 | { | ||
85 | return _rdmsr_on_cpu(cpu, msr_no, l, h, 0); | ||
86 | } | ||
87 | |||
88 | /* These "safe" variants are slower and should be used when the target MSR | ||
89 | may not actually exist. */ | ||
90 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | 81 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) |
91 | { | 82 | { |
92 | return _wrmsr_on_cpu(cpu, msr_no, l, h, 1); | 83 | int err; |
93 | } | 84 | struct msr_info rv; |
94 | 85 | ||
95 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 86 | rv.msr_no = msr_no; |
96 | { | 87 | rv.l = l; |
97 | return _rdmsr_on_cpu(cpu, msr_no, l, h, 1); | 88 | rv.h = h; |
89 | err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); | ||
90 | |||
91 | return err ? err : rv.err; | ||
98 | } | 92 | } |
99 | 93 | ||
100 | EXPORT_SYMBOL(rdmsr_on_cpu); | 94 | EXPORT_SYMBOL(rdmsr_on_cpu); |
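
After this rework the four exported helpers are straight-line wrappers around smp_call_function_single(). A caller sketch, using the signatures shown in the hunk (hypothetical, not part of this patch; CPU 2 and MSR_IA32_UCODE_REV are arbitrary examples, and <asm/msr.h>/<linux/kernel.h> are assumed to be included):

    /* Hypothetical caller: read an MSR on CPU 2 with the "safe" variant,
     * since the MSR may not exist on every CPU model. */
    static void example_read_msr(void)
    {
            u32 lo, hi;
            int err;

            err = rdmsr_safe_on_cpu(2, MSR_IA32_UCODE_REV, &lo, &hi);
            if (err)
                    printk(KERN_WARNING "MSR read failed: %d\n", err);
            else
                    printk(KERN_INFO "MSR %#x: %08x:%08x\n",
                           MSR_IA32_UCODE_REV, hi, lo);
    }
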
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c index 94972e7c094d..82004d2bf05e 100644 --- a/arch/x86/lib/string_32.c +++ b/arch/x86/lib/string_32.c | |||
@@ -22,7 +22,7 @@ char *strcpy(char *dest, const char *src) | |||
22 | "testb %%al,%%al\n\t" | 22 | "testb %%al,%%al\n\t" |
23 | "jne 1b" | 23 | "jne 1b" |
24 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) | 24 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) |
25 | :"0" (src), "1" (dest) : "memory"); | 25 | : "0" (src), "1" (dest) : "memory"); |
26 | return dest; | 26 | return dest; |
27 | } | 27 | } |
28 | EXPORT_SYMBOL(strcpy); | 28 | EXPORT_SYMBOL(strcpy); |
@@ -42,7 +42,7 @@ char *strncpy(char *dest, const char *src, size_t count) | |||
42 | "stosb\n" | 42 | "stosb\n" |
43 | "2:" | 43 | "2:" |
44 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) | 44 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) |
45 | :"0" (src), "1" (dest), "2" (count) : "memory"); | 45 | : "0" (src), "1" (dest), "2" (count) : "memory"); |
46 | return dest; | 46 | return dest; |
47 | } | 47 | } |
48 | EXPORT_SYMBOL(strncpy); | 48 | EXPORT_SYMBOL(strncpy); |
@@ -60,7 +60,7 @@ char *strcat(char *dest, const char *src) | |||
60 | "testb %%al,%%al\n\t" | 60 | "testb %%al,%%al\n\t" |
61 | "jne 1b" | 61 | "jne 1b" |
62 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) | 62 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) |
63 | : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory"); | 63 | : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu) : "memory"); |
64 | return dest; | 64 | return dest; |
65 | } | 65 | } |
66 | EXPORT_SYMBOL(strcat); | 66 | EXPORT_SYMBOL(strcat); |
@@ -105,9 +105,9 @@ int strcmp(const char *cs, const char *ct) | |||
105 | "2:\tsbbl %%eax,%%eax\n\t" | 105 | "2:\tsbbl %%eax,%%eax\n\t" |
106 | "orb $1,%%al\n" | 106 | "orb $1,%%al\n" |
107 | "3:" | 107 | "3:" |
108 | :"=a" (res), "=&S" (d0), "=&D" (d1) | 108 | : "=a" (res), "=&S" (d0), "=&D" (d1) |
109 | :"1" (cs), "2" (ct) | 109 | : "1" (cs), "2" (ct) |
110 | :"memory"); | 110 | : "memory"); |
111 | return res; | 111 | return res; |
112 | } | 112 | } |
113 | EXPORT_SYMBOL(strcmp); | 113 | EXPORT_SYMBOL(strcmp); |
@@ -130,9 +130,9 @@ int strncmp(const char *cs, const char *ct, size_t count) | |||
130 | "3:\tsbbl %%eax,%%eax\n\t" | 130 | "3:\tsbbl %%eax,%%eax\n\t" |
131 | "orb $1,%%al\n" | 131 | "orb $1,%%al\n" |
132 | "4:" | 132 | "4:" |
133 | :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) | 133 | : "=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) |
134 | :"1" (cs), "2" (ct), "3" (count) | 134 | : "1" (cs), "2" (ct), "3" (count) |
135 | :"memory"); | 135 | : "memory"); |
136 | return res; | 136 | return res; |
137 | } | 137 | } |
138 | EXPORT_SYMBOL(strncmp); | 138 | EXPORT_SYMBOL(strncmp); |
@@ -152,9 +152,9 @@ char *strchr(const char *s, int c) | |||
152 | "movl $1,%1\n" | 152 | "movl $1,%1\n" |
153 | "2:\tmovl %1,%0\n\t" | 153 | "2:\tmovl %1,%0\n\t" |
154 | "decl %0" | 154 | "decl %0" |
155 | :"=a" (res), "=&S" (d0) | 155 | : "=a" (res), "=&S" (d0) |
156 | :"1" (s), "0" (c) | 156 | : "1" (s), "0" (c) |
157 | :"memory"); | 157 | : "memory"); |
158 | return res; | 158 | return res; |
159 | } | 159 | } |
160 | EXPORT_SYMBOL(strchr); | 160 | EXPORT_SYMBOL(strchr); |
@@ -169,9 +169,9 @@ size_t strlen(const char *s) | |||
169 | "scasb\n\t" | 169 | "scasb\n\t" |
170 | "notl %0\n\t" | 170 | "notl %0\n\t" |
171 | "decl %0" | 171 | "decl %0" |
172 | :"=c" (res), "=&D" (d0) | 172 | : "=c" (res), "=&D" (d0) |
173 | :"1" (s), "a" (0), "0" (0xffffffffu) | 173 | : "1" (s), "a" (0), "0" (0xffffffffu) |
174 | :"memory"); | 174 | : "memory"); |
175 | return res; | 175 | return res; |
176 | } | 176 | } |
177 | EXPORT_SYMBOL(strlen); | 177 | EXPORT_SYMBOL(strlen); |
@@ -189,9 +189,9 @@ void *memchr(const void *cs, int c, size_t count) | |||
189 | "je 1f\n\t" | 189 | "je 1f\n\t" |
190 | "movl $1,%0\n" | 190 | "movl $1,%0\n" |
191 | "1:\tdecl %0" | 191 | "1:\tdecl %0" |
192 | :"=D" (res), "=&c" (d0) | 192 | : "=D" (res), "=&c" (d0) |
193 | :"a" (c), "0" (cs), "1" (count) | 193 | : "a" (c), "0" (cs), "1" (count) |
194 | :"memory"); | 194 | : "memory"); |
195 | return res; | 195 | return res; |
196 | } | 196 | } |
197 | EXPORT_SYMBOL(memchr); | 197 | EXPORT_SYMBOL(memchr); |
@@ -228,9 +228,9 @@ size_t strnlen(const char *s, size_t count) | |||
228 | "cmpl $-1,%1\n\t" | 228 | "cmpl $-1,%1\n\t" |
229 | "jne 1b\n" | 229 | "jne 1b\n" |
230 | "3:\tsubl %2,%0" | 230 | "3:\tsubl %2,%0" |
231 | :"=a" (res), "=&d" (d0) | 231 | : "=a" (res), "=&d" (d0) |
232 | :"c" (s), "1" (count) | 232 | : "c" (s), "1" (count) |
233 | :"memory"); | 233 | : "memory"); |
234 | return res; | 234 | return res; |
235 | } | 235 | } |
236 | EXPORT_SYMBOL(strnlen); | 236 | EXPORT_SYMBOL(strnlen); |
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c index 42e8a50303f3..8e2d55f754bf 100644 --- a/arch/x86/lib/strstr_32.c +++ b/arch/x86/lib/strstr_32.c | |||
@@ -23,9 +23,9 @@ __asm__ __volatile__( | |||
23 | "jne 1b\n\t" | 23 | "jne 1b\n\t" |
24 | "xorl %%eax,%%eax\n\t" | 24 | "xorl %%eax,%%eax\n\t" |
25 | "2:" | 25 | "2:" |
26 | :"=a" (__res), "=&c" (d0), "=&S" (d1) | 26 | : "=a" (__res), "=&c" (d0), "=&S" (d1) |
27 | :"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) | 27 | : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) |
28 | :"dx", "di"); | 28 | : "dx", "di"); |
29 | return __res; | 29 | return __res; |
30 | } | 30 | } |
31 | 31 | ||
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index 62fa440678d8..847c164725f4 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c | |||
@@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn, | |||
328 | 328 | ||
329 | get_memcfg_numa(); | 329 | get_memcfg_numa(); |
330 | 330 | ||
331 | kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE); | 331 | kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); |
332 | 332 | ||
333 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); | 333 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); |
334 | do { | 334 | do { |
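
This hunk and several later ones replace the x86-private round_up() with the generic roundup() from <linux/kernel.h>, which, being division-based rather than mask-based, also works for non-power-of-two alignments. A standalone illustration of the arithmetic; the local #define mirrors the generic macro purely so the example compiles on its own:

    #include <stdio.h>

    /* Mirror of the generic kernel roundup() macro, for illustration only. */
    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
            /* e.g. rounding a byte count up to a 4096-byte page boundary: */
            printf("%lu\n", (unsigned long)roundup(5000UL, 4096UL)); /* 8192 */
            printf("%lu\n", (unsigned long)roundup(8192UL, 4096UL)); /* 8192 */
            return 0;
    }
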
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index a20d1fa64b4e..e7277cbcfb40 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st, | |||
148 | * we have now. "break" is either changing perms, levels or | 148 | * we have now. "break" is either changing perms, levels or |
149 | * address space marker. | 149 | * address space marker. |
150 | */ | 150 | */ |
151 | prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); | 151 | prot = pgprot_val(new_prot) & PTE_FLAGS_MASK; |
152 | cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); | 152 | cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK; |
153 | 153 | ||
154 | if (!st->level) { | 154 | if (!st->level) { |
155 | /* First entry */ | 155 | /* First entry */ |
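
Replacing ~(PTE_PFN_MASK) with PTE_FLAGS_MASK reads as intent rather than bit arithmetic; as far as this hunk shows, PTE_FLAGS_MASK is simply the complement of PTE_PFN_MASK, so the computed value is unchanged. A generic illustration of splitting a value with complementary masks (the masks and pte value below are invented for the example, not the real x86 definitions):

    #include <stdio.h>

    #define EX_PFN_MASK   0xfffff000UL              /* invented example mask   */
    #define EX_FLAGS_MASK (~EX_PFN_MASK)             /* complement of the above */

    int main(void)
    {
            unsigned long pte = 0x12345067UL;        /* invented pte-like value */

            printf("frame bits: %#lx\n", pte & EX_PFN_MASK);   /* 0x12345000 */
            printf("flag bits:  %#lx\n", pte & EX_FLAGS_MASK); /* 0x67       */
            return 0;
    }
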
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index d3746efb060d..770536ebf7e9 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -225,7 +225,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) | |||
225 | void __init cleanup_highmap(void) | 225 | void __init cleanup_highmap(void) |
226 | { | 226 | { |
227 | unsigned long vaddr = __START_KERNEL_map; | 227 | unsigned long vaddr = __START_KERNEL_map; |
228 | unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1; | 228 | unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; |
229 | pmd_t *pmd = level2_kernel_pgt; | 229 | pmd_t *pmd = level2_kernel_pgt; |
230 | pmd_t *last_pmd = pmd + PTRS_PER_PMD; | 230 | pmd_t *last_pmd = pmd + PTRS_PER_PMD; |
231 | 231 | ||
@@ -451,14 +451,14 @@ static void __init find_early_table_space(unsigned long end) | |||
451 | unsigned long puds, pmds, ptes, tables, start; | 451 | unsigned long puds, pmds, ptes, tables, start; |
452 | 452 | ||
453 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; | 453 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; |
454 | tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); | 454 | tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); |
455 | if (direct_gbpages) { | 455 | if (direct_gbpages) { |
456 | unsigned long extra; | 456 | unsigned long extra; |
457 | extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); | 457 | extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); |
458 | pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; | 458 | pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; |
459 | } else | 459 | } else |
460 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; | 460 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; |
461 | tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); | 461 | tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); |
462 | 462 | ||
463 | if (cpu_has_pse) { | 463 | if (cpu_has_pse) { |
464 | unsigned long extra; | 464 | unsigned long extra; |
@@ -466,7 +466,7 @@ static void __init find_early_table_space(unsigned long end) | |||
466 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; | 466 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; |
467 | } else | 467 | } else |
468 | ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; | 468 | ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; |
469 | tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE); | 469 | tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); |
470 | 470 | ||
471 | /* | 471 | /* |
472 | * RED-PEN putting page tables only on node 0 could | 472 | * RED-PEN putting page tables only on node 0 could |
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a4dd793d6003..cebcbf152d46 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void) | |||
79 | return 0; | 79 | return 0; |
80 | 80 | ||
81 | addr = 0x8000; | 81 | addr = 0x8000; |
82 | nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); | 82 | nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); |
83 | nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, | 83 | nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, |
84 | nodemap_size, L1_CACHE_BYTES); | 84 | nodemap_size, L1_CACHE_BYTES); |
85 | if (nodemap_addr == -1UL) { | 85 | if (nodemap_addr == -1UL) { |
@@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, | |||
176 | unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; | 176 | unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; |
177 | unsigned long bootmap_start, nodedata_phys; | 177 | unsigned long bootmap_start, nodedata_phys; |
178 | void *bootmap; | 178 | void *bootmap; |
179 | const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE); | 179 | const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); |
180 | int nid; | 180 | int nid; |
181 | 181 | ||
182 | start = round_up(start, ZONE_ALIGN); | 182 | start = roundup(start, ZONE_ALIGN); |
183 | 183 | ||
184 | printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, | 184 | printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, |
185 | start, end); | 185 | start, end); |
@@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, | |||
210 | bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); | 210 | bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); |
211 | nid = phys_to_nid(nodedata_phys); | 211 | nid = phys_to_nid(nodedata_phys); |
212 | if (nid == nodeid) | 212 | if (nid == nodeid) |
213 | bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE); | 213 | bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE); |
214 | else | 214 | else |
215 | bootmap_start = round_up(start, PAGE_SIZE); | 215 | bootmap_start = roundup(start, PAGE_SIZE); |
216 | /* | 216 | /* |
217 | * SMP_CACHE_BYTES could be enough, but init_bootmem_node like | 217 | * SMP_CACHE_BYTES could be enough, but init_bootmem_node like |
218 | * to use that to align to PAGE_SIZE | 218 | * to use that to align to PAGE_SIZE |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 43e2f8483e4f..be54f501776a 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -84,7 +84,7 @@ static inline unsigned long highmap_start_pfn(void) | |||
84 | 84 | ||
85 | static inline unsigned long highmap_end_pfn(void) | 85 | static inline unsigned long highmap_end_pfn(void) |
86 | { | 86 | { |
87 | return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; | 87 | return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; |
88 | } | 88 | } |
89 | 89 | ||
90 | #endif | 90 | #endif |
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index 6a0fca78c362..22e057665e55 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
@@ -580,7 +580,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self, | |||
580 | unsigned long action, void *hcpu) | 580 | unsigned long action, void *hcpu) |
581 | { | 581 | { |
582 | int cpu = (long)hcpu; | 582 | int cpu = (long)hcpu; |
583 | switch(action) { | 583 | switch (action) { |
584 | case CPU_ONLINE: | 584 | case CPU_ONLINE: |
585 | case CPU_ONLINE_FROZEN: | 585 | case CPU_ONLINE_FROZEN: |
586 | smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); | 586 | smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 8e077185e185..006599db0dc7 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -1043,35 +1043,44 @@ static void __init pcibios_fixup_irqs(void) | |||
1043 | if (io_apic_assign_pci_irqs) { | 1043 | if (io_apic_assign_pci_irqs) { |
1044 | int irq; | 1044 | int irq; |
1045 | 1045 | ||
1046 | if (pin) { | 1046 | if (!pin) |
1047 | /* | 1047 | continue; |
1048 | * interrupt pins are numbered starting | 1048 | |
1049 | * from 1 | 1049 | /* |
1050 | */ | 1050 | * interrupt pins are numbered starting from 1 |
1051 | pin--; | 1051 | */ |
1052 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, | 1052 | pin--; |
1053 | PCI_SLOT(dev->devfn), pin); | 1053 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, |
1054 | /* | 1054 | PCI_SLOT(dev->devfn), pin); |
1055 | * Busses behind bridges are typically not listed in the MP-table. | 1055 | /* |
1056 | * In this case we have to look up the IRQ based on the parent bus, | 1056 | * Busses behind bridges are typically not listed in the |
1057 | * parent slot, and pin number. The SMP code detects such bridged | 1057 | * MP-table. In this case we have to look up the IRQ |
1058 | * busses itself so we should get into this branch reliably. | 1058 | * based on the parent bus, parent slot, and pin number. |
1059 | */ | 1059 | * The SMP code detects such bridged busses itself so we |
1060 | if (irq < 0 && dev->bus->parent) { /* go back to the bridge */ | 1060 | * should get into this branch reliably. |
1061 | struct pci_dev *bridge = dev->bus->self; | 1061 | */ |
1062 | 1062 | if (irq < 0 && dev->bus->parent) { | |
1063 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; | 1063 | /* go back to the bridge */ |
1064 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, | 1064 | struct pci_dev *bridge = dev->bus->self; |
1065 | PCI_SLOT(bridge->devfn), pin); | 1065 | int bus; |
1066 | if (irq >= 0) | 1066 | |
1067 | dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n", | 1067 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; |
1068 | pci_name(bridge), | 1068 | bus = bridge->bus->number; |
1069 | 'A' + pin, irq); | 1069 | irq = IO_APIC_get_PCI_irq_vector(bus, |
1070 | } | 1070 | PCI_SLOT(bridge->devfn), pin); |
1071 | if (irq >= 0) { | 1071 | if (irq >= 0) |
1072 | dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq); | 1072 | dev_warn(&dev->dev, |
1073 | dev->irq = irq; | 1073 | "using bridge %s INT %c to " |
1074 | } | 1074 | "get IRQ %d\n", |
1075 | pci_name(bridge), | ||
1076 | 'A' + pin, irq); | ||
1077 | } | ||
1078 | if (irq >= 0) { | ||
1079 | dev_info(&dev->dev, | ||
1080 | "PCI->APIC IRQ transform: INT %c " | ||
1081 | "-> IRQ %d\n", | ||
1082 | 'A' + pin, irq); | ||
1083 | dev->irq = irq; | ||
1075 | } | 1084 | } |
1076 | } | 1085 | } |
1077 | #endif | 1086 | #endif |
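
The reworked loop body above trades the old nested "if (pin) { ... }" for an early continue, which is what lets the rest of the block drop an indentation level. A generic sketch of that pattern (illustrative only; struct ex_dev, devs, ndevs and route_irq() are hypothetical stand-ins, not kernel symbols):

    static void fixup_all(struct ex_dev *devs, int ndevs)
    {
            int i;

            for (i = 0; i < ndevs; i++) {
                    if (!devs[i].pin)
                            continue;        /* nothing to route for this device */

                    route_irq(&devs[i]);     /* hypothetical helper doing the work */
            }
    }
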
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index 4fc7e872c85e..d1e9b53f9d33 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S | |||
@@ -1,5 +1,3 @@ | |||
1 | .text | ||
2 | |||
3 | /* | 1 | /* |
4 | * This may not use any stack, nor any variable that is not "NoSave": | 2 | * This may not use any stack, nor any variable that is not "NoSave": |
5 | * | 3 | * |
@@ -12,17 +10,18 @@ | |||
12 | #include <asm/segment.h> | 10 | #include <asm/segment.h> |
13 | #include <asm/page.h> | 11 | #include <asm/page.h> |
14 | #include <asm/asm-offsets.h> | 12 | #include <asm/asm-offsets.h> |
13 | #include <asm/processor-flags.h> | ||
15 | 14 | ||
16 | .text | 15 | .text |
17 | 16 | ||
18 | ENTRY(swsusp_arch_suspend) | 17 | ENTRY(swsusp_arch_suspend) |
19 | |||
20 | movl %esp, saved_context_esp | 18 | movl %esp, saved_context_esp |
21 | movl %ebx, saved_context_ebx | 19 | movl %ebx, saved_context_ebx |
22 | movl %ebp, saved_context_ebp | 20 | movl %ebp, saved_context_ebp |
23 | movl %esi, saved_context_esi | 21 | movl %esi, saved_context_esi |
24 | movl %edi, saved_context_edi | 22 | movl %edi, saved_context_edi |
25 | pushfl ; popl saved_context_eflags | 23 | pushfl |
24 | popl saved_context_eflags | ||
26 | 25 | ||
27 | call swsusp_save | 26 | call swsusp_save |
28 | ret | 27 | ret |
@@ -59,7 +58,7 @@ done: | |||
59 | movl mmu_cr4_features, %ecx | 58 | movl mmu_cr4_features, %ecx |
60 | jecxz 1f # cr4 Pentium and higher, skip if zero | 59 | jecxz 1f # cr4 Pentium and higher, skip if zero |
61 | movl %ecx, %edx | 60 | movl %ecx, %edx |
62 | andl $~(1<<7), %edx; # PGE | 61 | andl $~(X86_CR4_PGE), %edx |
63 | movl %edx, %cr4; # turn off PGE | 62 | movl %edx, %cr4; # turn off PGE |
64 | 1: | 63 | 1: |
65 | movl %cr3, %eax; # flush TLB | 64 | movl %cr3, %eax; # flush TLB |
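
The bare "1 << 7" is replaced by X86_CR4_PGE, made available by the <asm/processor-flags.h> include added at the top of this file. A small standalone check of the masking (the local #define mirrors the kernel constant purely for illustration, and the CR4 value is invented):

    #include <stdio.h>

    #define X86_CR4_PGE (1UL << 7)  /* mirrors the kernel constant; CR4 bit 7 */

    int main(void)
    {
            unsigned long cr4 = 0xb5;                /* invented CR4 value      */

            cr4 &= ~X86_CR4_PGE;                     /* same masking as the asm */
            printf("cr4 without PGE: %#lx\n", cr4);  /* prints 0x35             */
            return 0;
    }
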
@@ -74,7 +73,8 @@ done: | |||
74 | movl saved_context_esi, %esi | 73 | movl saved_context_esi, %esi |
75 | movl saved_context_edi, %edi | 74 | movl saved_context_edi, %edi |
76 | 75 | ||
77 | pushl saved_context_eflags ; popfl | 76 | pushl saved_context_eflags |
77 | popfl | ||
78 | 78 | ||
79 | xorl %eax, %eax | 79 | xorl %eax, %eax |
80 | 80 | ||