-rw-r--r--  arch/x86/kernel/crash_dump_64.c       7
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c    2
-rw-r--r--  arch/x86/kernel/mpparse.c             6
-rw-r--r--  arch/x86/kernel/pci-dma.c             2
-rw-r--r--  arch/x86/kernel/process_64.c        131
-rw-r--r--  arch/x86/kernel/sigframe.h            5
-rw-r--r--  arch/x86/kernel/signal_64.c          62
-rw-r--r--  arch/x86/kernel/sys_x86_64.c         43
-rw-r--r--  arch/x86/kernel/traps_64.c           59
-rw-r--r--  arch/x86/kernel/tsc.c                 2
-rw-r--r--  arch/x86/kernel/visws_quirks.c       16
-rw-r--r--  arch/x86/mm/discontig_32.c            2
-rw-r--r--  arch/x86/mm/init_64.c                 8
-rw-r--r--  arch/x86/mm/numa_64.c                10
-rw-r--r--  arch/x86/mm/pageattr.c                2
-rw-r--r--  include/asm-x86/apic.h                4
-rw-r--r--  include/asm-x86/mmconfig.h            2
17 files changed, 178 insertions, 185 deletions
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 15e6c6bc4a46..d3e524c84527 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -7,9 +7,8 @@
7 7
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/crash_dump.h> 9#include <linux/crash_dump.h>
10 10#include <linux/uaccess.h>
11#include <asm/uaccess.h> 11#include <linux/io.h>
12#include <asm/io.h>
13 12
14/** 13/**
15 * copy_oldmem_page - copy one page from "oldmem" 14 * copy_oldmem_page - copy one page from "oldmem"
@@ -25,7 +24,7 @@
25 * in the current kernel. We stitch up a pte, similar to kmap_atomic. 24 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
26 */ 25 */
27ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 26ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
28 size_t csize, unsigned long offset, int userbuf) 27 size_t csize, unsigned long offset, int userbuf)
29{ 28{
30 void *vaddr; 29 void *vaddr;
31 30
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index fdfdc550b366..efc2f361fe85 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata mmconf_dmi_table[] = {
238 {} 238 {}
239}; 239};
240 240
241void __init check_enable_amd_mmconf_dmi(void) 241void __cpuinit check_enable_amd_mmconf_dmi(void)
242{ 242{
243 dmi_check_system(mmconf_dmi_table); 243 dmi_check_system(mmconf_dmi_table);
244} 244}
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 6ae005ccaed8..2c1963b39621 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -49,7 +49,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
49 return sum & 0xFF; 49 return sum & 0xFF;
50} 50}
51 51
52static void __cpuinit MP_processor_info(struct mpc_config_processor *m) 52static void __init MP_processor_info(struct mpc_config_processor *m)
53{ 53{
54 int apicid; 54 int apicid;
55 char *bootup_cpu = ""; 55 char *bootup_cpu = "";
@@ -484,7 +484,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
484} 484}
485 485
486 486
487static void construct_ioapic_table(int mpc_default_type) 487static void __init construct_ioapic_table(int mpc_default_type)
488{ 488{
489 struct mpc_config_ioapic ioapic; 489 struct mpc_config_ioapic ioapic;
490 struct mpc_config_bus bus; 490 struct mpc_config_bus bus;
@@ -529,7 +529,7 @@ static void construct_ioapic_table(int mpc_default_type)
529 construct_default_ioirq_mptable(mpc_default_type); 529 construct_default_ioirq_mptable(mpc_default_type);
530} 530}
531#else 531#else
532static inline void construct_ioapic_table(int mpc_default_type) { } 532static inline void __init construct_ioapic_table(int mpc_default_type) { }
533#endif 533#endif
534 534
535static inline void __init construct_default_ISA_mptable(int mpc_default_type) 535static inline void __init construct_default_ISA_mptable(int mpc_default_type)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 87d4d6964ec2..f704cb51ff82 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -82,7 +82,7 @@ void __init dma32_reserve_bootmem(void)
82 * using 512M as goal 82 * using 512M as goal
83 */ 83 */
84 align = 64ULL<<20; 84 align = 64ULL<<20;
85 size = round_up(dma32_bootmem_size, align); 85 size = roundup(dma32_bootmem_size, align);
86 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, 86 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
87 512ULL<<20); 87 512ULL<<20);
88 if (dma32_bootmem_ptr) 88 if (dma32_bootmem_ptr)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3fb62a7d9a16..3560d7f4d74e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,11 +37,11 @@
37#include <linux/kdebug.h> 37#include <linux/kdebug.h>
38#include <linux/tick.h> 38#include <linux/tick.h>
39#include <linux/prctl.h> 39#include <linux/prctl.h>
40#include <linux/uaccess.h>
41#include <linux/io.h>
40 42
41#include <asm/uaccess.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
43#include <asm/system.h> 44#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/processor.h> 45#include <asm/processor.h>
46#include <asm/i387.h> 46#include <asm/i387.h>
47#include <asm/mmu_context.h> 47#include <asm/mmu_context.h>
@@ -88,7 +88,7 @@ void exit_idle(void)
88#ifdef CONFIG_HOTPLUG_CPU 88#ifdef CONFIG_HOTPLUG_CPU
89DECLARE_PER_CPU(int, cpu_state); 89DECLARE_PER_CPU(int, cpu_state);
90 90
91#include <asm/nmi.h> 91#include <linux/nmi.h>
92/* We halt the CPU with physical CPU hotplug */ 92/* We halt the CPU with physical CPU hotplug */
93static inline void play_dead(void) 93static inline void play_dead(void)
94{ 94{
@@ -152,7 +152,7 @@ void cpu_idle(void)
152} 152}
153 153
154/* Prints also some state that isn't saved in the pt_regs */ 154/* Prints also some state that isn't saved in the pt_regs */
155void __show_regs(struct pt_regs * regs) 155void __show_regs(struct pt_regs *regs)
156{ 156{
157 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; 157 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
158 unsigned long d0, d1, d2, d3, d6, d7; 158 unsigned long d0, d1, d2, d3, d6, d7;
@@ -161,59 +161,61 @@ void __show_regs(struct pt_regs * regs)
161 161
162 printk("\n"); 162 printk("\n");
163 print_modules(); 163 print_modules();
164 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 164 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
165 current->pid, current->comm, print_tainted(), 165 current->pid, current->comm, print_tainted(),
166 init_utsname()->release, 166 init_utsname()->release,
167 (int)strcspn(init_utsname()->version, " "), 167 (int)strcspn(init_utsname()->version, " "),
168 init_utsname()->version); 168 init_utsname()->version);
169 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); 169 printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
170 printk_address(regs->ip, 1); 170 printk_address(regs->ip, 1);
171 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, 171 printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
172 regs->flags); 172 regs->sp, regs->flags);
173 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", 173 printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
174 regs->ax, regs->bx, regs->cx); 174 regs->ax, regs->bx, regs->cx);
175 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", 175 printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
176 regs->dx, regs->si, regs->di); 176 regs->dx, regs->si, regs->di);
177 printk("RBP: %016lx R08: %016lx R09: %016lx\n", 177 printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
178 regs->bp, regs->r8, regs->r9); 178 regs->bp, regs->r8, regs->r9);
179 printk("R10: %016lx R11: %016lx R12: %016lx\n", 179 printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
180 regs->r10, regs->r11, regs->r12); 180 regs->r10, regs->r11, regs->r12);
181 printk("R13: %016lx R14: %016lx R15: %016lx\n", 181 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
182 regs->r13, regs->r14, regs->r15); 182 regs->r13, regs->r14, regs->r15);
183 183
184 asm("movl %%ds,%0" : "=r" (ds)); 184 asm("movl %%ds,%0" : "=r" (ds));
185 asm("movl %%cs,%0" : "=r" (cs)); 185 asm("movl %%cs,%0" : "=r" (cs));
186 asm("movl %%es,%0" : "=r" (es)); 186 asm("movl %%es,%0" : "=r" (es));
187 asm("movl %%fs,%0" : "=r" (fsindex)); 187 asm("movl %%fs,%0" : "=r" (fsindex));
188 asm("movl %%gs,%0" : "=r" (gsindex)); 188 asm("movl %%gs,%0" : "=r" (gsindex));
189 189
190 rdmsrl(MSR_FS_BASE, fs); 190 rdmsrl(MSR_FS_BASE, fs);
191 rdmsrl(MSR_GS_BASE, gs); 191 rdmsrl(MSR_GS_BASE, gs);
192 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 192 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
193 193
194 cr0 = read_cr0(); 194 cr0 = read_cr0();
195 cr2 = read_cr2(); 195 cr2 = read_cr2();
196 cr3 = read_cr3(); 196 cr3 = read_cr3();
197 cr4 = read_cr4(); 197 cr4 = read_cr4();
198 198
199 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 199 printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
200 fs,fsindex,gs,gsindex,shadowgs); 200 fs, fsindex, gs, gsindex, shadowgs);
201 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 201 printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
202 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); 202 es, cr0);
203 printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
204 cr4);
203 205
204 get_debugreg(d0, 0); 206 get_debugreg(d0, 0);
205 get_debugreg(d1, 1); 207 get_debugreg(d1, 1);
206 get_debugreg(d2, 2); 208 get_debugreg(d2, 2);
207 printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); 209 printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
208 get_debugreg(d3, 3); 210 get_debugreg(d3, 3);
209 get_debugreg(d6, 6); 211 get_debugreg(d6, 6);
210 get_debugreg(d7, 7); 212 get_debugreg(d7, 7);
211 printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); 213 printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
212} 214}
213 215
214void show_regs(struct pt_regs *regs) 216void show_regs(struct pt_regs *regs)
215{ 217{
216 printk("CPU %d:", smp_processor_id()); 218 printk(KERN_INFO "CPU %d:", smp_processor_id());
217 __show_regs(regs); 219 __show_regs(regs);
218 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 220 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
219} 221}
@@ -314,10 +316,10 @@ void prepare_to_copy(struct task_struct *tsk)
314 316
315int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, 317int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
316 unsigned long unused, 318 unsigned long unused,
317 struct task_struct * p, struct pt_regs * regs) 319 struct task_struct *p, struct pt_regs *regs)
318{ 320{
319 int err; 321 int err;
320 struct pt_regs * childregs; 322 struct pt_regs *childregs;
321 struct task_struct *me = current; 323 struct task_struct *me = current;
322 324
323 childregs = ((struct pt_regs *) 325 childregs = ((struct pt_regs *)
@@ -362,10 +364,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
362 if (test_thread_flag(TIF_IA32)) 364 if (test_thread_flag(TIF_IA32))
363 err = do_set_thread_area(p, -1, 365 err = do_set_thread_area(p, -1,
364 (struct user_desc __user *)childregs->si, 0); 366 (struct user_desc __user *)childregs->si, 0);
365 else 367 else
366#endif 368#endif
367 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 369 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
368 if (err) 370 if (err)
369 goto out; 371 goto out;
370 } 372 }
371 err = 0; 373 err = 0;
@@ -544,7 +546,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
544 unsigned fsindex, gsindex; 546 unsigned fsindex, gsindex;
545 547
546 /* we're going to use this soon, after a few expensive things */ 548 /* we're going to use this soon, after a few expensive things */
547 if (next_p->fpu_counter>5) 549 if (next_p->fpu_counter > 5)
548 prefetch(next->xstate); 550 prefetch(next->xstate);
549 551
550 /* 552 /*
@@ -552,13 +554,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
552 */ 554 */
553 load_sp0(tss, next); 555 load_sp0(tss, next);
554 556
555 /* 557 /*
556 * Switch DS and ES. 558 * Switch DS and ES.
557 * This won't pick up thread selector changes, but I guess that is ok. 559 * This won't pick up thread selector changes, but I guess that is ok.
558 */ 560 */
559 savesegment(es, prev->es); 561 savesegment(es, prev->es);
560 if (unlikely(next->es | prev->es)) 562 if (unlikely(next->es | prev->es))
561 loadsegment(es, next->es); 563 loadsegment(es, next->es);
562 564
563 savesegment(ds, prev->ds); 565 savesegment(ds, prev->ds);
564 if (unlikely(next->ds | prev->ds)) 566 if (unlikely(next->ds | prev->ds))
@@ -584,7 +586,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
584 */ 586 */
585 arch_leave_lazy_cpu_mode(); 587 arch_leave_lazy_cpu_mode();
586 588
587 /* 589 /*
588 * Switch FS and GS. 590 * Switch FS and GS.
589 * 591 *
590 * Segment register != 0 always requires a reload. Also 592 * Segment register != 0 always requires a reload. Also
@@ -593,13 +595,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
593 */ 595 */
594 if (unlikely(fsindex | next->fsindex | prev->fs)) { 596 if (unlikely(fsindex | next->fsindex | prev->fs)) {
595 loadsegment(fs, next->fsindex); 597 loadsegment(fs, next->fsindex);
596 /* 598 /*
597 * Check if the user used a selector != 0; if yes 599 * Check if the user used a selector != 0; if yes
598 * clear 64bit base, since overloaded base is always 600 * clear 64bit base, since overloaded base is always
599 * mapped to the Null selector 601 * mapped to the Null selector
600 */ 602 */
601 if (fsindex) 603 if (fsindex)
602 prev->fs = 0; 604 prev->fs = 0;
603 } 605 }
604 /* when next process has a 64bit base use it */ 606 /* when next process has a 64bit base use it */
605 if (next->fs) 607 if (next->fs)
@@ -609,7 +611,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
609 if (unlikely(gsindex | next->gsindex | prev->gs)) { 611 if (unlikely(gsindex | next->gsindex | prev->gs)) {
610 load_gs_index(next->gsindex); 612 load_gs_index(next->gsindex);
611 if (gsindex) 613 if (gsindex)
612 prev->gs = 0; 614 prev->gs = 0;
613 } 615 }
614 if (next->gs) 616 if (next->gs)
615 wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 617 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
@@ -618,12 +620,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
618 /* Must be after DS reload */ 620 /* Must be after DS reload */
619 unlazy_fpu(prev_p); 621 unlazy_fpu(prev_p);
620 622
621 /* 623 /*
622 * Switch the PDA and FPU contexts. 624 * Switch the PDA and FPU contexts.
623 */ 625 */
624 prev->usersp = read_pda(oldrsp); 626 prev->usersp = read_pda(oldrsp);
625 write_pda(oldrsp, next->usersp); 627 write_pda(oldrsp, next->usersp);
626 write_pda(pcurrent, next_p); 628 write_pda(pcurrent, next_p);
627 629
628 write_pda(kernelstack, 630 write_pda(kernelstack,
629 (unsigned long)task_stack_page(next_p) + 631 (unsigned long)task_stack_page(next_p) +
@@ -664,7 +666,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
664 char __user * __user *envp, struct pt_regs *regs) 666 char __user * __user *envp, struct pt_regs *regs)
665{ 667{
666 long error; 668 long error;
667 char * filename; 669 char *filename;
668 670
669 filename = getname(name); 671 filename = getname(name);
670 error = PTR_ERR(filename); 672 error = PTR_ERR(filename);
@@ -722,55 +724,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
722unsigned long get_wchan(struct task_struct *p) 724unsigned long get_wchan(struct task_struct *p)
723{ 725{
724 unsigned long stack; 726 unsigned long stack;
725 u64 fp,ip; 727 u64 fp, ip;
726 int count = 0; 728 int count = 0;
727 729
728 if (!p || p == current || p->state==TASK_RUNNING) 730 if (!p || p == current || p->state == TASK_RUNNING)
729 return 0; 731 return 0;
730 stack = (unsigned long)task_stack_page(p); 732 stack = (unsigned long)task_stack_page(p);
731 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) 733 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
732 return 0; 734 return 0;
733 fp = *(u64 *)(p->thread.sp); 735 fp = *(u64 *)(p->thread.sp);
734 do { 736 do {
735 if (fp < (unsigned long)stack || 737 if (fp < (unsigned long)stack ||
736 fp > (unsigned long)stack+THREAD_SIZE) 738 fp > (unsigned long)stack+THREAD_SIZE)
737 return 0; 739 return 0;
738 ip = *(u64 *)(fp+8); 740 ip = *(u64 *)(fp+8);
739 if (!in_sched_functions(ip)) 741 if (!in_sched_functions(ip))
740 return ip; 742 return ip;
741 fp = *(u64 *)fp; 743 fp = *(u64 *)fp;
742 } while (count++ < 16); 744 } while (count++ < 16);
743 return 0; 745 return 0;
744} 746}
745 747
746long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) 748long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
747{ 749{
748 int ret = 0; 750 int ret = 0;
749 int doit = task == current; 751 int doit = task == current;
750 int cpu; 752 int cpu;
751 753
752 switch (code) { 754 switch (code) {
753 case ARCH_SET_GS: 755 case ARCH_SET_GS:
754 if (addr >= TASK_SIZE_OF(task)) 756 if (addr >= TASK_SIZE_OF(task))
755 return -EPERM; 757 return -EPERM;
756 cpu = get_cpu(); 758 cpu = get_cpu();
757 /* handle small bases via the GDT because that's faster to 759 /* handle small bases via the GDT because that's faster to
758 switch. */ 760 switch. */
759 if (addr <= 0xffffffff) { 761 if (addr <= 0xffffffff) {
760 set_32bit_tls(task, GS_TLS, addr); 762 set_32bit_tls(task, GS_TLS, addr);
761 if (doit) { 763 if (doit) {
762 load_TLS(&task->thread, cpu); 764 load_TLS(&task->thread, cpu);
763 load_gs_index(GS_TLS_SEL); 765 load_gs_index(GS_TLS_SEL);
764 } 766 }
765 task->thread.gsindex = GS_TLS_SEL; 767 task->thread.gsindex = GS_TLS_SEL;
766 task->thread.gs = 0; 768 task->thread.gs = 0;
767 } else { 769 } else {
768 task->thread.gsindex = 0; 770 task->thread.gsindex = 0;
769 task->thread.gs = addr; 771 task->thread.gs = addr;
770 if (doit) { 772 if (doit) {
771 load_gs_index(0); 773 load_gs_index(0);
772 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); 774 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
773 } 775 }
774 } 776 }
775 put_cpu(); 777 put_cpu();
776 break; 778 break;
@@ -824,8 +826,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
824 rdmsrl(MSR_KERNEL_GS_BASE, base); 826 rdmsrl(MSR_KERNEL_GS_BASE, base);
825 else 827 else
826 base = task->thread.gs; 828 base = task->thread.gs;
827 } 829 } else
828 else
829 base = task->thread.gs; 830 base = task->thread.gs;
830 ret = put_user(base, (unsigned long __user *)addr); 831 ret = put_user(base, (unsigned long __user *)addr);
831 break; 832 break;
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h
index 72bbb519d2dc..8b4956e800ac 100644
--- a/arch/x86/kernel/sigframe.h
+++ b/arch/x86/kernel/sigframe.h
@@ -24,4 +24,9 @@ struct rt_sigframe {
24 struct ucontext uc; 24 struct ucontext uc;
25 struct siginfo info; 25 struct siginfo info;
26}; 26};
27
28int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
29 sigset_t *set, struct pt_regs *regs);
30int ia32_setup_frame(int sig, struct k_sigaction *ka,
31 sigset_t *set, struct pt_regs *regs);
27#endif 32#endif
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index b45ef8ddd651..87a9c2f28d99 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -19,9 +19,10 @@
19#include <linux/stddef.h> 19#include <linux/stddef.h>
20#include <linux/personality.h> 20#include <linux/personality.h>
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/uaccess.h>
23
22#include <asm/processor.h> 24#include <asm/processor.h>
23#include <asm/ucontext.h> 25#include <asm/ucontext.h>
24#include <asm/uaccess.h>
25#include <asm/i387.h> 26#include <asm/i387.h>
26#include <asm/proto.h> 27#include <asm/proto.h>
27#include <asm/ia32_unistd.h> 28#include <asm/ia32_unistd.h>
@@ -41,11 +42,6 @@
41# define FIX_EFLAGS __FIX_EFLAGS 42# define FIX_EFLAGS __FIX_EFLAGS
42#endif 43#endif
43 44
44int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
45 sigset_t *set, struct pt_regs * regs);
46int ia32_setup_frame(int sig, struct k_sigaction *ka,
47 sigset_t *set, struct pt_regs * regs);
48
49asmlinkage long 45asmlinkage long
50sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 46sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
51 struct pt_regs *regs) 47 struct pt_regs *regs)
@@ -119,7 +115,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
119 /* Always make any pending restarted system calls return -EINTR */ 115 /* Always make any pending restarted system calls return -EINTR */
120 current_thread_info()->restart_block.fn = do_no_restart_syscall; 116 current_thread_info()->restart_block.fn = do_no_restart_syscall;
121 117
122#define COPY(x) err |= __get_user(regs->x, &sc->x) 118#define COPY(x) (err |= __get_user(regs->x, &sc->x))
123 119
124 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 120 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
125 COPY(dx); COPY(cx); COPY(ip); 121 COPY(dx); COPY(cx); COPY(ip);
@@ -149,7 +145,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
149 } 145 }
150 146
151 { 147 {
152 struct _fpstate __user * buf; 148 struct _fpstate __user *buf;
153 err |= __get_user(buf, &sc->fpstate); 149 err |= __get_user(buf, &sc->fpstate);
154 150
155 if (buf) { 151 if (buf) {
@@ -189,7 +185,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
189 current->blocked = set; 185 current->blocked = set;
190 recalc_sigpending(); 186 recalc_sigpending();
191 spin_unlock_irq(&current->sighand->siglock); 187 spin_unlock_irq(&current->sighand->siglock);
192 188
193 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 189 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
194 goto badframe; 190 goto badframe;
195 191
@@ -199,16 +195,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
199 return ax; 195 return ax;
200 196
201badframe: 197badframe:
202 signal_fault(regs,frame,"sigreturn"); 198 signal_fault(regs, frame, "sigreturn");
203 return 0; 199 return 0;
204} 200}
205 201
206/* 202/*
207 * Set up a signal frame. 203 * Set up a signal frame.
208 */ 204 */
209 205
210static inline int 206static inline int
211setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me) 207setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
208 unsigned long mask, struct task_struct *me)
212{ 209{
213 int err = 0; 210 int err = 0;
214 211
@@ -264,35 +261,35 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
264} 261}
265 262
266static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 263static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
267 sigset_t *set, struct pt_regs * regs) 264 sigset_t *set, struct pt_regs *regs)
268{ 265{
269 struct rt_sigframe __user *frame; 266 struct rt_sigframe __user *frame;
270 struct _fpstate __user *fp = NULL; 267 struct _fpstate __user *fp = NULL;
271 int err = 0; 268 int err = 0;
272 struct task_struct *me = current; 269 struct task_struct *me = current;
273 270
274 if (used_math()) { 271 if (used_math()) {
275 fp = get_stack(ka, regs, sizeof(struct _fpstate)); 272 fp = get_stack(ka, regs, sizeof(struct _fpstate));
276 frame = (void __user *)round_down( 273 frame = (void __user *)round_down(
277 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; 274 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
278 275
279 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) 276 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
280 goto give_sigsegv; 277 goto give_sigsegv;
281 278
282 if (save_i387(fp) < 0) 279 if (save_i387(fp) < 0)
283 err |= -1; 280 err |= -1;
284 } else 281 } else
285 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; 282 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
286 283
287 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 284 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
288 goto give_sigsegv; 285 goto give_sigsegv;
289 286
290 if (ka->sa.sa_flags & SA_SIGINFO) { 287 if (ka->sa.sa_flags & SA_SIGINFO) {
291 err |= copy_siginfo_to_user(&frame->info, info); 288 err |= copy_siginfo_to_user(&frame->info, info);
292 if (err) 289 if (err)
293 goto give_sigsegv; 290 goto give_sigsegv;
294 } 291 }
295 292
296 /* Create the ucontext. */ 293 /* Create the ucontext. */
297 err |= __put_user(0, &frame->uc.uc_flags); 294 err |= __put_user(0, &frame->uc.uc_flags);
298 err |= __put_user(0, &frame->uc.uc_link); 295 err |= __put_user(0, &frame->uc.uc_link);
@@ -302,9 +299,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
302 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 299 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
303 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); 300 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
304 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); 301 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
305 if (sizeof(*set) == 16) { 302 if (sizeof(*set) == 16) {
306 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); 303 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
307 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 304 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
308 } else 305 } else
309 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 306 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
310 307
@@ -315,7 +312,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
315 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 312 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
316 } else { 313 } else {
317 /* could use a vstub here */ 314 /* could use a vstub here */
318 goto give_sigsegv; 315 goto give_sigsegv;
319 } 316 }
320 317
321 if (err) 318 if (err)
@@ -323,7 +320,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
323 320
324 /* Set up registers for signal handler */ 321 /* Set up registers for signal handler */
325 regs->di = sig; 322 regs->di = sig;
326 /* In case the signal handler was declared without prototypes */ 323 /* In case the signal handler was declared without prototypes */
327 regs->ax = 0; 324 regs->ax = 0;
328 325
329 /* This also works for non SA_SIGINFO handlers because they expect the 326 /* This also works for non SA_SIGINFO handlers because they expect the
@@ -376,7 +373,7 @@ static long current_syscall_ret(struct pt_regs *regs)
376 373
377/* 374/*
378 * OK, we're invoking a handler 375 * OK, we're invoking a handler
379 */ 376 */
380 377
381static int 378static int
382handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 379handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
@@ -420,7 +417,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
420 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); 417 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
421 else 418 else
422 ret = ia32_setup_frame(sig, ka, oldset, regs); 419 ret = ia32_setup_frame(sig, ka, oldset, regs);
423 } else 420 } else
424#endif 421#endif
425 ret = setup_rt_frame(sig, ka, info, oldset, regs); 422 ret = setup_rt_frame(sig, ka, info, oldset, regs);
426 423
@@ -448,9 +445,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
448 ptrace_notify(SIGTRAP); 445 ptrace_notify(SIGTRAP);
449 446
450 spin_lock_irq(&current->sighand->siglock); 447 spin_lock_irq(&current->sighand->siglock);
451 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 448 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
452 if (!(ka->sa.sa_flags & SA_NODEFER)) 449 if (!(ka->sa.sa_flags & SA_NODEFER))
453 sigaddset(&current->blocked,sig); 450 sigaddset(&current->blocked, sig);
454 recalc_sigpending(); 451 recalc_sigpending();
455 spin_unlock_irq(&current->sighand->siglock); 452 spin_unlock_irq(&current->sighand->siglock);
456 } 453 }
@@ -552,14 +549,15 @@ void do_notify_resume(struct pt_regs *regs, void *unused,
552} 549}
553 550
554void signal_fault(struct pt_regs *regs, void __user *frame, char *where) 551void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
555{ 552{
556 struct task_struct *me = current; 553 struct task_struct *me = current;
557 if (show_unhandled_signals && printk_ratelimit()) { 554 if (show_unhandled_signals && printk_ratelimit()) {
558 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", 555 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
559 me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax); 556 me->comm, me->pid, where, frame, regs->ip,
557 regs->sp, regs->orig_ax);
560 print_vma_addr(" in ", regs->ip); 558 print_vma_addr(" in ", regs->ip);
561 printk("\n"); 559 printk("\n");
562 } 560 }
563 561
564 force_sig(SIGSEGV, me); 562 force_sig(SIGSEGV, me);
565} 563}
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 3b360ef33817..56eb8f916e9f 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -13,15 +13,16 @@
13#include <linux/utsname.h> 13#include <linux/utsname.h>
14#include <linux/personality.h> 14#include <linux/personality.h>
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/uaccess.h>
16 17
17#include <asm/uaccess.h>
18#include <asm/ia32.h> 18#include <asm/ia32.h>
19 19
20asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, 20asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
21 unsigned long fd, unsigned long off) 21 unsigned long prot, unsigned long flags,
22 unsigned long fd, unsigned long off)
22{ 23{
23 long error; 24 long error;
24 struct file * file; 25 struct file *file;
25 26
26 error = -EINVAL; 27 error = -EINVAL;
27 if (off & ~PAGE_MASK) 28 if (off & ~PAGE_MASK)
@@ -56,9 +57,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
56 unmapped base down for this case. This can give 57 unmapped base down for this case. This can give
57 conflicts with the heap, but we assume that glibc 58 conflicts with the heap, but we assume that glibc
58 malloc knows how to fall back to mmap. Give it 1GB 59 malloc knows how to fall back to mmap. Give it 1GB
59 of playground for now. -AK */ 60 of playground for now. -AK */
60 *begin = 0x40000000; 61 *begin = 0x40000000;
61 *end = 0x80000000; 62 *end = 0x80000000;
62 if (current->flags & PF_RANDOMIZE) { 63 if (current->flags & PF_RANDOMIZE) {
63 new_begin = randomize_range(*begin, *begin + 0x02000000, 0); 64 new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
64 if (new_begin) 65 if (new_begin)
@@ -66,9 +67,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
66 } 67 }
67 } else { 68 } else {
68 *begin = TASK_UNMAPPED_BASE; 69 *begin = TASK_UNMAPPED_BASE;
69 *end = TASK_SIZE; 70 *end = TASK_SIZE;
70 } 71 }
71} 72}
72 73
73unsigned long 74unsigned long
74arch_get_unmapped_area(struct file *filp, unsigned long addr, 75arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -78,11 +79,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78 struct vm_area_struct *vma; 79 struct vm_area_struct *vma;
79 unsigned long start_addr; 80 unsigned long start_addr;
80 unsigned long begin, end; 81 unsigned long begin, end;
81 82
82 if (flags & MAP_FIXED) 83 if (flags & MAP_FIXED)
83 return addr; 84 return addr;
84 85
85 find_start_end(flags, &begin, &end); 86 find_start_end(flags, &begin, &end);
86 87
87 if (len > end) 88 if (len > end)
88 return -ENOMEM; 89 return -ENOMEM;
@@ -96,12 +97,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
96 } 97 }
97 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) 98 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
98 && len <= mm->cached_hole_size) { 99 && len <= mm->cached_hole_size) {
99 mm->cached_hole_size = 0; 100 mm->cached_hole_size = 0;
100 mm->free_area_cache = begin; 101 mm->free_area_cache = begin;
101 } 102 }
102 addr = mm->free_area_cache; 103 addr = mm->free_area_cache;
103 if (addr < begin) 104 if (addr < begin)
104 addr = begin; 105 addr = begin;
105 start_addr = addr; 106 start_addr = addr;
106 107
107full_search: 108full_search:
@@ -127,7 +128,7 @@ full_search:
127 return addr; 128 return addr;
128 } 129 }
129 if (addr + mm->cached_hole_size < vma->vm_start) 130 if (addr + mm->cached_hole_size < vma->vm_start)
130 mm->cached_hole_size = vma->vm_start - addr; 131 mm->cached_hole_size = vma->vm_start - addr;
131 132
132 addr = vma->vm_end; 133 addr = vma->vm_end;
133 } 134 }
@@ -177,7 +178,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
177 vma = find_vma(mm, addr-len); 178 vma = find_vma(mm, addr-len);
178 if (!vma || addr <= vma->vm_start) 179 if (!vma || addr <= vma->vm_start)
179 /* remember the address as a hint for next time */ 180 /* remember the address as a hint for next time */
180 return (mm->free_area_cache = addr-len); 181 return mm->free_area_cache = addr-len;
181 } 182 }
182 183
183 if (mm->mmap_base < len) 184 if (mm->mmap_base < len)
@@ -194,7 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
194 vma = find_vma(mm, addr); 195 vma = find_vma(mm, addr);
195 if (!vma || addr+len <= vma->vm_start) 196 if (!vma || addr+len <= vma->vm_start)
196 /* remember the address as a hint for next time */ 197 /* remember the address as a hint for next time */
197 return (mm->free_area_cache = addr); 198 return mm->free_area_cache = addr;
198 199
199 /* remember the largest hole we saw so far */ 200 /* remember the largest hole we saw so far */
200 if (addr + mm->cached_hole_size < vma->vm_start) 201 if (addr + mm->cached_hole_size < vma->vm_start)
@@ -224,13 +225,13 @@ bottomup:
224} 225}
225 226
226 227
227asmlinkage long sys_uname(struct new_utsname __user * name) 228asmlinkage long sys_uname(struct new_utsname __user *name)
228{ 229{
229 int err; 230 int err;
230 down_read(&uts_sem); 231 down_read(&uts_sem);
231 err = copy_to_user(name, utsname(), sizeof (*name)); 232 err = copy_to_user(name, utsname(), sizeof(*name));
232 up_read(&uts_sem); 233 up_read(&uts_sem);
233 if (personality(current->personality) == PER_LINUX32) 234 if (personality(current->personality) == PER_LINUX32)
234 err |= copy_to_user(&name->machine, "i686", 5); 235 err |= copy_to_user(&name->machine, "i686", 5);
235 return err ? -EFAULT : 0; 236 return err ? -EFAULT : 0;
236} 237}
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 3f18d73f420c..fe36d96ba70b 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -32,6 +32,8 @@
32#include <linux/bug.h> 32#include <linux/bug.h>
33#include <linux/nmi.h> 33#include <linux/nmi.h>
34#include <linux/mm.h> 34#include <linux/mm.h>
35#include <linux/smp.h>
36#include <linux/io.h>
35 37
36#if defined(CONFIG_EDAC) 38#if defined(CONFIG_EDAC)
37#include <linux/edac.h> 39#include <linux/edac.h>
@@ -45,9 +47,6 @@
45#include <asm/unwind.h> 47#include <asm/unwind.h>
46#include <asm/desc.h> 48#include <asm/desc.h>
47#include <asm/i387.h> 49#include <asm/i387.h>
48#include <asm/nmi.h>
49#include <asm/smp.h>
50#include <asm/io.h>
51#include <asm/pgalloc.h> 50#include <asm/pgalloc.h>
52#include <asm/proto.h> 51#include <asm/proto.h>
53#include <asm/pda.h> 52#include <asm/pda.h>
@@ -85,7 +84,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
85 84
86void printk_address(unsigned long address, int reliable) 85void printk_address(unsigned long address, int reliable)
87{ 86{
88 printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); 87 printk(" [<%016lx>] %s%pS\n", address, reliable ?
88 "" : "? ", (void *) address);
89} 89}
90 90
91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, 91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
@@ -98,7 +98,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
98 [STACKFAULT_STACK - 1] = "#SS", 98 [STACKFAULT_STACK - 1] = "#SS",
99 [MCE_STACK - 1] = "#MC", 99 [MCE_STACK - 1] = "#MC",
100#if DEBUG_STKSZ > EXCEPTION_STKSZ 100#if DEBUG_STKSZ > EXCEPTION_STKSZ
101 [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" 101 [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ /
102 EXCEPTION_STKSZ - 2] = "#DB[?]"
102#endif 103#endif
103 }; 104 };
104 unsigned k; 105 unsigned k;
@@ -163,7 +164,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
163} 164}
164 165
165/* 166/*
166 * x86-64 can have up to three kernel stacks: 167 * x86-64 can have up to three kernel stacks:
167 * process stack 168 * process stack
168 * interrupt stack 169 * interrupt stack
169 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack 170 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
219 const struct stacktrace_ops *ops, void *data) 220 const struct stacktrace_ops *ops, void *data)
220{ 221{
221 const unsigned cpu = get_cpu(); 222 const unsigned cpu = get_cpu();
222 unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; 223 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
223 unsigned used = 0; 224 unsigned used = 0;
224 struct thread_info *tinfo; 225 struct thread_info *tinfo;
225 226
@@ -237,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
237 if (!bp) { 238 if (!bp) {
238 if (task == current) { 239 if (task == current) {
239 /* Grab bp right from our regs */ 240 /* Grab bp right from our regs */
240 asm("movq %%rbp, %0" : "=r" (bp) :); 241 asm("movq %%rbp, %0" : "=r" (bp) : );
241 } else { 242 } else {
242 /* bp is the last reg pushed by switch_to */ 243 /* bp is the last reg pushed by switch_to */
243 bp = *(unsigned long *) task->thread.sp; 244 bp = *(unsigned long *) task->thread.sp;
@@ -357,11 +358,13 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
357 unsigned long *stack; 358 unsigned long *stack;
358 int i; 359 int i;
359 const int cpu = smp_processor_id(); 360 const int cpu = smp_processor_id();
360 unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); 361 unsigned long *irqstack_end =
361 unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); 362 (unsigned long *) (cpu_pda(cpu)->irqstackptr);
363 unsigned long *irqstack =
364 (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
362 365
363 // debugging aid: "show_stack(NULL, NULL);" prints the 366 /* debugging aid: "show_stack(NULL, NULL);" prints the
364 // back trace for this cpu. 367 back trace for this cpu. */
365 368
366 if (sp == NULL) { 369 if (sp == NULL) {
367 if (task) 370 if (task)
@@ -404,7 +407,7 @@ void dump_stack(void)
404 407
405#ifdef CONFIG_FRAME_POINTER 408#ifdef CONFIG_FRAME_POINTER
406 if (!bp) 409 if (!bp)
407 asm("movq %%rbp, %0" : "=r" (bp):); 410 asm("movq %%rbp, %0" : "=r" (bp) : );
408#endif 411#endif
409 412
410 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 413 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
@@ -414,7 +417,6 @@ void dump_stack(void)
414 init_utsname()->version); 417 init_utsname()->version);
415 show_trace(NULL, NULL, &stack, bp); 418 show_trace(NULL, NULL, &stack, bp);
416} 419}
417
418EXPORT_SYMBOL(dump_stack); 420EXPORT_SYMBOL(dump_stack);
419 421
420void show_registers(struct pt_regs *regs) 422void show_registers(struct pt_regs *regs)
@@ -493,7 +495,7 @@ unsigned __kprobes long oops_begin(void)
493 raw_local_irq_save(flags); 495 raw_local_irq_save(flags);
494 cpu = smp_processor_id(); 496 cpu = smp_processor_id();
495 if (!__raw_spin_trylock(&die_lock)) { 497 if (!__raw_spin_trylock(&die_lock)) {
496 if (cpu == die_owner) 498 if (cpu == die_owner)
497 /* nested oops. should stop eventually */; 499 /* nested oops. should stop eventually */;
498 else 500 else
499 __raw_spin_lock(&die_lock); 501 __raw_spin_lock(&die_lock);
@@ -638,7 +640,7 @@ kernel_trap:
638} 640}
639 641
640#define DO_ERROR(trapnr, signr, str, name) \ 642#define DO_ERROR(trapnr, signr, str, name) \
641asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 643asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
642{ \ 644{ \
643 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 645 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
644 == NOTIFY_STOP) \ 646 == NOTIFY_STOP) \
@@ -648,7 +650,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
648} 650}
649 651
650#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 652#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
651asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 653asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
652{ \ 654{ \
653 siginfo_t info; \ 655 siginfo_t info; \
654 info.si_signo = signr; \ 656 info.si_signo = signr; \
@@ -683,7 +685,7 @@ asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
683 preempt_conditional_cli(regs); 685 preempt_conditional_cli(regs);
684} 686}
685 687
686asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) 688asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
687{ 689{
688 static const char str[] = "double fault"; 690 static const char str[] = "double fault";
689 struct task_struct *tsk = current; 691 struct task_struct *tsk = current;
@@ -778,9 +780,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
778} 780}
779 781
780static notrace __kprobes void 782static notrace __kprobes void
781unknown_nmi_error(unsigned char reason, struct pt_regs * regs) 783unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
782{ 784{
783 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) 785 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
786 NOTIFY_STOP)
784 return; 787 return;
785 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", 788 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
786 reason); 789 reason);
@@ -882,7 +885,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
882 else if (user_mode(eregs)) 885 else if (user_mode(eregs))
883 regs = task_pt_regs(current); 886 regs = task_pt_regs(current);
884 /* Exception from kernel and interrupts are enabled. Move to 887 /* Exception from kernel and interrupts are enabled. Move to
885 kernel process stack. */ 888 kernel process stack. */
886 else if (eregs->flags & X86_EFLAGS_IF) 889 else if (eregs->flags & X86_EFLAGS_IF)
887 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); 890 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
888 if (eregs != regs) 891 if (eregs != regs)
@@ -891,7 +894,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
891} 894}
892 895
893/* runs on IST stack. */ 896/* runs on IST stack. */
894asmlinkage void __kprobes do_debug(struct pt_regs * regs, 897asmlinkage void __kprobes do_debug(struct pt_regs *regs,
895 unsigned long error_code) 898 unsigned long error_code)
896{ 899{
897 struct task_struct *tsk = current; 900 struct task_struct *tsk = current;
@@ -1035,7 +1038,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
1035 1038
1036asmlinkage void bad_intr(void) 1039asmlinkage void bad_intr(void)
1037{ 1040{
1038 printk("bad interrupt"); 1041 printk("bad interrupt");
1039} 1042}
1040 1043
1041asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) 1044asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
@@ -1047,7 +1050,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1047 1050
1048 conditional_sti(regs); 1051 conditional_sti(regs);
1049 if (!user_mode(regs) && 1052 if (!user_mode(regs) &&
1050 kernel_math_error(regs, "kernel simd math error", 19)) 1053 kernel_math_error(regs, "kernel simd math error", 19))
1051 return; 1054 return;
1052 1055
1053 /* 1056 /*
@@ -1092,7 +1095,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1092 force_sig_info(SIGFPE, &info, task); 1095 force_sig_info(SIGFPE, &info, task);
1093} 1096}
1094 1097
1095asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs) 1098asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
1096{ 1099{
1097} 1100}
1098 1101
@@ -1142,8 +1145,10 @@ void __init trap_init(void)
1142 set_intr_gate(0, &divide_error); 1145 set_intr_gate(0, &divide_error);
1143 set_intr_gate_ist(1, &debug, DEBUG_STACK); 1146 set_intr_gate_ist(1, &debug, DEBUG_STACK);
1144 set_intr_gate_ist(2, &nmi, NMI_STACK); 1147 set_intr_gate_ist(2, &nmi, NMI_STACK);
1145 set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ 1148 /* int3 can be called from all */
1146 set_system_gate(4, &overflow); /* int4 can be called from all */ 1149 set_system_gate_ist(3, &int3, DEBUG_STACK);
1150 /* int4 can be called from all */
1151 set_system_gate(4, &overflow);
1147 set_intr_gate(5, &bounds); 1152 set_intr_gate(5, &bounds);
1148 set_intr_gate(6, &invalid_op); 1153 set_intr_gate(6, &invalid_op);
1149 set_intr_gate(7, &device_not_available); 1154 set_intr_gate(7, &device_not_available);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7603c0553909..46af71676738 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup);
104/* 104/*
105 * Read TSC and the reference counters. Take care of SMI disturbance 105 * Read TSC and the reference counters. Take care of SMI disturbance
106 */ 106 */
107static u64 __init tsc_read_refs(u64 *pm, u64 *hpet) 107static u64 tsc_read_refs(u64 *pm, u64 *hpet)
108{ 108{
109 u64 t1, t2; 109 u64 t1, t2;
110 int i; 110 int i;
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 41e01b145c48..3059eb45a915 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -25,45 +25,31 @@
25#include <asm/visws/cobalt.h> 25#include <asm/visws/cobalt.h>
26#include <asm/visws/piix4.h> 26#include <asm/visws/piix4.h>
27#include <asm/arch_hooks.h> 27#include <asm/arch_hooks.h>
28#include <asm/io_apic.h>
28#include <asm/fixmap.h> 29#include <asm/fixmap.h>
29#include <asm/reboot.h> 30#include <asm/reboot.h>
30#include <asm/setup.h> 31#include <asm/setup.h>
31#include <asm/e820.h> 32#include <asm/e820.h>
32#include <asm/smp.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <mach_ipi.h> 35#include <mach_ipi.h>
36 36
37#include "mach_apic.h" 37#include "mach_apic.h"
38 38
39#include <linux/init.h>
40#include <linux/smp.h>
41
42#include <linux/kernel_stat.h> 39#include <linux/kernel_stat.h>
43#include <linux/interrupt.h>
44#include <linux/init.h>
45 40
46#include <asm/io.h>
47#include <asm/apic.h>
48#include <asm/i8259.h> 41#include <asm/i8259.h>
49#include <asm/irq_vectors.h> 42#include <asm/irq_vectors.h>
50#include <asm/visws/cobalt.h>
51#include <asm/visws/lithium.h> 43#include <asm/visws/lithium.h>
52#include <asm/visws/piix4.h>
53 44
54#include <linux/sched.h> 45#include <linux/sched.h>
55#include <linux/kernel.h> 46#include <linux/kernel.h>
56#include <linux/init.h>
57#include <linux/pci.h> 47#include <linux/pci.h>
58#include <linux/pci_ids.h> 48#include <linux/pci_ids.h>
59 49
60extern int no_broadcast; 50extern int no_broadcast;
61 51
62#include <asm/io.h>
63#include <asm/apic.h> 52#include <asm/apic.h>
64#include <asm/arch_hooks.h>
65#include <asm/visws/cobalt.h>
66#include <asm/visws/lithium.h>
67 53
68char visws_board_type = -1; 54char visws_board_type = -1;
69char visws_board_rev = -1; 55char visws_board_rev = -1;
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 62fa440678d8..847c164725f4 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn,
328 328
329 get_memcfg_numa(); 329 get_memcfg_numa();
330 330
331 kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE); 331 kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
332 332
333 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); 333 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
334 do { 334 do {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 129618ca0ea2..08a20e6a15c2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -221,7 +221,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
221void __init cleanup_highmap(void) 221void __init cleanup_highmap(void)
222{ 222{
223 unsigned long vaddr = __START_KERNEL_map; 223 unsigned long vaddr = __START_KERNEL_map;
224 unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1; 224 unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
225 pmd_t *pmd = level2_kernel_pgt; 225 pmd_t *pmd = level2_kernel_pgt;
226 pmd_t *last_pmd = pmd + PTRS_PER_PMD; 226 pmd_t *last_pmd = pmd + PTRS_PER_PMD;
227 227
@@ -437,14 +437,14 @@ static void __init find_early_table_space(unsigned long end)
437 unsigned long puds, pmds, ptes, tables, start; 437 unsigned long puds, pmds, ptes, tables, start;
438 438
439 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 439 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
440 tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); 440 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
441 if (direct_gbpages) { 441 if (direct_gbpages) {
442 unsigned long extra; 442 unsigned long extra;
443 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); 443 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
444 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; 444 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
445 } else 445 } else
446 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 446 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
447 tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); 447 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
448 448
449 if (cpu_has_pse) { 449 if (cpu_has_pse) {
450 unsigned long extra; 450 unsigned long extra;
@@ -452,7 +452,7 @@ static void __init find_early_table_space(unsigned long end)
452 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 452 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
453 } else 453 } else
454 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; 454 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
455 tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE); 455 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
456 456
457 /* 457 /*
458 * RED-PEN putting page tables only on node 0 could 458 * RED-PEN putting page tables only on node 0 could
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a4dd793d6003..cebcbf152d46 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void)
79 return 0; 79 return 0;
80 80
81 addr = 0x8000; 81 addr = 0x8000;
82 nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); 82 nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
83 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, 83 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
84 nodemap_size, L1_CACHE_BYTES); 84 nodemap_size, L1_CACHE_BYTES);
85 if (nodemap_addr == -1UL) { 85 if (nodemap_addr == -1UL) {
@@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
176 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; 176 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
177 unsigned long bootmap_start, nodedata_phys; 177 unsigned long bootmap_start, nodedata_phys;
178 void *bootmap; 178 void *bootmap;
179 const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE); 179 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
180 int nid; 180 int nid;
181 181
182 start = round_up(start, ZONE_ALIGN); 182 start = roundup(start, ZONE_ALIGN);
183 183
184 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, 184 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
185 start, end); 185 start, end);
@@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
210 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); 210 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
211 nid = phys_to_nid(nodedata_phys); 211 nid = phys_to_nid(nodedata_phys);
212 if (nid == nodeid) 212 if (nid == nodeid)
213 bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE); 213 bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
214 else 214 else
215 bootmap_start = round_up(start, PAGE_SIZE); 215 bootmap_start = roundup(start, PAGE_SIZE);
216 /* 216 /*
217 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like 217 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like
218 * to use that to align to PAGE_SIZE 218 * to use that to align to PAGE_SIZE
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 65c6e46bf059..0d254adcc82d 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -78,7 +78,7 @@ static inline unsigned long highmap_start_pfn(void)
78 78
79static inline unsigned long highmap_end_pfn(void) 79static inline unsigned long highmap_end_pfn(void)
80{ 80{
81 return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; 81 return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
82} 82}
83 83
84#endif 84#endif
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 133c998161ca..e9e09b2ee7cd 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -76,9 +76,7 @@ extern int get_physical_broadcast(void);
76static inline void ack_APIC_irq(void) 76static inline void ack_APIC_irq(void)
77{ 77{
78 /* 78 /*
79 * ack_APIC_irq() actually gets compiled as a single instruction: 79 * ack_APIC_irq() actually gets compiled as a single instruction
80 * - a single rmw on Pentium/82489DX
81 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
82 * ... yummie. 80 * ... yummie.
83 */ 81 */
84 82
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
index 95beda07c6fa..e293ab81e850 100644
--- a/include/asm-x86/mmconfig.h
+++ b/include/asm-x86/mmconfig.h
@@ -3,7 +3,7 @@
3 3
4#ifdef CONFIG_PCI_MMCONFIG 4#ifdef CONFIG_PCI_MMCONFIG
5extern void __cpuinit fam10h_check_enable_mmcfg(void); 5extern void __cpuinit fam10h_check_enable_mmcfg(void);
6extern void __init check_enable_amd_mmconf_dmi(void); 6extern void __cpuinit check_enable_amd_mmconf_dmi(void);
7#else 7#else
8static inline void fam10h_check_enable_mmcfg(void) { } 8static inline void fam10h_check_enable_mmcfg(void) { }
9static inline void check_enable_amd_mmconf_dmi(void) { } 9static inline void check_enable_amd_mmconf_dmi(void) { }