author     Anton Altaparmakov <aia21@cantab.net>  2005-06-23 06:26:22 -0400
committer  Anton Altaparmakov <aia21@cantab.net>  2005-06-23 06:26:22 -0400
commit     3357d4c75f1fb67e7304998c4ad4e9a9fed66fa4 (patch)
tree       ceba46966a5a1112a05d257d8ecb25ae5eee95e0 /arch/x86_64
parent     364f6c717deef4a3ac4982e670fa9846b43cd060 (diff)
parent     ee98689be1b054897ff17655008c3048fe88be94 (diff)
Automatic merge with /usr/src/ntfs-2.6.git.
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/ia32/ia32_aout.c     1
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c   5
-rw-r--r--  arch/x86_64/kernel/aperture.c    41
-rw-r--r--  arch/x86_64/kernel/process.c     4
-rw-r--r--  arch/x86_64/kernel/ptrace.c      17
-rw-r--r--  arch/x86_64/kernel/sys_x86_64.c  23
-rw-r--r--  arch/x86_64/lib/delay.c          2
-rw-r--r--  arch/x86_64/mm/fault.c           2
8 files changed, 40 insertions, 55 deletions
diff --git a/arch/x86_64/ia32/ia32_aout.c b/arch/x86_64/ia32/ia32_aout.c
index 1965efc974dc..c12edf5d97f0 100644
--- a/arch/x86_64/ia32/ia32_aout.c
+++ b/arch/x86_64/ia32/ia32_aout.c
@@ -312,6 +312,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
         current->mm->brk = ex.a_bss +
                 (current->mm->start_brk = N_BSSADDR(ex));
         current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+        current->mm->cached_hole_size = 0;
 
         set_mm_counter(current->mm, rss, 0);
         current->mm->mmap = NULL;
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 99b522052d16..c8131f342cfc 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -46,7 +46,7 @@ struct elf_phdr;
 
 #define IA32_EMULATOR 1
 
-#define ELF_ET_DYN_BASE (TASK_UNMAPPED_32 + 0x1000000)
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
 
 #undef ELF_ARCH
 #define ELF_ARCH EM_386
@@ -307,9 +307,6 @@ MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
 
 #define elf_addr_t __u32
 
-#undef TASK_SIZE
-#define TASK_SIZE 0xffffffff
-
 static void elf32_init(struct pt_regs *);
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index a491f72cc966..504e63474993 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -33,12 +33,10 @@ int fallback_aper_force __initdata = 0;
 
 int fix_aperture __initdata = 1;
 
+/* This code runs before the PCI subsystem is initialized, so just
+   access the northbridge directly. */
+
 #define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
 
-static struct resource aper_res = {
-        .name = "Aperture",
-        .flags = IORESOURCE_MEM,
-};
-
 static u32 __init allocate_aperture(void)
 {
@@ -55,24 +53,11 @@ static u32 __init allocate_aperture(void)
         aper_size = (32 * 1024 * 1024) << fallback_aper_order;
 
         /*
-         * Aperture has to be naturally aligned. This means an 2GB
-         * aperture won't have much chances to find a place in the
-         * lower 4GB of memory. Unfortunately we cannot move it up
-         * because that would make the IOMMU useless.
+         * Aperture has to be naturally aligned. This means an 2GB aperture won't
+         * have much chances to find a place in the lower 4GB of memory.
+         * Unfortunately we cannot move it up because that would make the
+         * IOMMU useless.
          */
-
-        /* First try to find some free unused space */
-        if (!allocate_resource(&iomem_resource, &aper_res,
-                               aper_size,
-                               0, 0xffffffff,
-                               aper_size,
-                               NULL, NULL)) {
-                printk(KERN_INFO "Putting aperture at %lx-%lx\n",
-                                aper_res.start, aper_res.end);
-                return aper_res.start;
-        }
-
-        /* No free space found. Go on to waste some memory... */
         p = __alloc_bootmem_node(nd0, aper_size, aper_size, 0);
         if (!p || __pa(p)+aper_size > 0xffffffff) {
                 printk("Cannot allocate aperture memory hole (%p,%uK)\n",
@@ -81,7 +66,7 @@ static u32 __init allocate_aperture(void)
                 free_bootmem_node(nd0, (unsigned long)p, aper_size);
                 return 0;
         }
-        printk("Mapping aperture over %d KB of precious RAM @ %lx\n",
+        printk("Mapping aperture over %d KB of RAM @ %lx\n",
                         aper_size >> 10, __pa(p));
         return (u32)__pa(p);
 }
@@ -102,16 +87,10 @@ static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size)
                 printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name);
                 return 0;
         }
-        /* Don't check the resource here because the aperture is usually
-           in an e820 reserved area, and we allocated these earlier. */
         return 1;
 }
 
-/*
- * Find a PCI capability.
- * This code runs before the PCI subsystem is initialized, so just
- * access the northbridge directly.
- */
+/* Find a PCI capability */
 static __u32 __init find_cap(int num, int slot, int func, int cap)
 {
         u8 pos;
@@ -276,6 +255,8 @@ void __init iommu_hole_init(void)
             fallback_aper_force) {
                 printk("Your BIOS doesn't leave a aperture memory hole\n");
                 printk("Please enable the IOMMU option in the BIOS setup\n");
+                printk("This costs you %d MB of RAM\n",
+                       32 << fallback_aper_order);
 
                 aper_order = fallback_aper_order;
                 aper_alloc = allocate_aperture();
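
Editor's note: the aperture.c hunks above keep the "naturally aligned" requirement spelled out in the comment and satisfy it by passing aper_size as both size and alignment to __alloc_bootmem_node(). A minimal stand-alone sketch of what natural alignment means; the helper name and the addresses are illustrative, not kernel code:

#include <stdio.h>
#include <stdbool.h>

/* Naturally aligned: a region whose size is a power of two starts on a
 * boundary that is a multiple of that size.  Helper name is made up. */
static bool naturally_aligned(unsigned long addr, unsigned long size)
{
        return (addr & (size - 1)) == 0;
}

int main(void)
{
        unsigned long aper_size = 64UL << 20;   /* a 64 MB aperture */

        printf("%d\n", naturally_aligned(0x04000000UL, aper_size));  /* 1: on a 64 MB boundary */
        printf("%d\n", naturally_aligned(0x05000000UL, aper_size));  /* 0: 80 MB, misaligned */
        /* __alloc_bootmem_node(nd0, aper_size, aper_size, 0) in the hunk above
         * gets this by passing the size itself as the alignment argument. */
        return 0;
}

For a 2GB aperture the only naturally aligned candidates below 4GB are 0 and 0x80000000, which is the arithmetic behind the comment that such an aperture has little chance of fitting in the lower 4GB.
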
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 761b6d35e338..dce8bab4306c 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -656,7 +656,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 
         switch (code) {
         case ARCH_SET_GS:
-                if (addr >= TASK_SIZE)
+                if (addr >= TASK_SIZE_OF(task))
                         return -EPERM;
                 cpu = get_cpu();
                 /* handle small bases via the GDT because that's faster to
@@ -682,7 +682,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
         case ARCH_SET_FS:
                 /* Not strictly needed for fs, but do it for symmetry
                    with gs */
-                if (addr >= TASK_SIZE)
+                if (addr >= TASK_SIZE_OF(task))
                         return -EPERM;
                 cpu = get_cpu();
                 /* handle small bases via the GDT because that's faster to
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index 525f6a128a27..bbf64b59a21e 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -257,12 +257,12 @@ static int putreg(struct task_struct *child,
                 value &= 0xffff;
                 return 0;
         case offsetof(struct user_regs_struct,fs_base):
-                if (value >= TASK_SIZE)
+                if (value >= TASK_SIZE_OF(child))
                         return -EIO;
                 child->thread.fs = value;
                 return 0;
         case offsetof(struct user_regs_struct,gs_base):
-                if (value >= TASK_SIZE)
+                if (value >= TASK_SIZE_OF(child))
                         return -EIO;
                 child->thread.gs = value;
                 return 0;
@@ -279,7 +279,7 @@ static int putreg(struct task_struct *child,
                 break;
         case offsetof(struct user_regs_struct, rip):
                 /* Check if the new RIP address is canonical */
-                if (value >= TASK_SIZE)
+                if (value >= TASK_SIZE_OF(child))
                         return -EIO;
                 break;
         }
@@ -419,6 +419,8 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
                 break;
 
         case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+        {
+                int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
                 ret = -EIO;
                 if ((addr & 7) ||
                     addr > sizeof(struct user) - 7)
@@ -430,22 +432,22 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
                         break;
                 /* Disallows to set a breakpoint into the vsyscall */
                 case offsetof(struct user, u_debugreg[0]):
-                        if (data >= TASK_SIZE-7) break;
+                        if (data >= TASK_SIZE_OF(child) - dsize) break;
                         child->thread.debugreg0 = data;
                         ret = 0;
                         break;
                 case offsetof(struct user, u_debugreg[1]):
-                        if (data >= TASK_SIZE-7) break;
+                        if (data >= TASK_SIZE_OF(child) - dsize) break;
                         child->thread.debugreg1 = data;
                         ret = 0;
                         break;
                 case offsetof(struct user, u_debugreg[2]):
-                        if (data >= TASK_SIZE-7) break;
+                        if (data >= TASK_SIZE_OF(child) - dsize) break;
                         child->thread.debugreg2 = data;
                         ret = 0;
                         break;
                 case offsetof(struct user, u_debugreg[3]):
-                        if (data >= TASK_SIZE-7) break;
+                        if (data >= TASK_SIZE_OF(child) - dsize) break;
                         child->thread.debugreg3 = data;
                         ret = 0;
                         break;
@@ -469,6 +471,7 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
                         break;
                 }
                 break;
+        }
         case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
         case PTRACE_CONT: /* restart after signal. */
 
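
Editor's note: the process.c and ptrace.c hunks above replace checks against the caller's TASK_SIZE with a per-task limit, so that addresses written into a traced child are validated against the child's own address-space ceiling (a 32-bit child has a much lower one than a 64-bit tracer). A self-contained sketch of the idea; the struct, helper name, and the two limit values are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>
#include <stdbool.h>

struct task {
        bool ia32;                      /* stands in for TIF_IA32 */
};

/* Per-task ceiling: a 32-bit tracee must stay inside a 32-bit range even
 * when the tracer itself runs in 64-bit mode.  Both limits are examples. */
static unsigned long task_size_of(const struct task *t)
{
        return t->ia32 ? 0xffffe000UL              /* illustrative 32-bit limit */
                       : 0x00007fffffffffffUL;     /* illustrative 64-bit limit */
}

/* Mirrors the shape of the fs_base/gs_base checks in putreg() above. */
static int set_fs_base(const struct task *child, unsigned long value)
{
        if (value >= task_size_of(child))
                return -1;              /* -EIO in the kernel */
        printf("fs_base set to %#lx\n", value);
        return 0;
}

int main(void)
{
        struct task child32 = { .ia32 = true };
        struct task child64 = { .ia32 = false };
        unsigned long addr = 0x100000000UL;     /* just above 4GB */

        printf("32-bit child: %d\n", set_fs_base(&child32, addr));     /* rejected */
        printf("64-bit child: %d\n", set_fs_base(&child64, addr));     /* accepted */
        return 0;
}
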
diff --git a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c
index dbebd5ccba6b..cc7821c68851 100644
--- a/arch/x86_64/kernel/sys_x86_64.c
+++ b/arch/x86_64/kernel/sys_x86_64.c
@@ -68,13 +68,7 @@ out:
 static void find_start_end(unsigned long flags, unsigned long *begin,
                            unsigned long *end)
 {
-#ifdef CONFIG_IA32_EMULATION
-        if (test_thread_flag(TIF_IA32)) {
-                *begin = TASK_UNMAPPED_32;
-                *end = IA32_PAGE_OFFSET;
-        } else
-#endif
-        if (flags & MAP_32BIT) {
+        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
                 /* This is usually used needed to map code in small
                    model, so it needs to be in the first 31bit. Limit
                    it to that. This means we need to move the
@@ -84,10 +78,10 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
                    of playground for now. -AK */
                 *begin = 0x40000000;
                 *end = 0x80000000;
         } else {
-                *begin = TASK_UNMAPPED_64;
+                *begin = TASK_UNMAPPED_BASE;
                 *end = TASK_SIZE;
         }
 }
 
 unsigned long
@@ -111,6 +105,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                     (!vma || addr + len <= vma->vm_start))
                         return addr;
         }
+        if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+            && len <= mm->cached_hole_size) {
+                mm->cached_hole_size = 0;
+                mm->free_area_cache = begin;
+        }
         addr = mm->free_area_cache;
         if (addr < begin)
                 addr = begin;
@@ -126,6 +125,7 @@ full_search:
                          */
                         if (start_addr != begin) {
                                 start_addr = addr = begin;
+                                mm->cached_hole_size = 0;
                                 goto full_search;
                         }
                         return -ENOMEM;
@@ -137,6 +137,9 @@ full_search:
                         mm->free_area_cache = addr + len;
                         return addr;
                 }
+                if (addr + mm->cached_hole_size < vma->vm_start)
+                        mm->cached_hole_size = vma->vm_start - addr;
+
                 addr = vma->vm_end;
         }
 }
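
Editor's note: the sys_x86_64.c hunks above hook arch_get_unmapped_area() into the free_area_cache/cached_hole_size bookkeeping: the search remembers the largest hole it skipped, restarts from the bottom when a new request would fit in that hole, and clears the cache when it wraps around. A compilable user-space sketch of that first-fit logic under those assumptions; the region array stands in for the mm's vma list and all names are made up for illustration:

#include <stdio.h>

struct region { unsigned long start, end; };    /* sorted, non-overlapping mappings */

static unsigned long free_area_cache;    /* where the last search left off */
static unsigned long cached_hole_size;   /* largest hole skipped below that point */

/* find_vma() stand-in: index of the first region ending above addr, or n */
static int first_region_above(const struct region *r, int n, unsigned long addr)
{
        int i;

        for (i = 0; i < n; i++)
                if (r[i].end > addr)
                        break;
        return i;
}

static unsigned long get_unmapped_area(const struct region *r, int n,
                                       unsigned long begin, unsigned long end,
                                       unsigned long len)
{
        unsigned long addr, start;

        /* If the request fits in a hole we already skipped, restart from the
         * bottom instead of pushing free_area_cache ever upward. */
        if (len <= cached_hole_size) {
                cached_hole_size = 0;
                free_area_cache = begin;
        }
        addr = free_area_cache < begin ? begin : free_area_cache;
        start = addr;

full_search:
        for (;;) {
                int i = first_region_above(r, n, addr);

                if (addr + len > end) {
                        if (start != begin) {   /* retry once from the bottom */
                                start = addr = begin;
                                cached_hole_size = 0;
                                goto full_search;
                        }
                        return (unsigned long)-1;       /* -ENOMEM in the kernel */
                }
                if (i == n || addr + len <= r[i].start) {
                        free_area_cache = addr + len;   /* remember for next time */
                        return addr;
                }
                if (addr + cached_hole_size < r[i].start)  /* track largest skipped hole */
                        cached_hole_size = r[i].start - addr;
                addr = r[i].end;
        }
}

int main(void)
{
        struct region r[] = { { 0x10000, 0x20000 }, { 0x80000, 0x90000 } };

        free_area_cache = 0x10000;
        printf("placed at %#lx\n", get_unmapped_area(r, 2, 0x10000, 0x100000, 0x5000));
        return 0;
}

The point of the cached hole is to keep free_area_cache from creeping upward forever while perfectly usable holes accumulate below it.
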
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 6e2d66472eb1..aed61a668a1b 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -34,7 +34,7 @@ void __delay(unsigned long loops)
 
 inline void __const_udelay(unsigned long xloops)
 {
-        __delay(((xloops * cpu_data[_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
+        __delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
 }
 
 void __udelay(unsigned long usecs)
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 5d6b2114cc9a..57d3ab15a5c7 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -350,7 +350,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
          * (error_code & 4) == 0, and that the fault was not a
          * protection error (error_code & 1) == 0.
          */
-        if (unlikely(address >= TASK_SIZE)) {
+        if (unlikely(address >= TASK_SIZE64)) {
                 if (!(error_code & 5) &&
                     ((address >= VMALLOC_START && address < VMALLOC_END) ||
                      (address >= MODULES_VADDR && address < MODULES_END))) {