diff options
author | Andi Kleen <ak@muc.de> | 2005-04-16 18:24:55 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:24:55 -0400 |
commit | 1e01441051dda3bb01c455b6e20bce6d00563d82 (patch) | |
tree | 5dc4c69dd4522ca569f70ead0ecbb923f1451891 /arch/x86_64/mm | |
parent | 35faa71484287fc150b8498cd5acae59ad17a356 (diff) |
[PATCH] x86_64: Use a VMA for the 32bit vsyscall
Use a real VMA to map the 32bit vsyscall page
This interacts better with Hugh's upcoming VMA walk optimization
Also removes some ugly special cases.
Code roughly modelled after the ppc64 vdso version from Ben Herrenschmidt.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r-- | arch/x86_64/mm/fault.c | 12 | ||||
-rw-r--r-- | arch/x86_64/mm/init.c | 26 |
2 files changed, 8 insertions, 30 deletions
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c index 5724370475cc..d4676ca09144 100644 --- a/arch/x86_64/mm/fault.c +++ b/arch/x86_64/mm/fault.c | |||
@@ -458,17 +458,6 @@ bad_area: | |||
458 | up_read(&mm->mmap_sem); | 458 | up_read(&mm->mmap_sem); |
459 | 459 | ||
460 | bad_area_nosemaphore: | 460 | bad_area_nosemaphore: |
461 | |||
462 | #ifdef CONFIG_IA32_EMULATION | ||
463 | /* 32bit vsyscall. map on demand. */ | ||
464 | if (test_thread_flag(TIF_IA32) && | ||
465 | address >= VSYSCALL32_BASE && address < VSYSCALL32_END) { | ||
466 | if (map_syscall32(mm, address) < 0) | ||
467 | goto out_of_memory2; | ||
468 | return; | ||
469 | } | ||
470 | #endif | ||
471 | |||
472 | /* User mode accesses just cause a SIGSEGV */ | 461 | /* User mode accesses just cause a SIGSEGV */ |
473 | if (error_code & 4) { | 462 | if (error_code & 4) { |
474 | if (is_prefetch(regs, address, error_code)) | 463 | if (is_prefetch(regs, address, error_code)) |
@@ -550,7 +539,6 @@ no_context: | |||
550 | */ | 539 | */ |
551 | out_of_memory: | 540 | out_of_memory: |
552 | up_read(&mm->mmap_sem); | 541 | up_read(&mm->mmap_sem); |
553 | out_of_memory2: | ||
554 | if (current->pid == 1) { | 542 | if (current->pid == 1) { |
555 | yield(); | 543 | yield(); |
556 | goto again; | 544 | goto again; |
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index b0d604551d86..dbe53b4c7e66 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c | |||
@@ -583,9 +583,9 @@ static __init int x8664_sysctl_init(void) | |||
583 | __initcall(x8664_sysctl_init); | 583 | __initcall(x8664_sysctl_init); |
584 | #endif | 584 | #endif |
585 | 585 | ||
586 | /* Pseudo VMAs to allow ptrace access for the vsyscall pages. x86-64 has two | 586 | /* A pseudo VMAs to allow ptrace access for the vsyscall page. This only |
587 | different ones: one for 32bit and one for 64bit. Use the appropiate | 587 | covers the 64bit vsyscall page now. 32bit has a real VMA now and does |
588 | for the target task. */ | 588 | not need special handling anymore. */ |
589 | 589 | ||
590 | static struct vm_area_struct gate_vma = { | 590 | static struct vm_area_struct gate_vma = { |
591 | .vm_start = VSYSCALL_START, | 591 | .vm_start = VSYSCALL_START, |
@@ -593,22 +593,11 @@ static struct vm_area_struct gate_vma = { | |||
593 | .vm_page_prot = PAGE_READONLY | 593 | .vm_page_prot = PAGE_READONLY |
594 | }; | 594 | }; |
595 | 595 | ||
596 | static struct vm_area_struct gate32_vma = { | ||
597 | .vm_start = VSYSCALL32_BASE, | ||
598 | .vm_end = VSYSCALL32_END, | ||
599 | .vm_page_prot = PAGE_READONLY | ||
600 | }; | ||
601 | |||
602 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | 596 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) |
603 | { | 597 | { |
604 | #ifdef CONFIG_IA32_EMULATION | 598 | #ifdef CONFIG_IA32_EMULATION |
605 | if (test_tsk_thread_flag(tsk, TIF_IA32)) { | 599 | if (test_tsk_thread_flag(tsk, TIF_IA32)) |
606 | /* lookup code assumes the pages are present. set them up | 600 | return NULL; |
607 | now */ | ||
608 | if (__map_syscall32(tsk->mm, VSYSCALL32_BASE) < 0) | ||
609 | return NULL; | ||
610 | return &gate32_vma; | ||
611 | } | ||
612 | #endif | 601 | #endif |
613 | return &gate_vma; | 602 | return &gate_vma; |
614 | } | 603 | } |
@@ -616,6 +605,8 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | |||
616 | int in_gate_area(struct task_struct *task, unsigned long addr) | 605 | int in_gate_area(struct task_struct *task, unsigned long addr) |
617 | { | 606 | { |
618 | struct vm_area_struct *vma = get_gate_vma(task); | 607 | struct vm_area_struct *vma = get_gate_vma(task); |
608 | if (!vma) | ||
609 | return 0; | ||
619 | return (addr >= vma->vm_start) && (addr < vma->vm_end); | 610 | return (addr >= vma->vm_start) && (addr < vma->vm_end); |
620 | } | 611 | } |
621 | 612 | ||
@@ -625,6 +616,5 @@ int in_gate_area(struct task_struct *task, unsigned long addr) | |||
625 | */ | 616 | */ |
626 | int in_gate_area_no_task(unsigned long addr) | 617 | int in_gate_area_no_task(unsigned long addr) |
627 | { | 618 | { |
628 | return (((addr >= VSYSCALL_START) && (addr < VSYSCALL_END)) || | 619 | return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); |
629 | ((addr >= VSYSCALL32_BASE) && (addr < VSYSCALL32_END))); | ||
630 | } | 620 | } |