| | | |
|---|---|---|
| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
| commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
| tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/um/sys-i386 | |
| parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) | |
Diffstat (limited to 'arch/um/sys-i386')
43 files changed, 4282 insertions, 0 deletions
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
new file mode 100644
index 00000000000..3923cfb8764
--- /dev/null
+++ b/arch/um/sys-i386/Makefile
@@ -0,0 +1,24 @@
| 1 | # | ||
| 2 | # Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | # | ||
| 4 | |||
| 5 | obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ | ||
| 6 | ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ | ||
| 7 | sys_call_table.o tls.o atomic64_cx8_32.o mem.o | ||
| 8 | |||
| 9 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o | ||
| 10 | |||
| 11 | subarch-obj-y = lib/string_32.o | ||
| 12 | subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o | ||
| 13 | subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o | ||
| 14 | subarch-obj-$(CONFIG_MODULES) += kernel/module.o | ||
| 15 | |||
| 16 | USER_OBJS := bugs.o ptrace_user.o fault.o | ||
| 17 | |||
| 18 | USER_OBJS += user-offsets.s | ||
| 19 | extra-y += user-offsets.s | ||
| 20 | |||
| 21 | UNPROFILE_OBJS := stub_segv.o | ||
| 22 | CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING) | ||
| 23 | |||
| 24 | include arch/um/scripts/Makefile.rules | ||
diff --git a/arch/um/sys-i386/asm/archparam.h b/arch/um/sys-i386/asm/archparam.h
new file mode 100644
index 00000000000..2a18a884ca1
--- /dev/null
+++ b/arch/um/sys-i386/asm/archparam.h
@@ -0,0 +1,16 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __UM_ARCHPARAM_I386_H | ||
| 7 | #define __UM_ARCHPARAM_I386_H | ||
| 8 | |||
| 9 | #ifdef CONFIG_X86_PAE | ||
| 10 | #define LAST_PKMAP 512 | ||
| 11 | #else | ||
| 12 | #define LAST_PKMAP 1024 | ||
| 13 | #endif | ||
| 14 | |||
| 15 | #endif | ||
| 16 | |||
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h
new file mode 100644
index 00000000000..42305551d20
--- /dev/null
+++ b/arch/um/sys-i386/asm/elf.h
@@ -0,0 +1,125 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | #ifndef __UM_ELF_I386_H | ||
| 6 | #define __UM_ELF_I386_H | ||
| 7 | |||
| 8 | #include <asm/user.h> | ||
| 9 | #include "skas.h" | ||
| 10 | |||
| 11 | #define R_386_NONE 0 | ||
| 12 | #define R_386_32 1 | ||
| 13 | #define R_386_PC32 2 | ||
| 14 | #define R_386_GOT32 3 | ||
| 15 | #define R_386_PLT32 4 | ||
| 16 | #define R_386_COPY 5 | ||
| 17 | #define R_386_GLOB_DAT 6 | ||
| 18 | #define R_386_JMP_SLOT 7 | ||
| 19 | #define R_386_RELATIVE 8 | ||
| 20 | #define R_386_GOTOFF 9 | ||
| 21 | #define R_386_GOTPC 10 | ||
| 22 | #define R_386_NUM 11 | ||
| 23 | |||
| 24 | typedef unsigned long elf_greg_t; | ||
| 25 | |||
| 26 | #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) | ||
| 27 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
| 28 | |||
| 29 | typedef struct user_i387_struct elf_fpregset_t; | ||
| 30 | |||
| 31 | /* | ||
| 32 | * This is used to ensure we don't load something for the wrong architecture. | ||
| 33 | */ | ||
| 34 | #define elf_check_arch(x) \ | ||
| 35 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) | ||
| 36 | |||
| 37 | #define ELF_CLASS ELFCLASS32 | ||
| 38 | #define ELF_DATA ELFDATA2LSB | ||
| 39 | #define ELF_ARCH EM_386 | ||
| 40 | |||
| 41 | #define ELF_PLAT_INIT(regs, load_addr) do { \ | ||
| 42 | PT_REGS_EBX(regs) = 0; \ | ||
| 43 | PT_REGS_ECX(regs) = 0; \ | ||
| 44 | PT_REGS_EDX(regs) = 0; \ | ||
| 45 | PT_REGS_ESI(regs) = 0; \ | ||
| 46 | PT_REGS_EDI(regs) = 0; \ | ||
| 47 | PT_REGS_EBP(regs) = 0; \ | ||
| 48 | PT_REGS_EAX(regs) = 0; \ | ||
| 49 | } while (0) | ||
| 50 | |||
| 51 | #define ELF_EXEC_PAGESIZE 4096 | ||
| 52 | |||
| 53 | #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) | ||
| 54 | |||
| 55 | /* Shamelessly stolen from include/asm-i386/elf.h */ | ||
| 56 | |||
| 57 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ | ||
| 58 | pr_reg[0] = PT_REGS_EBX(regs); \ | ||
| 59 | pr_reg[1] = PT_REGS_ECX(regs); \ | ||
| 60 | pr_reg[2] = PT_REGS_EDX(regs); \ | ||
| 61 | pr_reg[3] = PT_REGS_ESI(regs); \ | ||
| 62 | pr_reg[4] = PT_REGS_EDI(regs); \ | ||
| 63 | pr_reg[5] = PT_REGS_EBP(regs); \ | ||
| 64 | pr_reg[6] = PT_REGS_EAX(regs); \ | ||
| 65 | pr_reg[7] = PT_REGS_DS(regs); \ | ||
| 66 | pr_reg[8] = PT_REGS_ES(regs); \ | ||
| 67 | /* fake once used fs and gs selectors? */ \ | ||
| 68 | pr_reg[9] = PT_REGS_DS(regs); \ | ||
| 69 | pr_reg[10] = PT_REGS_DS(regs); \ | ||
| 70 | pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \ | ||
| 71 | pr_reg[12] = PT_REGS_IP(regs); \ | ||
| 72 | pr_reg[13] = PT_REGS_CS(regs); \ | ||
| 73 | pr_reg[14] = PT_REGS_EFLAGS(regs); \ | ||
| 74 | pr_reg[15] = PT_REGS_SP(regs); \ | ||
| 75 | pr_reg[16] = PT_REGS_SS(regs); \ | ||
| 76 | } while (0); | ||
| 77 | |||
| 78 | #define task_pt_regs(t) (&(t)->thread.regs) | ||
| 79 | |||
| 80 | struct task_struct; | ||
| 81 | |||
| 82 | extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); | ||
| 83 | |||
| 84 | #define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu) | ||
| 85 | |||
| 86 | extern long elf_aux_hwcap; | ||
| 87 | #define ELF_HWCAP (elf_aux_hwcap) | ||
| 88 | |||
| 89 | extern char * elf_aux_platform; | ||
| 90 | #define ELF_PLATFORM (elf_aux_platform) | ||
| 91 | |||
| 92 | #define SET_PERSONALITY(ex) do { } while (0) | ||
| 93 | |||
| 94 | extern unsigned long vsyscall_ehdr; | ||
| 95 | extern unsigned long vsyscall_end; | ||
| 96 | extern unsigned long __kernel_vsyscall; | ||
| 97 | |||
| 98 | #define VSYSCALL_BASE vsyscall_ehdr | ||
| 99 | #define VSYSCALL_END vsyscall_end | ||
| 100 | |||
| 101 | /* | ||
| 102 | * This is the range that is readable by user mode, and things | ||
| 103 | * acting like user mode such as get_user_pages. | ||
| 104 | */ | ||
| 105 | #define FIXADDR_USER_START VSYSCALL_BASE | ||
| 106 | #define FIXADDR_USER_END VSYSCALL_END | ||
| 107 | |||
| 108 | #define __HAVE_ARCH_GATE_AREA 1 | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Architecture-neutral AT_ values in 0-17, leave some room | ||
| 112 | * for more of them, start the x86-specific ones at 32. | ||
| 113 | */ | ||
| 114 | #define AT_SYSINFO 32 | ||
| 115 | #define AT_SYSINFO_EHDR 33 | ||
| 116 | |||
| 117 | #define ARCH_DLINFO \ | ||
| 118 | do { \ | ||
| 119 | if ( vsyscall_ehdr ) { \ | ||
| 120 | NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \ | ||
| 121 | NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \ | ||
| 122 | } \ | ||
| 123 | } while (0) | ||
| 124 | |||
| 125 | #endif | ||
diff --git a/arch/um/sys-i386/asm/module.h b/arch/um/sys-i386/asm/module.h
new file mode 100644
index 00000000000..5ead4a0b2e3
--- /dev/null
+++ b/arch/um/sys-i386/asm/module.h
@@ -0,0 +1,13 @@
| 1 | #ifndef __UM_MODULE_I386_H | ||
| 2 | #define __UM_MODULE_I386_H | ||
| 3 | |||
| 4 | /* UML is simple */ | ||
| 5 | struct mod_arch_specific | ||
| 6 | { | ||
| 7 | }; | ||
| 8 | |||
| 9 | #define Elf_Shdr Elf32_Shdr | ||
| 10 | #define Elf_Sym Elf32_Sym | ||
| 11 | #define Elf_Ehdr Elf32_Ehdr | ||
| 12 | |||
| 13 | #endif | ||
diff --git a/arch/um/sys-i386/asm/processor.h b/arch/um/sys-i386/asm/processor.h
new file mode 100644
index 00000000000..82a9061ab5b
--- /dev/null
+++ b/arch/um/sys-i386/asm/processor.h
@@ -0,0 +1,78 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __UM_PROCESSOR_I386_H | ||
| 7 | #define __UM_PROCESSOR_I386_H | ||
| 8 | |||
| 9 | #include "linux/string.h" | ||
| 10 | #include <sysdep/host_ldt.h> | ||
| 11 | #include "asm/segment.h" | ||
| 12 | |||
| 13 | extern int host_has_cmov; | ||
| 14 | |||
| 15 | /* include faultinfo structure */ | ||
| 16 | #include "sysdep/faultinfo.h" | ||
| 17 | |||
| 18 | struct uml_tls_struct { | ||
| 19 | struct user_desc tls; | ||
| 20 | unsigned flushed:1; | ||
| 21 | unsigned present:1; | ||
| 22 | }; | ||
| 23 | |||
| 24 | struct arch_thread { | ||
| 25 | struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
| 26 | unsigned long debugregs[8]; | ||
| 27 | int debugregs_seq; | ||
| 28 | struct faultinfo faultinfo; | ||
| 29 | }; | ||
| 30 | |||
| 31 | #define INIT_ARCH_THREAD { \ | ||
| 32 | .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \ | ||
| 33 | { .present = 0, .flushed = 0 } }, \ | ||
| 34 | .debugregs = { [ 0 ... 7 ] = 0 }, \ | ||
| 35 | .debugregs_seq = 0, \ | ||
| 36 | .faultinfo = { 0, 0, 0 } \ | ||
| 37 | } | ||
| 38 | |||
| 39 | static inline void arch_flush_thread(struct arch_thread *thread) | ||
| 40 | { | ||
| 41 | /* Clear any TLS still hanging */ | ||
| 42 | memset(&thread->tls_array, 0, sizeof(thread->tls_array)); | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline void arch_copy_thread(struct arch_thread *from, | ||
| 46 | struct arch_thread *to) | ||
| 47 | { | ||
| 48 | memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array)); | ||
| 49 | } | ||
| 50 | |||
| 51 | #include <asm/user.h> | ||
| 52 | |||
| 53 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
| 54 | static inline void rep_nop(void) | ||
| 55 | { | ||
| 56 | __asm__ __volatile__("rep;nop": : :"memory"); | ||
| 57 | } | ||
| 58 | |||
| 59 | #define cpu_relax() rep_nop() | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Default implementation of macro that returns current | ||
| 63 | * instruction pointer ("program counter"). Stolen | ||
| 64 | * from asm-i386/processor.h | ||
| 65 | */ | ||
| 66 | #define current_text_addr() \ | ||
| 67 | ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) | ||
| 68 | |||
| 69 | #define ARCH_IS_STACKGROW(address) \ | ||
| 70 | (address + 32 >= UPT_SP(&current->thread.regs.regs)) | ||
| 71 | |||
| 72 | #define KSTK_EIP(tsk) KSTK_REG(tsk, EIP) | ||
| 73 | #define KSTK_ESP(tsk) KSTK_REG(tsk, UESP) | ||
| 74 | #define KSTK_EBP(tsk) KSTK_REG(tsk, EBP) | ||
| 75 | |||
| 76 | #include "asm/processor-generic.h" | ||
| 77 | |||
| 78 | #endif | ||
diff --git a/arch/um/sys-i386/asm/ptrace.h b/arch/um/sys-i386/asm/ptrace.h
new file mode 100644
index 00000000000..5d2a5911253
--- /dev/null
+++ b/arch/um/sys-i386/asm/ptrace.h
@@ -0,0 +1,51 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __UM_PTRACE_I386_H | ||
| 7 | #define __UM_PTRACE_I386_H | ||
| 8 | |||
| 9 | #define HOST_AUDIT_ARCH AUDIT_ARCH_I386 | ||
| 10 | |||
| 11 | #include "linux/compiler.h" | ||
| 12 | #include "asm/ptrace-generic.h" | ||
| 13 | |||
| 14 | #define PT_REGS_EAX(r) UPT_EAX(&(r)->regs) | ||
| 15 | #define PT_REGS_EBX(r) UPT_EBX(&(r)->regs) | ||
| 16 | #define PT_REGS_ECX(r) UPT_ECX(&(r)->regs) | ||
| 17 | #define PT_REGS_EDX(r) UPT_EDX(&(r)->regs) | ||
| 18 | #define PT_REGS_ESI(r) UPT_ESI(&(r)->regs) | ||
| 19 | #define PT_REGS_EDI(r) UPT_EDI(&(r)->regs) | ||
| 20 | #define PT_REGS_EBP(r) UPT_EBP(&(r)->regs) | ||
| 21 | |||
| 22 | #define PT_REGS_CS(r) UPT_CS(&(r)->regs) | ||
| 23 | #define PT_REGS_SS(r) UPT_SS(&(r)->regs) | ||
| 24 | #define PT_REGS_DS(r) UPT_DS(&(r)->regs) | ||
| 25 | #define PT_REGS_ES(r) UPT_ES(&(r)->regs) | ||
| 26 | #define PT_REGS_FS(r) UPT_FS(&(r)->regs) | ||
| 27 | #define PT_REGS_GS(r) UPT_GS(&(r)->regs) | ||
| 28 | |||
| 29 | #define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs) | ||
| 30 | |||
| 31 | #define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r) | ||
| 32 | #define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r) | ||
| 33 | #define PT_FIX_EXEC_STACK(sp) do ; while(0) | ||
| 34 | |||
| 35 | #define profile_pc(regs) PT_REGS_IP(regs) | ||
| 36 | |||
| 37 | #define user_mode(r) UPT_IS_USER(&(r)->regs) | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Forward declaration to avoid including sysdep/tls.h, which causes a | ||
| 41 | * circular include, and compilation failures. | ||
| 42 | */ | ||
| 43 | struct user_desc; | ||
| 44 | |||
| 45 | extern int ptrace_get_thread_area(struct task_struct *child, int idx, | ||
| 46 | struct user_desc __user *user_desc); | ||
| 47 | |||
| 48 | extern int ptrace_set_thread_area(struct task_struct *child, int idx, | ||
| 49 | struct user_desc __user *user_desc); | ||
| 50 | |||
| 51 | #endif | ||
diff --git a/arch/um/sys-i386/atomic64_cx8_32.S b/arch/um/sys-i386/atomic64_cx8_32.S
new file mode 100644
index 00000000000..1e901d3d4a9
--- /dev/null
+++ b/arch/um/sys-i386/atomic64_cx8_32.S
@@ -0,0 +1,225 @@
| 1 | /* | ||
| 2 | * atomic64_t for 586+ | ||
| 3 | * | ||
| 4 | * Copied from arch/x86/lib/atomic64_cx8_32.S | ||
| 5 | * | ||
| 6 | * Copyright © 2010 Luca Barbieri | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; either version 2 of the License, or | ||
| 11 | * (at your option) any later version. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/linkage.h> | ||
| 16 | #include <asm/alternative-asm.h> | ||
| 17 | #include <asm/dwarf2.h> | ||
| 18 | |||
| 19 | .macro SAVE reg | ||
| 20 | pushl_cfi %\reg | ||
| 21 | CFI_REL_OFFSET \reg, 0 | ||
| 22 | .endm | ||
| 23 | |||
| 24 | .macro RESTORE reg | ||
| 25 | popl_cfi %\reg | ||
| 26 | CFI_RESTORE \reg | ||
| 27 | .endm | ||
| 28 | |||
| 29 | .macro read64 reg | ||
| 30 | movl %ebx, %eax | ||
| 31 | movl %ecx, %edx | ||
| 32 | /* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ | ||
| 33 | LOCK_PREFIX | ||
| 34 | cmpxchg8b (\reg) | ||
| 35 | .endm | ||
| 36 | |||
| 37 | ENTRY(atomic64_read_cx8) | ||
| 38 | CFI_STARTPROC | ||
| 39 | |||
| 40 | read64 %ecx | ||
| 41 | ret | ||
| 42 | CFI_ENDPROC | ||
| 43 | ENDPROC(atomic64_read_cx8) | ||
| 44 | |||
| 45 | ENTRY(atomic64_set_cx8) | ||
| 46 | CFI_STARTPROC | ||
| 47 | |||
| 48 | 1: | ||
| 49 | /* we don't need LOCK_PREFIX since aligned 64-bit writes | ||
| 50 | * are atomic on 586 and newer */ | ||
| 51 | cmpxchg8b (%esi) | ||
| 52 | jne 1b | ||
| 53 | |||
| 54 | ret | ||
| 55 | CFI_ENDPROC | ||
| 56 | ENDPROC(atomic64_set_cx8) | ||
| 57 | |||
| 58 | ENTRY(atomic64_xchg_cx8) | ||
| 59 | CFI_STARTPROC | ||
| 60 | |||
| 61 | movl %ebx, %eax | ||
| 62 | movl %ecx, %edx | ||
| 63 | 1: | ||
| 64 | LOCK_PREFIX | ||
| 65 | cmpxchg8b (%esi) | ||
| 66 | jne 1b | ||
| 67 | |||
| 68 | ret | ||
| 69 | CFI_ENDPROC | ||
| 70 | ENDPROC(atomic64_xchg_cx8) | ||
| 71 | |||
| 72 | .macro addsub_return func ins insc | ||
| 73 | ENTRY(atomic64_\func\()_return_cx8) | ||
| 74 | CFI_STARTPROC | ||
| 75 | SAVE ebp | ||
| 76 | SAVE ebx | ||
| 77 | SAVE esi | ||
| 78 | SAVE edi | ||
| 79 | |||
| 80 | movl %eax, %esi | ||
| 81 | movl %edx, %edi | ||
| 82 | movl %ecx, %ebp | ||
| 83 | |||
| 84 | read64 %ebp | ||
| 85 | 1: | ||
| 86 | movl %eax, %ebx | ||
| 87 | movl %edx, %ecx | ||
| 88 | \ins\()l %esi, %ebx | ||
| 89 | \insc\()l %edi, %ecx | ||
| 90 | LOCK_PREFIX | ||
| 91 | cmpxchg8b (%ebp) | ||
| 92 | jne 1b | ||
| 93 | |||
| 94 | 10: | ||
| 95 | movl %ebx, %eax | ||
| 96 | movl %ecx, %edx | ||
| 97 | RESTORE edi | ||
| 98 | RESTORE esi | ||
| 99 | RESTORE ebx | ||
| 100 | RESTORE ebp | ||
| 101 | ret | ||
| 102 | CFI_ENDPROC | ||
| 103 | ENDPROC(atomic64_\func\()_return_cx8) | ||
| 104 | .endm | ||
| 105 | |||
| 106 | addsub_return add add adc | ||
| 107 | addsub_return sub sub sbb | ||
| 108 | |||
| 109 | .macro incdec_return func ins insc | ||
| 110 | ENTRY(atomic64_\func\()_return_cx8) | ||
| 111 | CFI_STARTPROC | ||
| 112 | SAVE ebx | ||
| 113 | |||
| 114 | read64 %esi | ||
| 115 | 1: | ||
| 116 | movl %eax, %ebx | ||
| 117 | movl %edx, %ecx | ||
| 118 | \ins\()l $1, %ebx | ||
| 119 | \insc\()l $0, %ecx | ||
| 120 | LOCK_PREFIX | ||
| 121 | cmpxchg8b (%esi) | ||
| 122 | jne 1b | ||
| 123 | |||
| 124 | 10: | ||
| 125 | movl %ebx, %eax | ||
| 126 | movl %ecx, %edx | ||
| 127 | RESTORE ebx | ||
| 128 | ret | ||
| 129 | CFI_ENDPROC | ||
| 130 | ENDPROC(atomic64_\func\()_return_cx8) | ||
| 131 | .endm | ||
| 132 | |||
| 133 | incdec_return inc add adc | ||
| 134 | incdec_return dec sub sbb | ||
| 135 | |||
| 136 | ENTRY(atomic64_dec_if_positive_cx8) | ||
| 137 | CFI_STARTPROC | ||
| 138 | SAVE ebx | ||
| 139 | |||
| 140 | read64 %esi | ||
| 141 | 1: | ||
| 142 | movl %eax, %ebx | ||
| 143 | movl %edx, %ecx | ||
| 144 | subl $1, %ebx | ||
| 145 | sbb $0, %ecx | ||
| 146 | js 2f | ||
| 147 | LOCK_PREFIX | ||
| 148 | cmpxchg8b (%esi) | ||
| 149 | jne 1b | ||
| 150 | |||
| 151 | 2: | ||
| 152 | movl %ebx, %eax | ||
| 153 | movl %ecx, %edx | ||
| 154 | RESTORE ebx | ||
| 155 | ret | ||
| 156 | CFI_ENDPROC | ||
| 157 | ENDPROC(atomic64_dec_if_positive_cx8) | ||
| 158 | |||
| 159 | ENTRY(atomic64_add_unless_cx8) | ||
| 160 | CFI_STARTPROC | ||
| 161 | SAVE ebp | ||
| 162 | SAVE ebx | ||
| 163 | /* these just push these two parameters on the stack */ | ||
| 164 | SAVE edi | ||
| 165 | SAVE esi | ||
| 166 | |||
| 167 | movl %ecx, %ebp | ||
| 168 | movl %eax, %esi | ||
| 169 | movl %edx, %edi | ||
| 170 | |||
| 171 | read64 %ebp | ||
| 172 | 1: | ||
| 173 | cmpl %eax, 0(%esp) | ||
| 174 | je 4f | ||
| 175 | 2: | ||
| 176 | movl %eax, %ebx | ||
| 177 | movl %edx, %ecx | ||
| 178 | addl %esi, %ebx | ||
| 179 | adcl %edi, %ecx | ||
| 180 | LOCK_PREFIX | ||
| 181 | cmpxchg8b (%ebp) | ||
| 182 | jne 1b | ||
| 183 | |||
| 184 | movl $1, %eax | ||
| 185 | 3: | ||
| 186 | addl $8, %esp | ||
| 187 | CFI_ADJUST_CFA_OFFSET -8 | ||
| 188 | RESTORE ebx | ||
| 189 | RESTORE ebp | ||
| 190 | ret | ||
| 191 | 4: | ||
| 192 | cmpl %edx, 4(%esp) | ||
| 193 | jne 2b | ||
| 194 | xorl %eax, %eax | ||
| 195 | jmp 3b | ||
| 196 | CFI_ENDPROC | ||
| 197 | ENDPROC(atomic64_add_unless_cx8) | ||
| 198 | |||
| 199 | ENTRY(atomic64_inc_not_zero_cx8) | ||
| 200 | CFI_STARTPROC | ||
| 201 | SAVE ebx | ||
| 202 | |||
| 203 | read64 %esi | ||
| 204 | 1: | ||
| 205 | testl %eax, %eax | ||
| 206 | je 4f | ||
| 207 | 2: | ||
| 208 | movl %eax, %ebx | ||
| 209 | movl %edx, %ecx | ||
| 210 | addl $1, %ebx | ||
| 211 | adcl $0, %ecx | ||
| 212 | LOCK_PREFIX | ||
| 213 | cmpxchg8b (%esi) | ||
| 214 | jne 1b | ||
| 215 | |||
| 216 | movl $1, %eax | ||
| 217 | 3: | ||
| 218 | RESTORE ebx | ||
| 219 | ret | ||
| 220 | 4: | ||
| 221 | testl %edx, %edx | ||
| 222 | jne 2b | ||
| 223 | jmp 3b | ||
| 224 | CFI_ENDPROC | ||
| 225 | ENDPROC(atomic64_inc_not_zero_cx8) | ||
diff --git a/arch/um/sys-i386/bug.c b/arch/um/sys-i386/bug.c
new file mode 100644
index 00000000000..8d4f273f121
--- /dev/null
+++ b/arch/um/sys-i386/bug.c
@@ -0,0 +1,21 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com) | ||
| 3 | * Licensed under the GPL V2 | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/uaccess.h> | ||
| 7 | #include <asm/errno.h> | ||
| 8 | |||
| 9 | /* Mostly copied from i386/x86_64 - eliminated the eip < PAGE_OFFSET because | ||
| 10 | * that's not relevant in skas mode. | ||
| 11 | */ | ||
| 12 | |||
| 13 | int is_valid_bugaddr(unsigned long eip) | ||
| 14 | { | ||
| 15 | unsigned short ud2; | ||
| 16 | |||
| 17 | if (probe_kernel_address((unsigned short __user *)eip, ud2)) | ||
| 18 | return 0; | ||
| 19 | |||
| 20 | return ud2 == 0x0b0f; | ||
| 21 | } | ||
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c
new file mode 100644
index 00000000000..2c6d0d731c1
--- /dev/null
+++ b/arch/um/sys-i386/bugs.c
@@ -0,0 +1,76 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <signal.h> | ||
| 7 | #include "kern_constants.h" | ||
| 8 | #include "kern_util.h" | ||
| 9 | #include "longjmp.h" | ||
| 10 | #include "task.h" | ||
| 11 | #include "user.h" | ||
| 12 | #include "sysdep/ptrace.h" | ||
| 13 | |||
| 14 | /* Set during early boot */ | ||
| 15 | static int host_has_cmov = 1; | ||
| 16 | static jmp_buf cmov_test_return; | ||
| 17 | |||
| 18 | static void cmov_sigill_test_handler(int sig) | ||
| 19 | { | ||
| 20 | host_has_cmov = 0; | ||
| 21 | longjmp(cmov_test_return, 1); | ||
| 22 | } | ||
| 23 | |||
| 24 | void arch_check_bugs(void) | ||
| 25 | { | ||
| 26 | struct sigaction old, new; | ||
| 27 | |||
| 28 | printk(UM_KERN_INFO "Checking for host processor cmov support..."); | ||
| 29 | new.sa_handler = cmov_sigill_test_handler; | ||
| 30 | |||
| 31 | /* Make sure that SIGILL is enabled after the handler longjmps back */ | ||
| 32 | new.sa_flags = SA_NODEFER; | ||
| 33 | sigemptyset(&new.sa_mask); | ||
| 34 | sigaction(SIGILL, &new, &old); | ||
| 35 | |||
| 36 | if (setjmp(cmov_test_return) == 0) { | ||
| 37 | unsigned long foo = 0; | ||
| 38 | __asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo)); | ||
| 39 | printk(UM_KERN_CONT "Yes\n"); | ||
| 40 | } else | ||
| 41 | printk(UM_KERN_CONT "No\n"); | ||
| 42 | |||
| 43 | sigaction(SIGILL, &old, &new); | ||
| 44 | } | ||
| 45 | |||
| 46 | void arch_examine_signal(int sig, struct uml_pt_regs *regs) | ||
| 47 | { | ||
| 48 | unsigned char tmp[2]; | ||
| 49 | |||
| 50 | /* | ||
| 51 | * This is testing for a cmov (0x0f 0x4x) instruction causing a | ||
| 52 | * SIGILL in init. | ||
| 53 | */ | ||
| 54 | if ((sig != SIGILL) || (TASK_PID(get_current()) != 1)) | ||
| 55 | return; | ||
| 56 | |||
| 57 | if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) { | ||
| 58 | printk(UM_KERN_ERR "SIGILL in init, could not read " | ||
| 59 | "instructions!\n"); | ||
| 60 | return; | ||
| 61 | } | ||
| 62 | |||
| 63 | if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40)) | ||
| 64 | return; | ||
| 65 | |||
| 66 | if (host_has_cmov == 0) | ||
| 67 | printk(UM_KERN_ERR "SIGILL caused by cmov, which this " | ||
| 68 | "processor doesn't implement. Boot a filesystem " | ||
| 69 | "compiled for older processors"); | ||
| 70 | else if (host_has_cmov == 1) | ||
| 71 | printk(UM_KERN_ERR "SIGILL caused by cmov, which this " | ||
| 72 | "processor claims to implement"); | ||
| 73 | else | ||
| 74 | printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)", | ||
| 75 | host_has_cmov); | ||
| 76 | } | ||
diff --git a/arch/um/sys-i386/checksum.S b/arch/um/sys-i386/checksum.S
new file mode 100644
index 00000000000..f058d2f82e1
--- /dev/null
+++ b/arch/um/sys-i386/checksum.S
@@ -0,0 +1,458 @@
| 1 | /* | ||
| 2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | ||
| 3 | * operating system. INET is implemented using the BSD Socket | ||
| 4 | * interface as the means of communication with the user level. | ||
| 5 | * | ||
| 6 | * IP/TCP/UDP checksumming routines | ||
| 7 | * | ||
| 8 | * Authors: Jorge Cwik, <jorge@laser.satlink.net> | ||
| 9 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | ||
| 10 | * Tom May, <ftom@netcom.com> | ||
| 11 | * Pentium Pro/II routines: | ||
| 12 | * Alexander Kjeldaas <astor@guardian.no> | ||
| 13 | * Finn Arne Gangstad <finnag@guardian.no> | ||
| 14 | * Lots of code moved from tcp.c and ip.c; see those files | ||
| 15 | * for more names. | ||
| 16 | * | ||
| 17 | * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception | ||
| 18 | * handling. | ||
| 19 | * Andi Kleen, add zeroing on error | ||
| 20 | * converted to pure assembler | ||
| 21 | * | ||
| 22 | * This program is free software; you can redistribute it and/or | ||
| 23 | * modify it under the terms of the GNU General Public License | ||
| 24 | * as published by the Free Software Foundation; either version | ||
| 25 | * 2 of the License, or (at your option) any later version. | ||
| 26 | */ | ||
| 27 | |||
| 28 | #include <asm/errno.h> | ||
| 29 | |||
| 30 | /* | ||
| 31 | * computes a partial checksum, e.g. for TCP/UDP fragments | ||
| 32 | */ | ||
| 33 | |||
| 34 | /* | ||
| 35 | unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) | ||
| 36 | */ | ||
| 37 | |||
| 38 | .text | ||
| 39 | .align 4 | ||
| 40 | .globl csum_partial | ||
| 41 | |||
| 42 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Experiments with Ethernet and SLIP connections show that buff | ||
| 46 | * is aligned on either a 2-byte or 4-byte boundary. We get at | ||
| 47 | * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. | ||
| 48 | * Fortunately, it is easy to convert 2-byte alignment to 4-byte | ||
| 49 | * alignment for the unrolled loop. | ||
| 50 | */ | ||
| 51 | csum_partial: | ||
| 52 | pushl %esi | ||
| 53 | pushl %ebx | ||
| 54 | movl 20(%esp),%eax # Function arg: unsigned int sum | ||
| 55 | movl 16(%esp),%ecx # Function arg: int len | ||
| 56 | movl 12(%esp),%esi # Function arg: unsigned char *buff | ||
| 57 | testl $2, %esi # Check alignment. | ||
| 58 | jz 2f # Jump if alignment is ok. | ||
| 59 | subl $2, %ecx # Alignment uses up two bytes. | ||
| 60 | jae 1f # Jump if we had at least two bytes. | ||
| 61 | addl $2, %ecx # ecx was < 2. Deal with it. | ||
| 62 | jmp 4f | ||
| 63 | 1: movw (%esi), %bx | ||
| 64 | addl $2, %esi | ||
| 65 | addw %bx, %ax | ||
| 66 | adcl $0, %eax | ||
| 67 | 2: | ||
| 68 | movl %ecx, %edx | ||
| 69 | shrl $5, %ecx | ||
| 70 | jz 2f | ||
| 71 | testl %esi, %esi | ||
| 72 | 1: movl (%esi), %ebx | ||
| 73 | adcl %ebx, %eax | ||
| 74 | movl 4(%esi), %ebx | ||
| 75 | adcl %ebx, %eax | ||
| 76 | movl 8(%esi), %ebx | ||
| 77 | adcl %ebx, %eax | ||
| 78 | movl 12(%esi), %ebx | ||
| 79 | adcl %ebx, %eax | ||
| 80 | movl 16(%esi), %ebx | ||
| 81 | adcl %ebx, %eax | ||
| 82 | movl 20(%esi), %ebx | ||
| 83 | adcl %ebx, %eax | ||
| 84 | movl 24(%esi), %ebx | ||
| 85 | adcl %ebx, %eax | ||
| 86 | movl 28(%esi), %ebx | ||
| 87 | adcl %ebx, %eax | ||
| 88 | lea 32(%esi), %esi | ||
| 89 | dec %ecx | ||
| 90 | jne 1b | ||
| 91 | adcl $0, %eax | ||
| 92 | 2: movl %edx, %ecx | ||
| 93 | andl $0x1c, %edx | ||
| 94 | je 4f | ||
| 95 | shrl $2, %edx # This clears CF | ||
| 96 | 3: adcl (%esi), %eax | ||
| 97 | lea 4(%esi), %esi | ||
| 98 | dec %edx | ||
| 99 | jne 3b | ||
| 100 | adcl $0, %eax | ||
| 101 | 4: andl $3, %ecx | ||
| 102 | jz 7f | ||
| 103 | cmpl $2, %ecx | ||
| 104 | jb 5f | ||
| 105 | movw (%esi),%cx | ||
| 106 | leal 2(%esi),%esi | ||
| 107 | je 6f | ||
| 108 | shll $16,%ecx | ||
| 109 | 5: movb (%esi),%cl | ||
| 110 | 6: addl %ecx,%eax | ||
| 111 | adcl $0, %eax | ||
| 112 | 7: | ||
| 113 | popl %ebx | ||
| 114 | popl %esi | ||
| 115 | ret | ||
| 116 | |||
| 117 | #else | ||
| 118 | |||
| 119 | /* Version for PentiumII/PPro */ | ||
| 120 | |||
| 121 | csum_partial: | ||
| 122 | pushl %esi | ||
| 123 | pushl %ebx | ||
| 124 | movl 20(%esp),%eax # Function arg: unsigned int sum | ||
| 125 | movl 16(%esp),%ecx # Function arg: int len | ||
| 126 | movl 12(%esp),%esi # Function arg: const unsigned char *buf | ||
| 127 | |||
| 128 | testl $2, %esi | ||
| 129 | jnz 30f | ||
| 130 | 10: | ||
| 131 | movl %ecx, %edx | ||
| 132 | movl %ecx, %ebx | ||
| 133 | andl $0x7c, %ebx | ||
| 134 | shrl $7, %ecx | ||
| 135 | addl %ebx,%esi | ||
| 136 | shrl $2, %ebx | ||
| 137 | negl %ebx | ||
| 138 | lea 45f(%ebx,%ebx,2), %ebx | ||
| 139 | testl %esi, %esi | ||
| 140 | jmp *%ebx | ||
| 141 | |||
| 142 | # Handle 2-byte-aligned regions | ||
| 143 | 20: addw (%esi), %ax | ||
| 144 | lea 2(%esi), %esi | ||
| 145 | adcl $0, %eax | ||
| 146 | jmp 10b | ||
| 147 | |||
| 148 | 30: subl $2, %ecx | ||
| 149 | ja 20b | ||
| 150 | je 32f | ||
| 151 | movzbl (%esi),%ebx # csumming 1 byte, 2-aligned | ||
| 152 | addl %ebx, %eax | ||
| 153 | adcl $0, %eax | ||
| 154 | jmp 80f | ||
| 155 | 32: | ||
| 156 | addw (%esi), %ax # csumming 2 bytes, 2-aligned | ||
| 157 | adcl $0, %eax | ||
| 158 | jmp 80f | ||
| 159 | |||
| 160 | 40: | ||
| 161 | addl -128(%esi), %eax | ||
| 162 | adcl -124(%esi), %eax | ||
| 163 | adcl -120(%esi), %eax | ||
| 164 | adcl -116(%esi), %eax | ||
| 165 | adcl -112(%esi), %eax | ||
| 166 | adcl -108(%esi), %eax | ||
| 167 | adcl -104(%esi), %eax | ||
| 168 | adcl -100(%esi), %eax | ||
| 169 | adcl -96(%esi), %eax | ||
| 170 | adcl -92(%esi), %eax | ||
| 171 | adcl -88(%esi), %eax | ||
| 172 | adcl -84(%esi), %eax | ||
| 173 | adcl -80(%esi), %eax | ||
| 174 | adcl -76(%esi), %eax | ||
| 175 | adcl -72(%esi), %eax | ||
| 176 | adcl -68(%esi), %eax | ||
| 177 | adcl -64(%esi), %eax | ||
| 178 | adcl -60(%esi), %eax | ||
| 179 | adcl -56(%esi), %eax | ||
| 180 | adcl -52(%esi), %eax | ||
| 181 | adcl -48(%esi), %eax | ||
| 182 | adcl -44(%esi), %eax | ||
| 183 | adcl -40(%esi), %eax | ||
| 184 | adcl -36(%esi), %eax | ||
| 185 | adcl -32(%esi), %eax | ||
| 186 | adcl -28(%esi), %eax | ||
| 187 | adcl -24(%esi), %eax | ||
| 188 | adcl -20(%esi), %eax | ||
| 189 | adcl -16(%esi), %eax | ||
| 190 | adcl -12(%esi), %eax | ||
| 191 | adcl -8(%esi), %eax | ||
| 192 | adcl -4(%esi), %eax | ||
| 193 | 45: | ||
| 194 | lea 128(%esi), %esi | ||
| 195 | adcl $0, %eax | ||
| 196 | dec %ecx | ||
| 197 | jge 40b | ||
| 198 | movl %edx, %ecx | ||
| 199 | 50: andl $3, %ecx | ||
| 200 | jz 80f | ||
| 201 | |||
| 202 | # Handle the last 1-3 bytes without jumping | ||
| 203 | notl %ecx # 1->2, 2->1, 3->0, higher bits are masked | ||
| 204 | movl $0xffffff,%ebx # by the shll and shrl instructions | ||
| 205 | shll $3,%ecx | ||
| 206 | shrl %cl,%ebx | ||
| 207 | andl -128(%esi),%ebx # esi is 4-aligned so should be ok | ||
| 208 | addl %ebx,%eax | ||
| 209 | adcl $0,%eax | ||
| 210 | 80: | ||
| 211 | popl %ebx | ||
| 212 | popl %esi | ||
| 213 | ret | ||
| 214 | |||
| 215 | #endif | ||
| 216 | |||
| 217 | /* | ||
| 218 | unsigned int csum_partial_copy_generic (const char *src, char *dst, | ||
| 219 | int len, int sum, int *src_err_ptr, int *dst_err_ptr) | ||
| 220 | */ | ||
| 221 | |||
| 222 | /* | ||
| 223 | * Copy from ds while checksumming, otherwise like csum_partial | ||
| 224 | * | ||
| 225 | * The macros SRC and DST specify the type of access for the instruction. | ||
| 226 | * thus we can call a custom exception handler for all access types. | ||
| 227 | * | ||
| 228 | * FIXME: could someone double-check whether I haven't mixed up some SRC and | ||
| 229 | * DST definitions? It's damn hard to trigger all cases. I hope I got | ||
| 230 | * them all but there's no guarantee. | ||
| 231 | */ | ||
| 232 | |||
| 233 | #define SRC(y...) \ | ||
| 234 | 9999: y; \ | ||
| 235 | .section __ex_table, "a"; \ | ||
| 236 | .long 9999b, 6001f ; \ | ||
| 237 | .previous | ||
| 238 | |||
| 239 | #define DST(y...) \ | ||
| 240 | 9999: y; \ | ||
| 241 | .section __ex_table, "a"; \ | ||
| 242 | .long 9999b, 6002f ; \ | ||
| 243 | .previous | ||
| 244 | |||
| 245 | .align 4 | ||
| 246 | |||
| 247 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM | ||
| 248 | |||
| 249 | #define ARGBASE 16 | ||
| 250 | #define FP 12 | ||
| 251 | |||
| 252 | csum_partial_copy_generic_i386: | ||
| 253 | subl $4,%esp | ||
| 254 | pushl %edi | ||
| 255 | pushl %esi | ||
| 256 | pushl %ebx | ||
| 257 | movl ARGBASE+16(%esp),%eax # sum | ||
| 258 | movl ARGBASE+12(%esp),%ecx # len | ||
| 259 | movl ARGBASE+4(%esp),%esi # src | ||
| 260 | movl ARGBASE+8(%esp),%edi # dst | ||
| 261 | |||
| 262 | testl $2, %edi # Check alignment. | ||
| 263 | jz 2f # Jump if alignment is ok. | ||
| 264 | subl $2, %ecx # Alignment uses up two bytes. | ||
| 265 | jae 1f # Jump if we had at least two bytes. | ||
| 266 | addl $2, %ecx # ecx was < 2. Deal with it. | ||
| 267 | jmp 4f | ||
| 268 | SRC(1: movw (%esi), %bx ) | ||
| 269 | addl $2, %esi | ||
| 270 | DST( movw %bx, (%edi) ) | ||
| 271 | addl $2, %edi | ||
| 272 | addw %bx, %ax | ||
| 273 | adcl $0, %eax | ||
| 274 | 2: | ||
| 275 | movl %ecx, FP(%esp) | ||
| 276 | shrl $5, %ecx | ||
| 277 | jz 2f | ||
| 278 | testl %esi, %esi | ||
| 279 | SRC(1: movl (%esi), %ebx ) | ||
| 280 | SRC( movl 4(%esi), %edx ) | ||
| 281 | adcl %ebx, %eax | ||
| 282 | DST( movl %ebx, (%edi) ) | ||
| 283 | adcl %edx, %eax | ||
| 284 | DST( movl %edx, 4(%edi) ) | ||
| 285 | |||
| 286 | SRC( movl 8(%esi), %ebx ) | ||
| 287 | SRC( movl 12(%esi), %edx ) | ||
| 288 | adcl %ebx, %eax | ||
| 289 | DST( movl %ebx, 8(%edi) ) | ||
| 290 | adcl %edx, %eax | ||
| 291 | DST( movl %edx, 12(%edi) ) | ||
| 292 | |||
| 293 | SRC( movl 16(%esi), %ebx ) | ||
| 294 | SRC( movl 20(%esi), %edx ) | ||
| 295 | adcl %ebx, %eax | ||
| 296 | DST( movl %ebx, 16(%edi) ) | ||
| 297 | adcl %edx, %eax | ||
| 298 | DST( movl %edx, 20(%edi) ) | ||
| 299 | |||
| 300 | SRC( movl 24(%esi), %ebx ) | ||
| 301 | SRC( movl 28(%esi), %edx ) | ||
| 302 | adcl %ebx, %eax | ||
| 303 | DST( movl %ebx, 24(%edi) ) | ||
| 304 | adcl %edx, %eax | ||
| 305 | DST( movl %edx, 28(%edi) ) | ||
| 306 | |||
| 307 | lea 32(%esi), %esi | ||
| 308 | lea 32(%edi), %edi | ||
| 309 | dec %ecx | ||
| 310 | jne 1b | ||
| 311 | adcl $0, %eax | ||
| 312 | 2: movl FP(%esp), %edx | ||
| 313 | movl %edx, %ecx | ||
| 314 | andl $0x1c, %edx | ||
| 315 | je 4f | ||
| 316 | shrl $2, %edx # This clears CF | ||
| 317 | SRC(3: movl (%esi), %ebx ) | ||
| 318 | adcl %ebx, %eax | ||
| 319 | DST( movl %ebx, (%edi) ) | ||
| 320 | lea 4(%esi), %esi | ||
| 321 | lea 4(%edi), %edi | ||
| 322 | dec %edx | ||
| 323 | jne 3b | ||
| 324 | adcl $0, %eax | ||
| 325 | 4: andl $3, %ecx | ||
| 326 | jz 7f | ||
| 327 | cmpl $2, %ecx | ||
| 328 | jb 5f | ||
| 329 | SRC( movw (%esi), %cx ) | ||
| 330 | leal 2(%esi), %esi | ||
| 331 | DST( movw %cx, (%edi) ) | ||
| 332 | leal 2(%edi), %edi | ||
| 333 | je 6f | ||
| 334 | shll $16,%ecx | ||
| 335 | SRC(5: movb (%esi), %cl ) | ||
| 336 | DST( movb %cl, (%edi) ) | ||
| 337 | 6: addl %ecx, %eax | ||
| 338 | adcl $0, %eax | ||
| 339 | 7: | ||
| 340 | 5000: | ||
| 341 | |||
| 342 | # Exception handler: | ||
| 343 | .section .fixup, "ax" | ||
| 344 | |||
| 345 | 6001: | ||
| 346 | movl ARGBASE+20(%esp), %ebx # src_err_ptr | ||
| 347 | movl $-EFAULT, (%ebx) | ||
| 348 | |||
| 349 | # zero the complete destination - computing the rest | ||
| 350 | # is too much work | ||
| 351 | movl ARGBASE+8(%esp), %edi # dst | ||
| 352 | movl ARGBASE+12(%esp), %ecx # len | ||
| 353 | xorl %eax,%eax | ||
| 354 | rep ; stosb | ||
| 355 | |||
| 356 | jmp 5000b | ||
| 357 | |||
| 358 | 6002: | ||
| 359 | movl ARGBASE+24(%esp), %ebx # dst_err_ptr | ||
| 360 | movl $-EFAULT,(%ebx) | ||
| 361 | jmp 5000b | ||
| 362 | |||
| 363 | .previous | ||
| 364 | |||
| 365 | popl %ebx | ||
| 366 | popl %esi | ||
| 367 | popl %edi | ||
| 368 | popl %ecx # equivalent to addl $4,%esp | ||
| 369 | ret | ||
| 370 | |||
| 371 | #else | ||
| 372 | |||
| 373 | /* Version for PentiumII/PPro */ | ||
| 374 | |||
| 375 | #define ROUND1(x) \ | ||
| 376 | SRC(movl x(%esi), %ebx ) ; \ | ||
| 377 | addl %ebx, %eax ; \ | ||
| 378 | DST(movl %ebx, x(%edi) ) ; | ||
| 379 | |||
| 380 | #define ROUND(x) \ | ||
| 381 | SRC(movl x(%esi), %ebx ) ; \ | ||
| 382 | adcl %ebx, %eax ; \ | ||
| 383 | DST(movl %ebx, x(%edi) ) ; | ||
| 384 | |||
| 385 | #define ARGBASE 12 | ||
| 386 | |||
| 387 | csum_partial_copy_generic_i386: | ||
| 388 | pushl %ebx | ||
| 389 | pushl %edi | ||
| 390 | pushl %esi | ||
| 391 | movl ARGBASE+4(%esp),%esi #src | ||
| 392 | movl ARGBASE+8(%esp),%edi #dst | ||
| 393 | movl ARGBASE+12(%esp),%ecx #len | ||
| 394 | movl ARGBASE+16(%esp),%eax #sum | ||
| 395 | # movl %ecx, %edx | ||
| 396 | movl %ecx, %ebx | ||
| 397 | movl %esi, %edx | ||
| 398 | shrl $6, %ecx | ||
| 399 | andl $0x3c, %ebx | ||
| 400 | negl %ebx | ||
| 401 | subl %ebx, %esi | ||
| 402 | subl %ebx, %edi | ||
| 403 | lea -1(%esi),%edx | ||
| 404 | andl $-32,%edx | ||
| 405 | lea 3f(%ebx,%ebx), %ebx | ||
| 406 | testl %esi, %esi | ||
| 407 | jmp *%ebx | ||
| 408 | 1: addl $64,%esi | ||
| 409 | addl $64,%edi | ||
| 410 | SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) | ||
| 411 | ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52) | ||
| 412 | ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36) | ||
| 413 | ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20) | ||
| 414 | ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4) | ||
| 415 | 3: adcl $0,%eax | ||
| 416 | addl $64, %edx | ||
| 417 | dec %ecx | ||
| 418 | jge 1b | ||
| 419 | 4: movl ARGBASE+12(%esp),%edx #len | ||
| 420 | andl $3, %edx | ||
| 421 | jz 7f | ||
| 422 | cmpl $2, %edx | ||
| 423 | jb 5f | ||
| 424 | SRC( movw (%esi), %dx ) | ||
| 425 | leal 2(%esi), %esi | ||
| 426 | DST( movw %dx, (%edi) ) | ||
| 427 | leal 2(%edi), %edi | ||
| 428 | je 6f | ||
| 429 | shll $16,%edx | ||
| 430 | 5: | ||
| 431 | SRC( movb (%esi), %dl ) | ||
| 432 | DST( movb %dl, (%edi) ) | ||
| 433 | 6: addl %edx, %eax | ||
| 434 | adcl $0, %eax | ||
| 435 | 7: | ||
| 436 | .section .fixup, "ax" | ||
| 437 | 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr | ||
| 438 | movl $-EFAULT, (%ebx) | ||
| 439 | # zero the complete destination (computing the rest is too much work) | ||
| 440 | movl ARGBASE+8(%esp),%edi # dst | ||
| 441 | movl ARGBASE+12(%esp),%ecx # len | ||
| 442 | xorl %eax,%eax | ||
| 443 | rep; stosb | ||
| 444 | jmp 7b | ||
| 445 | 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr | ||
| 446 | movl $-EFAULT, (%ebx) | ||
| 447 | jmp 7b | ||
| 448 | .previous | ||
| 449 | |||
| 450 | popl %esi | ||
| 451 | popl %edi | ||
| 452 | popl %ebx | ||
| 453 | ret | ||
| 454 | |||
| 455 | #undef ROUND | ||
| 456 | #undef ROUND1 | ||
| 457 | |||
| 458 | #endif | ||
diff --git a/arch/um/sys-i386/delay.c b/arch/um/sys-i386/delay.c
new file mode 100644
index 00000000000..f3fe1a688f7
--- /dev/null
+++ b/arch/um/sys-i386/delay.c
@@ -0,0 +1,60 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
| 3 | * Mostly copied from arch/x86/lib/delay.c | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License version 2 as | ||
| 7 | * published by the Free Software Foundation. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <asm/param.h> | ||
| 14 | |||
| 15 | void __delay(unsigned long loops) | ||
| 16 | { | ||
| 17 | asm volatile( | ||
| 18 | "test %0,%0\n" | ||
| 19 | "jz 3f\n" | ||
| 20 | "jmp 1f\n" | ||
| 21 | |||
| 22 | ".align 16\n" | ||
| 23 | "1: jmp 2f\n" | ||
| 24 | |||
| 25 | ".align 16\n" | ||
| 26 | "2: dec %0\n" | ||
| 27 | " jnz 2b\n" | ||
| 28 | "3: dec %0\n" | ||
| 29 | |||
| 30 | : /* we don't need output */ | ||
| 31 | : "a" (loops) | ||
| 32 | ); | ||
| 33 | } | ||
| 34 | EXPORT_SYMBOL(__delay); | ||
| 35 | |||
| 36 | inline void __const_udelay(unsigned long xloops) | ||
| 37 | { | ||
| 38 | int d0; | ||
| 39 | |||
| 40 | xloops *= 4; | ||
| 41 | asm("mull %%edx" | ||
| 42 | : "=d" (xloops), "=&a" (d0) | ||
| 43 | : "1" (xloops), "0" | ||
| 44 | (loops_per_jiffy * (HZ/4))); | ||
| 45 | |||
| 46 | __delay(++xloops); | ||
| 47 | } | ||
| 48 | EXPORT_SYMBOL(__const_udelay); | ||
| 49 | |||
| 50 | void __udelay(unsigned long usecs) | ||
| 51 | { | ||
| 52 | __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ | ||
| 53 | } | ||
| 54 | EXPORT_SYMBOL(__udelay); | ||
| 55 | |||
| 56 | void __ndelay(unsigned long nsecs) | ||
| 57 | { | ||
| 58 | __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ | ||
| 59 | } | ||
| 60 | EXPORT_SYMBOL(__ndelay); | ||
diff --git a/arch/um/sys-i386/elfcore.c b/arch/um/sys-i386/elfcore.c
new file mode 100644
index 00000000000..6bb49b687c9
--- /dev/null
+++ b/arch/um/sys-i386/elfcore.c
@@ -0,0 +1,83 @@
| 1 | #include <linux/elf.h> | ||
| 2 | #include <linux/coredump.h> | ||
| 3 | #include <linux/fs.h> | ||
| 4 | #include <linux/mm.h> | ||
| 5 | |||
| 6 | #include <asm/elf.h> | ||
| 7 | |||
| 8 | |||
| 9 | Elf32_Half elf_core_extra_phdrs(void) | ||
| 10 | { | ||
| 11 | return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0; | ||
| 12 | } | ||
| 13 | |||
| 14 | int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, | ||
| 15 | unsigned long limit) | ||
| 16 | { | ||
| 17 | if ( vsyscall_ehdr ) { | ||
| 18 | const struct elfhdr *const ehdrp = | ||
| 19 | (struct elfhdr *) vsyscall_ehdr; | ||
| 20 | const struct elf_phdr *const phdrp = | ||
| 21 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
| 22 | int i; | ||
| 23 | Elf32_Off ofs = 0; | ||
| 24 | |||
| 25 | for (i = 0; i < ehdrp->e_phnum; ++i) { | ||
| 26 | struct elf_phdr phdr = phdrp[i]; | ||
| 27 | |||
| 28 | if (phdr.p_type == PT_LOAD) { | ||
| 29 | ofs = phdr.p_offset = offset; | ||
| 30 | offset += phdr.p_filesz; | ||
| 31 | } else { | ||
| 32 | phdr.p_offset += ofs; | ||
| 33 | } | ||
| 34 | phdr.p_paddr = 0; /* match other core phdrs */ | ||
| 35 | *size += sizeof(phdr); | ||
| 36 | if (*size > limit | ||
| 37 | || !dump_write(file, &phdr, sizeof(phdr))) | ||
| 38 | return 0; | ||
| 39 | } | ||
| 40 | } | ||
| 41 | return 1; | ||
| 42 | } | ||
| 43 | |||
| 44 | int elf_core_write_extra_data(struct file *file, size_t *size, | ||
| 45 | unsigned long limit) | ||
| 46 | { | ||
| 47 | if ( vsyscall_ehdr ) { | ||
| 48 | const struct elfhdr *const ehdrp = | ||
| 49 | (struct elfhdr *) vsyscall_ehdr; | ||
| 50 | const struct elf_phdr *const phdrp = | ||
| 51 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
| 52 | int i; | ||
| 53 | |||
| 54 | for (i = 0; i < ehdrp->e_phnum; ++i) { | ||
| 55 | if (phdrp[i].p_type == PT_LOAD) { | ||
| 56 | void *addr = (void *) phdrp[i].p_vaddr; | ||
| 57 | size_t filesz = phdrp[i].p_filesz; | ||
| 58 | |||
| 59 | *size += filesz; | ||
| 60 | if (*size > limit | ||
| 61 | || !dump_write(file, addr, filesz)) | ||
| 62 | return 0; | ||
| 63 | } | ||
| 64 | } | ||
| 65 | } | ||
| 66 | return 1; | ||
| 67 | } | ||
| 68 | |||
| 69 | size_t elf_core_extra_data_size(void) | ||
| 70 | { | ||
| 71 | if ( vsyscall_ehdr ) { | ||
| 72 | const struct elfhdr *const ehdrp = | ||
| 73 | (struct elfhdr *)vsyscall_ehdr; | ||
| 74 | const struct elf_phdr *const phdrp = | ||
| 75 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
| 76 | int i; | ||
| 77 | |||
| 78 | for (i = 0; i < ehdrp->e_phnum; ++i) | ||
| 79 | if (phdrp[i].p_type == PT_LOAD) | ||
| 80 | return (size_t) phdrp[i].p_filesz; | ||
| 81 | } | ||
| 82 | return 0; | ||
| 83 | } | ||
diff --git a/arch/um/sys-i386/fault.c b/arch/um/sys-i386/fault.c
new file mode 100644
index 00000000000..d670f68532f
--- /dev/null
+++ b/arch/um/sys-i386/fault.c
@@ -0,0 +1,28 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include "sysdep/ptrace.h" | ||
| 7 | |||
| 8 | /* These two are from asm-um/uaccess.h and linux/module.h, check them. */ | ||
| 9 | struct exception_table_entry | ||
| 10 | { | ||
| 11 | unsigned long insn; | ||
| 12 | unsigned long fixup; | ||
| 13 | }; | ||
| 14 | |||
| 15 | const struct exception_table_entry *search_exception_tables(unsigned long add); | ||
| 16 | |||
| 17 | /* Compare this to arch/i386/mm/extable.c:fixup_exception() */ | ||
| 18 | int arch_fixup(unsigned long address, struct uml_pt_regs *regs) | ||
| 19 | { | ||
| 20 | const struct exception_table_entry *fixup; | ||
| 21 | |||
| 22 | fixup = search_exception_tables(address); | ||
| 23 | if (fixup != 0) { | ||
| 24 | UPT_IP(regs) = fixup->fixup; | ||
| 25 | return 1; | ||
| 26 | } | ||
| 27 | return 0; | ||
| 28 | } | ||
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c
new file mode 100644
index 00000000000..bfbefd30db8
--- /dev/null
+++ b/arch/um/sys-i386/ksyms.c
@@ -0,0 +1,5 @@
| 1 | #include "linux/module.h" | ||
| 2 | #include "asm/checksum.h" | ||
| 3 | |||
| 4 | /* Networking helper routines. */ | ||
| 5 | EXPORT_SYMBOL(csum_partial); | ||
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
new file mode 100644
index 00000000000..3f2bf208d88
--- /dev/null
+++ b/arch/um/sys-i386/ldt.c
@@ -0,0 +1,502 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/mm.h> | ||
| 7 | #include <linux/sched.h> | ||
| 8 | #include <linux/slab.h> | ||
| 9 | #include <asm/unistd.h> | ||
| 10 | #include "os.h" | ||
| 11 | #include "proc_mm.h" | ||
| 12 | #include "skas.h" | ||
| 13 | #include "skas_ptrace.h" | ||
| 14 | #include "sysdep/tls.h" | ||
| 15 | |||
| 16 | extern int modify_ldt(int func, void *ptr, unsigned long bytecount); | ||
| 17 | |||
| 18 | static long write_ldt_entry(struct mm_id *mm_idp, int func, | ||
| 19 | struct user_desc *desc, void **addr, int done) | ||
| 20 | { | ||
| 21 | long res; | ||
| 22 | |||
| 23 | if (proc_mm) { | ||
| 24 | /* | ||
| 25 | * This is a special handling for the case, that the mm to | ||
| 26 | * modify isn't current->active_mm. | ||
| 27 | * If this is called directly by modify_ldt, | ||
| 28 | * (current->active_mm->context.skas.u == mm_idp) | ||
| 29 | * will be true. So no call to __switch_mm(mm_idp) is done. | ||
| 30 | * If this is called in case of init_new_ldt or PTRACE_LDT, | ||
| 31 | * mm_idp won't belong to current->active_mm, but child->mm. | ||
| 32 | * So we need to switch child's mm into our userspace, then | ||
| 33 | * later switch back. | ||
| 34 | * | ||
| 35 | * Note: I'm unsure: should interrupts be disabled here? | ||
| 36 | */ | ||
| 37 | if (!current->active_mm || current->active_mm == &init_mm || | ||
| 38 | mm_idp != &current->active_mm->context.id) | ||
| 39 | __switch_mm(mm_idp); | ||
| 40 | } | ||
| 41 | |||
| 42 | if (ptrace_ldt) { | ||
| 43 | struct ptrace_ldt ldt_op = (struct ptrace_ldt) { | ||
| 44 | .func = func, | ||
| 45 | .ptr = desc, | ||
| 46 | .bytecount = sizeof(*desc)}; | ||
| 47 | u32 cpu; | ||
| 48 | int pid; | ||
| 49 | |||
| 50 | if (!proc_mm) | ||
| 51 | pid = mm_idp->u.pid; | ||
| 52 | else { | ||
| 53 | cpu = get_cpu(); | ||
| 54 | pid = userspace_pid[cpu]; | ||
| 55 | } | ||
| 56 | |||
| 57 | res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op); | ||
| 58 | |||
| 59 | if (proc_mm) | ||
| 60 | put_cpu(); | ||
| 61 | } | ||
| 62 | else { | ||
| 63 | void *stub_addr; | ||
| 64 | res = syscall_stub_data(mm_idp, (unsigned long *)desc, | ||
| 65 | (sizeof(*desc) + sizeof(long) - 1) & | ||
| 66 | ~(sizeof(long) - 1), | ||
| 67 | addr, &stub_addr); | ||
| 68 | if (!res) { | ||
| 69 | unsigned long args[] = { func, | ||
| 70 | (unsigned long)stub_addr, | ||
| 71 | sizeof(*desc), | ||
| 72 | 0, 0, 0 }; | ||
| 73 | res = run_syscall_stub(mm_idp, __NR_modify_ldt, args, | ||
| 74 | 0, addr, done); | ||
| 75 | } | ||
| 76 | } | ||
| 77 | |||
| 78 | if (proc_mm) { | ||
| 79 | /* | ||
| 80 | * This is the second part of special handling, that makes | ||
| 81 | * PTRACE_LDT possible to implement. | ||
| 82 | */ | ||
| 83 | if (current->active_mm && current->active_mm != &init_mm && | ||
| 84 | mm_idp != &current->active_mm->context.id) | ||
| 85 | __switch_mm(&current->active_mm->context.id); | ||
| 86 | } | ||
| 87 | |||
| 88 | return res; | ||
| 89 | } | ||
| 90 | |||
| 91 | static long read_ldt_from_host(void __user * ptr, unsigned long bytecount) | ||
| 92 | { | ||
| 93 | int res, n; | ||
| 94 | struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) { | ||
| 95 | .func = 0, | ||
| 96 | .bytecount = bytecount, | ||
| 97 | .ptr = kmalloc(bytecount, GFP_KERNEL)}; | ||
| 98 | u32 cpu; | ||
| 99 | |||
| 100 | if (ptrace_ldt.ptr == NULL) | ||
| 101 | return -ENOMEM; | ||
| 102 | |||
| 103 | /* | ||
| 104 | * This is called from sys_modify_ldt only, so userspace_pid gives | ||
| 105 | * us the right number | ||
| 106 | */ | ||
| 107 | |||
| 108 | cpu = get_cpu(); | ||
| 109 | res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt); | ||
| 110 | put_cpu(); | ||
| 111 | if (res < 0) | ||
| 112 | goto out; | ||
| 113 | |||
| 114 | n = copy_to_user(ptr, ptrace_ldt.ptr, res); | ||
| 115 | if (n != 0) | ||
| 116 | res = -EFAULT; | ||
| 117 | |||
| 118 | out: | ||
| 119 | kfree(ptrace_ldt.ptr); | ||
| 120 | |||
| 121 | return res; | ||
| 122 | } | ||
| 123 | |||
| 124 | /* | ||
| 125 | * In skas mode, we hold our own ldt data in UML. | ||
| 126 | * Thus, the code implementing sys_modify_ldt_skas | ||
| 127 | * is very similar to (and mostly stolen from) sys_modify_ldt | ||
| 128 | * for arch/i386/kernel/ldt.c | ||
| 129 | * The routines copied and modified in part are: | ||
| 130 | * - read_ldt | ||
| 131 | * - read_default_ldt | ||
| 132 | * - write_ldt | ||
| 133 | * - sys_modify_ldt_skas | ||
| 134 | */ | ||
| 135 | |||
| 136 | static int read_ldt(void __user * ptr, unsigned long bytecount) | ||
| 137 | { | ||
| 138 | int i, err = 0; | ||
| 139 | unsigned long size; | ||
| 140 | uml_ldt_t * ldt = &current->mm->context.ldt; | ||
| 141 | |||
| 142 | if (!ldt->entry_count) | ||
| 143 | goto out; | ||
| 144 | if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) | ||
| 145 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; | ||
| 146 | err = bytecount; | ||
| 147 | |||
| 148 | if (ptrace_ldt) | ||
| 149 | return read_ldt_from_host(ptr, bytecount); | ||
| 150 | |||
| 151 | mutex_lock(&ldt->lock); | ||
| 152 | if (ldt->entry_count <= LDT_DIRECT_ENTRIES) { | ||
| 153 | size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES; | ||
| 154 | if (size > bytecount) | ||
| 155 | size = bytecount; | ||
| 156 | if (copy_to_user(ptr, ldt->u.entries, size)) | ||
| 157 | err = -EFAULT; | ||
| 158 | bytecount -= size; | ||
| 159 | ptr += size; | ||
| 160 | } | ||
| 161 | else { | ||
| 162 | for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount; | ||
| 163 | i++) { | ||
| 164 | size = PAGE_SIZE; | ||
| 165 | if (size > bytecount) | ||
| 166 | size = bytecount; | ||
| 167 | if (copy_to_user(ptr, ldt->u.pages[i], size)) { | ||
| 168 | err = -EFAULT; | ||
| 169 | break; | ||
| 170 | } | ||
| 171 | bytecount -= size; | ||
| 172 | ptr += size; | ||
| 173 | } | ||
| 174 | } | ||
| 175 | mutex_unlock(&ldt->lock); | ||
| 176 | |||
| 177 | if (bytecount == 0 || err == -EFAULT) | ||
| 178 | goto out; | ||
| 179 | |||
| 180 | if (clear_user(ptr, bytecount)) | ||
| 181 | err = -EFAULT; | ||
| 182 | |||
| 183 | out: | ||
| 184 | return err; | ||
| 185 | } | ||
| 186 | |||
| 187 | static int read_default_ldt(void __user * ptr, unsigned long bytecount) | ||
| 188 | { | ||
| 189 | int err; | ||
| 190 | |||
| 191 | if (bytecount > 5*LDT_ENTRY_SIZE) | ||
| 192 | bytecount = 5*LDT_ENTRY_SIZE; | ||
| 193 | |||
| 194 | err = bytecount; | ||
| 195 | /* | ||
| 196 | * UML doesn't support lcall7 and lcall27. | ||
| 197 | * So, we don't really have a default ldt, but emulate | ||
| 198 | * an empty ldt of common host default ldt size. | ||
| 199 | */ | ||
| 200 | if (clear_user(ptr, bytecount)) | ||
| 201 | err = -EFAULT; | ||
| 202 | |||
| 203 | return err; | ||
| 204 | } | ||
| 205 | |||
| 206 | static int write_ldt(void __user * ptr, unsigned long bytecount, int func) | ||
| 207 | { | ||
| 208 | uml_ldt_t * ldt = &current->mm->context.ldt; | ||
| 209 | struct mm_id * mm_idp = &current->mm->context.id; | ||
| 210 | int i, err; | ||
| 211 | struct user_desc ldt_info; | ||
| 212 | struct ldt_entry entry0, *ldt_p; | ||
| 213 | void *addr = NULL; | ||
| 214 | |||
| 215 | err = -EINVAL; | ||
| 216 | if (bytecount != sizeof(ldt_info)) | ||
| 217 | goto out; | ||
| 218 | err = -EFAULT; | ||
| 219 | if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) | ||
| 220 | goto out; | ||
| 221 | |||
| 222 | err = -EINVAL; | ||
| 223 | if (ldt_info.entry_number >= LDT_ENTRIES) | ||
| 224 | goto out; | ||
| 225 | if (ldt_info.contents == 3) { | ||
| 226 | if (func == 1) | ||
| 227 | goto out; | ||
| 228 | if (ldt_info.seg_not_present == 0) | ||
| 229 | goto out; | ||
| 230 | } | ||
| 231 | |||
| 232 | if (!ptrace_ldt) | ||
| 233 | mutex_lock(&ldt->lock); | ||
| 234 | |||
| 235 | err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1); | ||
| 236 | if (err) | ||
| 237 | goto out_unlock; | ||
| 238 | else if (ptrace_ldt) { | ||
| 239 | /* With PTRACE_LDT available, this is used as a flag only */ | ||
| 240 | ldt->entry_count = 1; | ||
| 241 | goto out; | ||
| 242 | } | ||
| 243 | |||
| 244 | if (ldt_info.entry_number >= ldt->entry_count && | ||
| 245 | ldt_info.entry_number >= LDT_DIRECT_ENTRIES) { | ||
| 246 | for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE; | ||
| 247 | i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number; | ||
| 248 | i++) { | ||
| 249 | if (i == 0) | ||
| 250 | memcpy(&entry0, ldt->u.entries, | ||
| 251 | sizeof(entry0)); | ||
| 252 | ldt->u.pages[i] = (struct ldt_entry *) | ||
| 253 | __get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
| 254 | if (!ldt->u.pages[i]) { | ||
| 255 | err = -ENOMEM; | ||
| 256 | /* Undo the change in host */ | ||
| 257 | memset(&ldt_info, 0, sizeof(ldt_info)); | ||
| 258 | write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1); | ||
| 259 | goto out_unlock; | ||
| 260 | } | ||
| 261 | if (i == 0) { | ||
| 262 | memcpy(ldt->u.pages[0], &entry0, | ||
| 263 | sizeof(entry0)); | ||
| 264 | memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, | ||
| 265 | sizeof(entry0)*(LDT_DIRECT_ENTRIES-1)); | ||
| 266 | } | ||
| 267 | ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE; | ||
| 268 | } | ||
| 269 | } | ||
| 270 | if (ldt->entry_count <= ldt_info.entry_number) | ||
| 271 | ldt->entry_count = ldt_info.entry_number + 1; | ||
| 272 | |||
| 273 | if (ldt->entry_count <= LDT_DIRECT_ENTRIES) | ||
| 274 | ldt_p = ldt->u.entries + ldt_info.entry_number; | ||
| 275 | else | ||
| 276 | ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + | ||
| 277 | ldt_info.entry_number%LDT_ENTRIES_PER_PAGE; | ||
| 278 | |||
| 279 | if (ldt_info.base_addr == 0 && ldt_info.limit == 0 && | ||
| 280 | (func == 1 || LDT_empty(&ldt_info))) { | ||
| 281 | ldt_p->a = 0; | ||
| 282 | ldt_p->b = 0; | ||
| 283 | } | ||
| 284 | else{ | ||
| 285 | if (func == 1) | ||
| 286 | ldt_info.useable = 0; | ||
| 287 | ldt_p->a = LDT_entry_a(&ldt_info); | ||
| 288 | ldt_p->b = LDT_entry_b(&ldt_info); | ||
| 289 | } | ||
| 290 | err = 0; | ||
| 291 | |||
| 292 | out_unlock: | ||
| 293 | mutex_unlock(&ldt->lock); | ||
| 294 | out: | ||
| 295 | return err; | ||
| 296 | } | ||
| 297 | |||
| 298 | static long do_modify_ldt_skas(int func, void __user *ptr, | ||
| 299 | unsigned long bytecount) | ||
| 300 | { | ||
| 301 | int ret = -ENOSYS; | ||
| 302 | |||
| 303 | switch (func) { | ||
| 304 | case 0: | ||
| 305 | ret = read_ldt(ptr, bytecount); | ||
| 306 | break; | ||
| 307 | case 1: | ||
| 308 | case 0x11: | ||
| 309 | ret = write_ldt(ptr, bytecount, func); | ||
| 310 | break; | ||
| 311 | case 2: | ||
| 312 | ret = read_default_ldt(ptr, bytecount); | ||
| 313 | break; | ||
| 314 | } | ||
| 315 | return ret; | ||
| 316 | } | ||
| 317 | |||
| 318 | static DEFINE_SPINLOCK(host_ldt_lock); | ||
| 319 | static short dummy_list[9] = {0, -1}; | ||
| 320 | static short * host_ldt_entries = NULL; | ||
| 321 | |||
| 322 | static void ldt_get_host_info(void) | ||
| 323 | { | ||
| 324 | long ret; | ||
| 325 | struct ldt_entry * ldt; | ||
| 326 | short *tmp; | ||
| 327 | int i, size, k, order; | ||
| 328 | |||
| 329 | spin_lock(&host_ldt_lock); | ||
| 330 | |||
| 331 | if (host_ldt_entries != NULL) { | ||
| 332 | spin_unlock(&host_ldt_lock); | ||
| 333 | return; | ||
| 334 | } | ||
| 335 | host_ldt_entries = dummy_list+1; | ||
| 336 | |||
| 337 | spin_unlock(&host_ldt_lock); | ||
| 338 | |||
| 339 | for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++) | ||
| 340 | ; | ||
| 341 | |||
| 342 | ldt = (struct ldt_entry *) | ||
| 343 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); | ||
| 344 | if (ldt == NULL) { | ||
| 345 | printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer " | ||
| 346 | "for host ldt\n"); | ||
| 347 | return; | ||
| 348 | } | ||
| 349 | |||
| 350 | ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE); | ||
| 351 | if (ret < 0) { | ||
| 352 | printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n"); | ||
| 353 | goto out_free; | ||
| 354 | } | ||
| 355 | if (ret == 0) { | ||
| 356 | /* default_ldt is active, simply write an empty entry 0 */ | ||
| 357 | host_ldt_entries = dummy_list; | ||
| 358 | goto out_free; | ||
| 359 | } | ||
| 360 | |||
| 361 | for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) { | ||
| 362 | if (ldt[i].a != 0 || ldt[i].b != 0) | ||
| 363 | size++; | ||
| 364 | } | ||
| 365 | |||
| 366 | if (size < ARRAY_SIZE(dummy_list)) | ||
| 367 | host_ldt_entries = dummy_list; | ||
| 368 | else { | ||
| 369 | size = (size + 1) * sizeof(dummy_list[0]); | ||
| 370 | tmp = kmalloc(size, GFP_KERNEL); | ||
| 371 | if (tmp == NULL) { | ||
| 372 | printk(KERN_ERR "ldt_get_host_info: couldn't allocate " | ||
| 373 | "host ldt list\n"); | ||
| 374 | goto out_free; | ||
| 375 | } | ||
| 376 | host_ldt_entries = tmp; | ||
| 377 | } | ||
| 378 | |||
| 379 | for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) { | ||
| 380 | if (ldt[i].a != 0 || ldt[i].b != 0) | ||
| 381 | host_ldt_entries[k++] = i; | ||
| 382 | } | ||
| 383 | host_ldt_entries[k] = -1; | ||
| 384 | |||
| 385 | out_free: | ||
| 386 | free_pages((unsigned long)ldt, order); | ||
| 387 | } | ||
| 388 | |||
| 389 | long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm) | ||
| 390 | { | ||
| 391 | struct user_desc desc; | ||
| 392 | short * num_p; | ||
| 393 | int i; | ||
| 394 | long page, err=0; | ||
| 395 | void *addr = NULL; | ||
| 396 | struct proc_mm_op copy; | ||
| 397 | |||
| 398 | |||
| 399 | if (!ptrace_ldt) | ||
| 400 | mutex_init(&new_mm->ldt.lock); | ||
| 401 | |||
| 402 | if (!from_mm) { | ||
| 403 | memset(&desc, 0, sizeof(desc)); | ||
| 404 | /* | ||
| 405 | * We have to initialize a clean ldt. | ||
| 406 | */ | ||
| 407 | if (proc_mm) { | ||
| 408 | /* | ||
| 409 | * If the new mm was created using proc_mm, the host's | ||
| 410 | * default LDT is currently assigned, which normally | ||
| 411 | * contains the call-gates for lcall7 and lcall27. | ||
| 412 | * To remove these gates, we simply write an empty | ||
| 413 | * entry as number 0 to the host. | ||
| 414 | */ | ||
| 415 | err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1); | ||
| 416 | } | ||
| 417 | else { | ||
| 418 | /* | ||
| 419 | * Now we try to retrieve info about the ldt we | ||
| 420 | * inherited from the host. All ldt entries found | ||
| 421 | * will be reset in the following loop. | ||
| 422 | */ | ||
| 423 | ldt_get_host_info(); | ||
| 424 | for (num_p=host_ldt_entries; *num_p != -1; num_p++) { | ||
| 425 | desc.entry_number = *num_p; | ||
| 426 | err = write_ldt_entry(&new_mm->id, 1, &desc, | ||
| 427 | &addr, *(num_p + 1) == -1); | ||
| 428 | if (err) | ||
| 429 | break; | ||
| 430 | } | ||
| 431 | } | ||
| 432 | new_mm->ldt.entry_count = 0; | ||
| 433 | |||
| 434 | goto out; | ||
| 435 | } | ||
| 436 | |||
| 437 | if (proc_mm) { | ||
| 438 | /* | ||
| 439 | * We have a valid from_mm, so we now have to copy the LDT of | ||
| 440 | * from_mm to new_mm, because using proc_mm a new mm with | ||
| 441 | * an empty/default LDT was created in new_mm() | ||
| 442 | */ | ||
| 443 | copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS, | ||
| 444 | .u = | ||
| 445 | { .copy_segments = | ||
| 446 | from_mm->id.u.mm_fd } } ); | ||
| 447 | i = os_write_file(new_mm->id.u.mm_fd, ©, sizeof(copy)); | ||
| 448 | if (i != sizeof(copy)) | ||
| 449 | printk(KERN_ERR "new_mm : /proc/mm copy_segments " | ||
| 450 | "failed, err = %d\n", -i); | ||
| 451 | } | ||
| 452 | |||
| 453 | if (!ptrace_ldt) { | ||
| 454 | /* | ||
| 455 | * Our local LDT is used to supply the data for | ||
| 456 | * modify_ldt(READLDT), if PTRACE_LDT isn't available, | ||
| 457 | * i.e., we have to use the stub for modify_ldt, which | ||
| 458 | * can't handle the big read buffer of up to 64kB. | ||
| 459 | */ | ||
| 460 | mutex_lock(&from_mm->ldt.lock); | ||
| 461 | if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES) | ||
| 462 | memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries, | ||
| 463 | sizeof(new_mm->ldt.u.entries)); | ||
| 464 | else { | ||
| 465 | i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; | ||
| 466 | while (i-->0) { | ||
| 467 | page = __get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
| 468 | if (!page) { | ||
| 469 | err = -ENOMEM; | ||
| 470 | break; | ||
| 471 | } | ||
| 472 | new_mm->ldt.u.pages[i] = | ||
| 473 | (struct ldt_entry *) page; | ||
| 474 | memcpy(new_mm->ldt.u.pages[i], | ||
| 475 | from_mm->ldt.u.pages[i], PAGE_SIZE); | ||
| 476 | } | ||
| 477 | } | ||
| 478 | new_mm->ldt.entry_count = from_mm->ldt.entry_count; | ||
| 479 | mutex_unlock(&from_mm->ldt.lock); | ||
| 480 | } | ||
| 481 | |||
| 482 | out: | ||
| 483 | return err; | ||
| 484 | } | ||
| 485 | |||
| 486 | |||
| 487 | void free_ldt(struct mm_context *mm) | ||
| 488 | { | ||
| 489 | int i; | ||
| 490 | |||
| 491 | if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) { | ||
| 492 | i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE; | ||
| 493 | while (i-- > 0) | ||
| 494 | free_page((long) mm->ldt.u.pages[i]); | ||
| 495 | } | ||
| 496 | mm->ldt.entry_count = 0; | ||
| 497 | } | ||
| 498 | |||
| 499 | int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) | ||
| 500 | { | ||
| 501 | return do_modify_ldt_skas(func, ptr, bytecount); | ||
| 502 | } | ||
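The dispatcher above routes modify_ldt() calls: func 0 reads the guest's LDT, 1 and 0x11 write a single entry, and 2 returns the default LDT. Below is a hedged, user-space sketch of that interface as seen from inside the guest; it assumes glibc offers no modify_ldt() wrapper (so the raw syscall number from <sys/syscall.h> is used) and that <asm/ldt.h> supplies struct user_desc, LDT_ENTRIES and LDT_ENTRY_SIZE.

```c
/* Hedged sketch: exercising sys_modify_ldt() from user space.  Assumes
 * <asm/ldt.h> provides struct user_desc and that the raw syscall has to
 * be used because glibc ships no modify_ldt() wrapper. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc desc;
	static unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
	long ret;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = 0;
	desc.base_addr      = 0;
	desc.limit          = 0xfffff;	/* flat 4 GB with limit_in_pages */
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;
	desc.useable        = 1;

	/* func == 0x11: write one LDT entry (the write_ldt() path above) */
	ret = syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
	if (ret != 0)
		perror("modify_ldt(write)");

	/* func == 0: read the LDT back (the read_ldt() path above) */
	ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	printf("read %ld bytes of LDT\n", ret);
	return 0;
}
```

As the write_ldt() code above shows, func 1 differs from 0x11 only in that it forces the useable bit to 0, and an all-zero base/limit description clears the entry instead of installing one.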
diff --git a/arch/um/sys-i386/mem.c b/arch/um/sys-i386/mem.c new file mode 100644 index 00000000000..639900a6fde --- /dev/null +++ b/arch/um/sys-i386/mem.c | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2011 Richard Weinberger <richrd@nod.at> | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/mm.h> | ||
| 10 | #include <asm/page.h> | ||
| 11 | #include <asm/mman.h> | ||
| 12 | |||
| 13 | static struct vm_area_struct gate_vma; | ||
| 14 | |||
| 15 | static int __init gate_vma_init(void) | ||
| 16 | { | ||
| 17 | if (!FIXADDR_USER_START) | ||
| 18 | return 0; | ||
| 19 | |||
| 20 | gate_vma.vm_mm = NULL; | ||
| 21 | gate_vma.vm_start = FIXADDR_USER_START; | ||
| 22 | gate_vma.vm_end = FIXADDR_USER_END; | ||
| 23 | gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; | ||
| 24 | gate_vma.vm_page_prot = __P101; | ||
| 25 | |||
| 26 | /* | ||
| 27 | * Make sure the vDSO gets into every core dump. | ||
| 28 | * Dumping its contents makes post-mortem fully interpretable later | ||
| 29 | * without matching up the same kernel and hardware config to see | ||
| 30 | * what PC values meant. | ||
| 31 | */ | ||
| 32 | gate_vma.vm_flags |= VM_ALWAYSDUMP; | ||
| 33 | |||
| 34 | return 0; | ||
| 35 | } | ||
| 36 | __initcall(gate_vma_init); | ||
| 37 | |||
| 38 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) | ||
| 39 | { | ||
| 40 | return FIXADDR_USER_START ? &gate_vma : NULL; | ||
| 41 | } | ||
| 42 | |||
| 43 | int in_gate_area_no_mm(unsigned long addr) | ||
| 44 | { | ||
| 45 | if (!FIXADDR_USER_START) | ||
| 46 | return 0; | ||
| 47 | |||
| 48 | if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) | ||
| 49 | return 1; | ||
| 50 | |||
| 51 | return 0; | ||
| 52 | } | ||
| 53 | |||
| 54 | int in_gate_area(struct mm_struct *mm, unsigned long addr) | ||
| 55 | { | ||
| 56 | struct vm_area_struct *vma = get_gate_vma(mm); | ||
| 57 | |||
| 58 | if (!vma) | ||
| 59 | return 0; | ||
| 60 | |||
| 61 | return (addr >= vma->vm_start) && (addr < vma->vm_end); | ||
| 62 | } | ||
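The gate VMA set up above is the fixmap-based vDSO area that get_gate_vma()/in_gate_area() report to the rest of the kernel. From user space it is visible only as a pseudo-mapping in /proc/&lt;pid&gt;/maps; the following hedged sketch (a host-side convenience, not part of this patch) just prints those lines so the region can be seen.

```c
/* Hedged sketch: show the gate/vDSO pseudo-mappings that the code above
 * describes, by scanning /proc/self/maps for their markers. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vdso]") || strstr(line, "[vsyscall]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
```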
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c new file mode 100644 index 00000000000..3375c271785 --- /dev/null +++ b/arch/um/sys-i386/ptrace.c | |||
| @@ -0,0 +1,228 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include "linux/mm.h" | ||
| 7 | #include "linux/sched.h" | ||
| 8 | #include "asm/uaccess.h" | ||
| 9 | #include "skas.h" | ||
| 10 | |||
| 11 | extern int arch_switch_tls(struct task_struct *to); | ||
| 12 | |||
| 13 | void arch_switch_to(struct task_struct *to) | ||
| 14 | { | ||
| 15 | int err = arch_switch_tls(to); | ||
| 16 | if (!err) | ||
| 17 | return; | ||
| 18 | |||
| 19 | if (err != -EINVAL) | ||
| 20 | printk(KERN_WARNING "arch_switch_tls failed, errno %d, " | ||
| 21 | "not EINVAL\n", -err); | ||
| 22 | else | ||
| 23 | printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n"); | ||
| 24 | } | ||
| 25 | |||
| 26 | int is_syscall(unsigned long addr) | ||
| 27 | { | ||
| 28 | unsigned short instr; | ||
| 29 | int n; | ||
| 30 | |||
| 31 | n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); | ||
| 32 | if (n) { | ||
| 33 | /* access_process_vm() grants access to vsyscall and stub, | ||
| 34 | * while copy_from_user doesn't. Maybe access_process_vm is | ||
| 35 | * slow, but that doesn't matter, since it will be called only | ||
| 36 | * in case of singlestepping, if copy_from_user failed. | ||
| 37 | */ | ||
| 38 | n = access_process_vm(current, addr, &instr, sizeof(instr), 0); | ||
| 39 | if (n != sizeof(instr)) { | ||
| 40 | printk(KERN_ERR "is_syscall : failed to read " | ||
| 41 | "instruction from 0x%lx\n", addr); | ||
| 42 | return 1; | ||
| 43 | } | ||
| 44 | } | ||
| 45 | /* int 0x80 or sysenter */ | ||
| 46 | return (instr == 0x80cd) || (instr == 0x340f); | ||
| 47 | } | ||
| 48 | |||
| 49 | /* determines which flags the user has access to. */ | ||
| 50 | /* 1 = access 0 = no access */ | ||
| 51 | #define FLAG_MASK 0x00044dd5 | ||
| 52 | |||
| 53 | int putreg(struct task_struct *child, int regno, unsigned long value) | ||
| 54 | { | ||
| 55 | regno >>= 2; | ||
| 56 | switch (regno) { | ||
| 57 | case FS: | ||
| 58 | if (value && (value & 3) != 3) | ||
| 59 | return -EIO; | ||
| 60 | PT_REGS_FS(&child->thread.regs) = value; | ||
| 61 | return 0; | ||
| 62 | case GS: | ||
| 63 | if (value && (value & 3) != 3) | ||
| 64 | return -EIO; | ||
| 65 | PT_REGS_GS(&child->thread.regs) = value; | ||
| 66 | return 0; | ||
| 67 | case DS: | ||
| 68 | case ES: | ||
| 69 | if (value && (value & 3) != 3) | ||
| 70 | return -EIO; | ||
| 71 | value &= 0xffff; | ||
| 72 | break; | ||
| 73 | case SS: | ||
| 74 | case CS: | ||
| 75 | if ((value & 3) != 3) | ||
| 76 | return -EIO; | ||
| 77 | value &= 0xffff; | ||
| 78 | break; | ||
| 79 | case EFL: | ||
| 80 | value &= FLAG_MASK; | ||
| 81 | value |= PT_REGS_EFLAGS(&child->thread.regs); | ||
| 82 | break; | ||
| 83 | } | ||
| 84 | PT_REGS_SET(&child->thread.regs, regno, value); | ||
| 85 | return 0; | ||
| 86 | } | ||
| 87 | |||
| 88 | int poke_user(struct task_struct *child, long addr, long data) | ||
| 89 | { | ||
| 90 | if ((addr & 3) || addr < 0) | ||
| 91 | return -EIO; | ||
| 92 | |||
| 93 | if (addr < MAX_REG_OFFSET) | ||
| 94 | return putreg(child, addr, data); | ||
| 95 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | ||
| 96 | (addr <= offsetof(struct user, u_debugreg[7]))) { | ||
| 97 | addr -= offsetof(struct user, u_debugreg[0]); | ||
| 98 | addr = addr >> 2; | ||
| 99 | if ((addr == 4) || (addr == 5)) | ||
| 100 | return -EIO; | ||
| 101 | child->thread.arch.debugregs[addr] = data; | ||
| 102 | return 0; | ||
| 103 | } | ||
| 104 | return -EIO; | ||
| 105 | } | ||
| 106 | |||
| 107 | unsigned long getreg(struct task_struct *child, int regno) | ||
| 108 | { | ||
| 109 | unsigned long retval = ~0UL; | ||
| 110 | |||
| 111 | regno >>= 2; | ||
| 112 | switch (regno) { | ||
| 113 | case FS: | ||
| 114 | case GS: | ||
| 115 | case DS: | ||
| 116 | case ES: | ||
| 117 | case SS: | ||
| 118 | case CS: | ||
| 119 | retval = 0xffff; | ||
| 120 | /* fall through */ | ||
| 121 | default: | ||
| 122 | retval &= PT_REG(&child->thread.regs, regno); | ||
| 123 | } | ||
| 124 | return retval; | ||
| 125 | } | ||
| 126 | |||
| 127 | /* read the word at location addr in the USER area. */ | ||
| 128 | int peek_user(struct task_struct *child, long addr, long data) | ||
| 129 | { | ||
| 130 | unsigned long tmp; | ||
| 131 | |||
| 132 | if ((addr & 3) || addr < 0) | ||
| 133 | return -EIO; | ||
| 134 | |||
| 135 | tmp = 0; /* Default return condition */ | ||
| 136 | if (addr < MAX_REG_OFFSET) { | ||
| 137 | tmp = getreg(child, addr); | ||
| 138 | } | ||
| 139 | else if ((addr >= offsetof(struct user, u_debugreg[0])) && | ||
| 140 | (addr <= offsetof(struct user, u_debugreg[7]))) { | ||
| 141 | addr -= offsetof(struct user, u_debugreg[0]); | ||
| 142 | addr = addr >> 2; | ||
| 143 | tmp = child->thread.arch.debugregs[addr]; | ||
| 144 | } | ||
| 145 | return put_user(tmp, (unsigned long __user *) data); | ||
| 146 | } | ||
| 147 | |||
| 148 | static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | ||
| 149 | { | ||
| 150 | int err, n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
| 151 | struct user_i387_struct fpregs; | ||
| 152 | |||
| 153 | err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs); | ||
| 154 | if (err) | ||
| 155 | return err; | ||
| 156 | |||
| 157 | n = copy_to_user(buf, &fpregs, sizeof(fpregs)); | ||
| 158 | if (n > 0) | ||
| 159 | return -EFAULT; | ||
| 160 | |||
| 161 | return n; | ||
| 162 | } | ||
| 163 | |||
| 164 | static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | ||
| 165 | { | ||
| 166 | int n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
| 167 | struct user_i387_struct fpregs; | ||
| 168 | |||
| 169 | n = copy_from_user(&fpregs, buf, sizeof(fpregs)); | ||
| 170 | if (n > 0) | ||
| 171 | return -EFAULT; | ||
| 172 | |||
| 173 | return restore_fp_registers(userspace_pid[cpu], | ||
| 174 | (unsigned long *) &fpregs); | ||
| 175 | } | ||
| 176 | |||
| 177 | static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) | ||
| 178 | { | ||
| 179 | int err, n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
| 180 | struct user_fxsr_struct fpregs; | ||
| 181 | |||
| 182 | err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs); | ||
| 183 | if (err) | ||
| 184 | return err; | ||
| 185 | |||
| 186 | n = copy_to_user(buf, &fpregs, sizeof(fpregs)); | ||
| 187 | if (n > 0) | ||
| 188 | return -EFAULT; | ||
| 189 | |||
| 190 | return n; | ||
| 191 | } | ||
| 192 | |||
| 193 | static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) | ||
| 194 | { | ||
| 195 | int n, cpu = ((struct thread_info *) child->stack)->cpu; | ||
| 196 | struct user_fxsr_struct fpregs; | ||
| 197 | |||
| 198 | n = copy_from_user(&fpregs, buf, sizeof(fpregs)); | ||
| 199 | if (n > 0) | ||
| 200 | return -EFAULT; | ||
| 201 | |||
| 202 | return restore_fpx_registers(userspace_pid[cpu], | ||
| 203 | (unsigned long *) &fpregs); | ||
| 204 | } | ||
| 205 | |||
| 206 | long subarch_ptrace(struct task_struct *child, long request, | ||
| 207 | unsigned long addr, unsigned long data) | ||
| 208 | { | ||
| 209 | int ret = -EIO; | ||
| 210 | void __user *datap = (void __user *) data; | ||
| 211 | switch (request) { | ||
| 212 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | ||
| 213 | ret = get_fpregs(datap, child); | ||
| 214 | break; | ||
| 215 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | ||
| 216 | ret = set_fpregs(datap, child); | ||
| 217 | break; | ||
| 218 | case PTRACE_GETFPXREGS: /* Get the child FPU state. */ | ||
| 219 | ret = get_fpxregs(datap, child); | ||
| 220 | break; | ||
| 221 | case PTRACE_SETFPXREGS: /* Set the child FPU state. */ | ||
| 222 | ret = set_fpxregs(datap, child); | ||
| 223 | break; | ||
| 224 | default: | ||
| 225 | ret = -EIO; | ||
| 226 | } | ||
| 227 | return ret; | ||
| 228 | } | ||
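subarch_ptrace() services the FP-register requests, while peek_user()/poke_user() back PTRACE_PEEKUSR/PTRACE_POKEUSR. The hedged tracer-side sketch below drives those paths from an ordinary 32-bit user-space program; the field names (regs.eip, cwd) assume glibc's i386 <sys/user.h> and would differ on an x86_64 build, and error handling is deliberately minimal.

```c
/* Hedged sketch: a tracer exercising the requests that peek_user() and
 * subarch_ptrace() above service, using the standard glibc ptrace API. */
#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* wait for the tracer */
		_exit(0);
	}

	waitpid(child, NULL, 0);

	/* PTRACE_PEEKUSER ends up in peek_user() -> getreg() */
	long eip = ptrace(PTRACE_PEEKUSER, child,
			  offsetof(struct user, regs.eip), NULL);
	printf("child eip = %#lx\n", eip);

	/* PTRACE_GETFPREGS is routed through subarch_ptrace() -> get_fpregs() */
	struct user_fpregs_struct fp;
	if (ptrace(PTRACE_GETFPREGS, child, NULL, &fp) == 0)
		printf("fpu control word = %#lx\n", (unsigned long)fp.cwd);

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	return 0;
}
```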
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c new file mode 100644 index 00000000000..0b10c3e7402 --- /dev/null +++ b/arch/um/sys-i386/ptrace_user.c | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <errno.h> | ||
| 7 | #include <sys/ptrace.h> | ||
| 8 | |||
| 9 | int ptrace_getregs(long pid, unsigned long *regs_out) | ||
| 10 | { | ||
| 11 | if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0) | ||
| 12 | return -errno; | ||
| 13 | return 0; | ||
| 14 | } | ||
| 15 | |||
| 16 | int ptrace_setregs(long pid, unsigned long *regs) | ||
| 17 | { | ||
| 18 | if (ptrace(PTRACE_SETREGS, pid, 0, regs) < 0) | ||
| 19 | return -errno; | ||
| 20 | return 0; | ||
| 21 | } | ||
diff --git a/arch/um/sys-i386/setjmp.S b/arch/um/sys-i386/setjmp.S new file mode 100644 index 00000000000..b766792c993 --- /dev/null +++ b/arch/um/sys-i386/setjmp.S | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | # | ||
| 2 | # arch/um/sys-i386/setjmp.S | ||
| 3 | # | ||
| 4 | # setjmp/longjmp for the i386 architecture | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # The jmp_buf is assumed to contain the following, in order: | ||
| 9 | # %ebx | ||
| 10 | # %esp | ||
| 11 | # %ebp | ||
| 12 | # %esi | ||
| 13 | # %edi | ||
| 14 | # <return address> | ||
| 15 | # | ||
| 16 | |||
| 17 | .text | ||
| 18 | .align 4 | ||
| 19 | .globl setjmp | ||
| 20 | .type setjmp, @function | ||
| 21 | setjmp: | ||
| 22 | #ifdef _REGPARM | ||
| 23 | movl %eax,%edx | ||
| 24 | #else | ||
| 25 | movl 4(%esp),%edx | ||
| 26 | #endif | ||
| 27 | popl %ecx # Return address, and adjust the stack | ||
| 28 | xorl %eax,%eax # Return value | ||
| 29 | movl %ebx,(%edx) | ||
| 30 | movl %esp,4(%edx) # Post-return %esp! | ||
| 31 | pushl %ecx # Make the call/return stack happy | ||
| 32 | movl %ebp,8(%edx) | ||
| 33 | movl %esi,12(%edx) | ||
| 34 | movl %edi,16(%edx) | ||
| 35 | movl %ecx,20(%edx) # Return address | ||
| 36 | ret | ||
| 37 | |||
| 38 | .size setjmp,.-setjmp | ||
| 39 | |||
| 40 | .text | ||
| 41 | .align 4 | ||
| 42 | .globl longjmp | ||
| 43 | .type longjmp, @function | ||
| 44 | longjmp: | ||
| 45 | #ifdef _REGPARM | ||
| 46 | xchgl %eax,%edx | ||
| 47 | #else | ||
| 48 | movl 4(%esp),%edx # jmp_ptr address | ||
| 49 | movl 8(%esp),%eax # Return value | ||
| 50 | #endif | ||
| 51 | movl (%edx),%ebx | ||
| 52 | movl 4(%edx),%esp | ||
| 53 | movl 8(%edx),%ebp | ||
| 54 | movl 12(%edx),%esi | ||
| 55 | movl 16(%edx),%edi | ||
| 56 | jmp *20(%edx) | ||
| 57 | |||
| 58 | .size longjmp,.-longjmp | ||
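The register set saved above (%ebx, %esp, %ebp, %esi, %edi plus the return address) is exactly what a minimal i386 setjmp/longjmp contract needs: the callee-saved registers and the resume point. For illustration, here is that contract exercised through the standard libc pair; the UML-internal routines above behave the same way but are linked into the kernel and stub rather than called from applications.

```c
/* Hedged sketch: the contract the assembly above implements, shown with
 * libc's setjmp()/longjmp().  setjmp() returns 0 on the direct call and
 * the non-zero value passed to longjmp() when control comes back. */
#include <stdio.h>
#include <setjmp.h>

static jmp_buf buf;

static void fail(void)
{
	/* restores the saved registers and jumps to the saved return address */
	longjmp(buf, 42);
}

int main(void)
{
	int val = setjmp(buf);	/* 0 the first time, 42 after longjmp() */

	if (val == 0) {
		puts("direct return from setjmp");
		fail();
	}
	printf("returned via longjmp, value %d\n", val);
	return 0;
}
```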
diff --git a/arch/um/sys-i386/shared/sysdep/archsetjmp.h b/arch/um/sys-i386/shared/sysdep/archsetjmp.h new file mode 100644 index 00000000000..0f312085ce1 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/archsetjmp.h | |||
| @@ -0,0 +1,22 @@ | |||
| 1 | /* | ||
| 2 | * arch/um/sys-i386/shared/sysdep/archsetjmp.h | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef _KLIBC_ARCHSETJMP_H | ||
| 6 | #define _KLIBC_ARCHSETJMP_H | ||
| 7 | |||
| 8 | struct __jmp_buf { | ||
| 9 | unsigned int __ebx; | ||
| 10 | unsigned int __esp; | ||
| 11 | unsigned int __ebp; | ||
| 12 | unsigned int __esi; | ||
| 13 | unsigned int __edi; | ||
| 14 | unsigned int __eip; | ||
| 15 | }; | ||
| 16 | |||
| 17 | typedef struct __jmp_buf jmp_buf[1]; | ||
| 18 | |||
| 19 | #define JB_IP __eip | ||
| 20 | #define JB_SP __esp | ||
| 21 | |||
| 22 | #endif /* _KLIBC_ARCHSETJMP_H */ | ||
diff --git a/arch/um/sys-i386/shared/sysdep/barrier.h b/arch/um/sys-i386/shared/sysdep/barrier.h new file mode 100644 index 00000000000..b58d52c5b2f --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/barrier.h | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | #ifndef __SYSDEP_I386_BARRIER_H | ||
| 2 | #define __SYSDEP_I386_BARRIER_H | ||
| 3 | |||
| 4 | /* Copied from include/asm-i386 for use by userspace. i386 has the option | ||
| 5 | * of using mfence, but I'm just using this, which works everywhere, for now. | ||
| 6 | */ | ||
| 7 | #define mb() asm volatile("lock; addl $0,0(%esp)") | ||
| 8 | |||
| 9 | #endif | ||
diff --git a/arch/um/sys-i386/shared/sysdep/checksum.h b/arch/um/sys-i386/shared/sysdep/checksum.h new file mode 100644 index 00000000000..ed47445f390 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/checksum.h | |||
| @@ -0,0 +1,201 @@ | |||
| 1 | /* | ||
| 2 | * Licensed under the GPL | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef __UM_SYSDEP_CHECKSUM_H | ||
| 6 | #define __UM_SYSDEP_CHECKSUM_H | ||
| 7 | |||
| 8 | #include "linux/in6.h" | ||
| 9 | #include "linux/string.h" | ||
| 10 | |||
| 11 | /* | ||
| 12 | * computes the checksum of a memory block at buff, length len, | ||
| 13 | * and adds in "sum" (32-bit) | ||
| 14 | * | ||
| 15 | * returns a 32-bit number suitable for feeding into itself | ||
| 16 | * or csum_tcpudp_magic | ||
| 17 | * | ||
| 18 | * this function must be called with even lengths, except | ||
| 19 | * for the last fragment, which may be odd | ||
| 20 | * | ||
| 21 | * it's best to have buff aligned on a 32-bit boundary | ||
| 22 | */ | ||
| 23 | __wsum csum_partial(const void *buff, int len, __wsum sum); | ||
| 24 | |||
| 25 | /* | ||
| 26 | * Note: when you get a NULL pointer exception here this means someone | ||
| 27 | * passed in an incorrect kernel address to one of these functions. | ||
| 28 | * | ||
| 29 | * If you use these functions directly please don't forget the | ||
| 30 | * access_ok(). | ||
| 31 | */ | ||
| 32 | |||
| 33 | static __inline__ | ||
| 34 | __wsum csum_partial_copy_nocheck(const void *src, void *dst, | ||
| 35 | int len, __wsum sum) | ||
| 36 | { | ||
| 37 | memcpy(dst, src, len); | ||
| 38 | return csum_partial(dst, len, sum); | ||
| 39 | } | ||
| 40 | |||
| 41 | /* | ||
| 42 | * the same as csum_partial, but copies from src while it | ||
| 43 | * checksums, and handles user-space pointer exceptions correctly, when needed. | ||
| 44 | * | ||
| 45 | * here even more important to align src and dst on a 32-bit (or even | ||
| 46 | * better 64-bit) boundary | ||
| 47 | */ | ||
| 48 | |||
| 49 | static __inline__ | ||
| 50 | __wsum csum_partial_copy_from_user(const void __user *src, void *dst, | ||
| 51 | int len, __wsum sum, int *err_ptr) | ||
| 52 | { | ||
| 53 | if (copy_from_user(dst, src, len)) { | ||
| 54 | *err_ptr = -EFAULT; | ||
| 55 | return (__force __wsum)-1; | ||
| 56 | } | ||
| 57 | |||
| 58 | return csum_partial(dst, len, sum); | ||
| 59 | } | ||
| 60 | |||
| 61 | /* | ||
| 62 | * This is a version of ip_compute_csum() optimized for IP headers, | ||
| 63 | * which always checksum on 4 octet boundaries. | ||
| 64 | * | ||
| 65 | * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by | ||
| 66 | * Arnt Gulbrandsen. | ||
| 67 | */ | ||
| 68 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | ||
| 69 | { | ||
| 70 | unsigned int sum; | ||
| 71 | |||
| 72 | __asm__ __volatile__( | ||
| 73 | "movl (%1), %0 ;\n" | ||
| 74 | "subl $4, %2 ;\n" | ||
| 75 | "jbe 2f ;\n" | ||
| 76 | "addl 4(%1), %0 ;\n" | ||
| 77 | "adcl 8(%1), %0 ;\n" | ||
| 78 | "adcl 12(%1), %0 ;\n" | ||
| 79 | "1: adcl 16(%1), %0 ;\n" | ||
| 80 | "lea 4(%1), %1 ;\n" | ||
| 81 | "decl %2 ;\n" | ||
| 82 | "jne 1b ;\n" | ||
| 83 | "adcl $0, %0 ;\n" | ||
| 84 | "movl %0, %2 ;\n" | ||
| 85 | "shrl $16, %0 ;\n" | ||
| 86 | "addw %w2, %w0 ;\n" | ||
| 87 | "adcl $0, %0 ;\n" | ||
| 88 | "notl %0 ;\n" | ||
| 89 | "2: ;\n" | ||
| 90 | /* Since the input registers which are loaded with iph and ihl | ||
| 91 | are modified, we must also specify them as outputs, or gcc | ||
| 92 | will assume they contain their original values. */ | ||
| 93 | : "=r" (sum), "=r" (iph), "=r" (ihl) | ||
| 94 | : "1" (iph), "2" (ihl) | ||
| 95 | : "memory"); | ||
| 96 | return (__force __sum16)sum; | ||
| 97 | } | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Fold a partial checksum | ||
| 101 | */ | ||
| 102 | |||
| 103 | static inline __sum16 csum_fold(__wsum sum) | ||
| 104 | { | ||
| 105 | __asm__( | ||
| 106 | "addl %1, %0 ;\n" | ||
| 107 | "adcl $0xffff, %0 ;\n" | ||
| 108 | : "=r" (sum) | ||
| 109 | : "r" ((__force u32)sum << 16), | ||
| 110 | "0" ((__force u32)sum & 0xffff0000) | ||
| 111 | ); | ||
| 112 | return (__force __sum16)(~(__force u32)sum >> 16); | ||
| 113 | } | ||
| 114 | |||
| 115 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | ||
| 116 | unsigned short len, | ||
| 117 | unsigned short proto, | ||
| 118 | __wsum sum) | ||
| 119 | { | ||
| 120 | __asm__( | ||
| 121 | "addl %1, %0 ;\n" | ||
| 122 | "adcl %2, %0 ;\n" | ||
| 123 | "adcl %3, %0 ;\n" | ||
| 124 | "adcl $0, %0 ;\n" | ||
| 125 | : "=r" (sum) | ||
| 126 | : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum)); | ||
| 127 | return sum; | ||
| 128 | } | ||
| 129 | |||
| 130 | /* | ||
| 131 | * computes the checksum of the TCP/UDP pseudo-header | ||
| 132 | * returns a 16-bit checksum, already complemented | ||
| 133 | */ | ||
| 134 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | ||
| 135 | unsigned short len, | ||
| 136 | unsigned short proto, | ||
| 137 | __wsum sum) | ||
| 138 | { | ||
| 139 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | ||
| 140 | } | ||
| 141 | |||
| 142 | /* | ||
| 143 | * this routine is used for miscellaneous IP-like checksums, mainly | ||
| 144 | * in icmp.c | ||
| 145 | */ | ||
| 146 | |||
| 147 | static inline __sum16 ip_compute_csum(const void *buff, int len) | ||
| 148 | { | ||
| 149 | return csum_fold (csum_partial(buff, len, 0)); | ||
| 150 | } | ||
| 151 | |||
| 152 | #define _HAVE_ARCH_IPV6_CSUM | ||
| 153 | static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | ||
| 154 | const struct in6_addr *daddr, | ||
| 155 | __u32 len, unsigned short proto, | ||
| 156 | __wsum sum) | ||
| 157 | { | ||
| 158 | __asm__( | ||
| 159 | "addl 0(%1), %0 ;\n" | ||
| 160 | "adcl 4(%1), %0 ;\n" | ||
| 161 | "adcl 8(%1), %0 ;\n" | ||
| 162 | "adcl 12(%1), %0 ;\n" | ||
| 163 | "adcl 0(%2), %0 ;\n" | ||
| 164 | "adcl 4(%2), %0 ;\n" | ||
| 165 | "adcl 8(%2), %0 ;\n" | ||
| 166 | "adcl 12(%2), %0 ;\n" | ||
| 167 | "adcl %3, %0 ;\n" | ||
| 168 | "adcl %4, %0 ;\n" | ||
| 169 | "adcl $0, %0 ;\n" | ||
| 170 | : "=&r" (sum) | ||
| 171 | : "r" (saddr), "r" (daddr), | ||
| 172 | "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); | ||
| 173 | |||
| 174 | return csum_fold(sum); | ||
| 175 | } | ||
| 176 | |||
| 177 | /* | ||
| 178 | * Copy and checksum to user | ||
| 179 | */ | ||
| 180 | #define HAVE_CSUM_COPY_USER | ||
| 181 | static __inline__ __wsum csum_and_copy_to_user(const void *src, | ||
| 182 | void __user *dst, | ||
| 183 | int len, __wsum sum, int *err_ptr) | ||
| 184 | { | ||
| 185 | if (access_ok(VERIFY_WRITE, dst, len)) { | ||
| 186 | if (copy_to_user(dst, src, len)) { | ||
| 187 | *err_ptr = -EFAULT; | ||
| 188 | return (__force __wsum)-1; | ||
| 189 | } | ||
| 190 | |||
| 191 | return csum_partial(src, len, sum); | ||
| 192 | } | ||
| 193 | |||
| 194 | if (len) | ||
| 195 | *err_ptr = -EFAULT; | ||
| 196 | |||
| 197 | return (__force __wsum)-1; /* invalid checksum */ | ||
| 198 | } | ||
| 199 | |||
| 200 | #endif | ||
| 201 | |||
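csum_fold() above collapses a 32-bit running sum into the final 16-bit one's-complement checksum using add/adc so that the carry is folded back in. A hedged plain-C rendering of the same arithmetic follows, for illustration only; the header keeps the asm version because it handles the carry in two instructions.

```c
/* Hedged sketch: plain-C equivalent of the csum_fold() asm above, showing
 * the one's-complement folding without the adc instructions. */
#include <stdio.h>
#include <stdint.h>

static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the two 16-bit halves */
	sum += sum >> 16;			/* fold in a possible carry */
	return (uint16_t)~sum;			/* one's-complement result */
}

int main(void)
{
	/* 0x0001f3ff folds to ~(0xf3ff + 0x0001) = ~0xf400 = 0x0bff */
	printf("%#06x\n", fold(0x0001f3ffu));
	return 0;
}
```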
diff --git a/arch/um/sys-i386/shared/sysdep/faultinfo.h b/arch/um/sys-i386/shared/sysdep/faultinfo.h new file mode 100644 index 00000000000..db437cc373b --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/faultinfo.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2004 Fujitsu Siemens Computers GmbH | ||
| 3 | * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com> | ||
| 4 | * Licensed under the GPL | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __FAULTINFO_I386_H | ||
| 8 | #define __FAULTINFO_I386_H | ||
| 9 | |||
| 10 | /* this structure contains the full arch-specific faultinfo | ||
| 11 | * from the traps. | ||
| 12 | * On i386, ptrace_faultinfo unfortunately doesn't provide | ||
| 13 | * all the info, since trap_no is missing. | ||
| 14 | * All common elements are defined at the same position in | ||
| 15 | * both structures, thus making it easy to copy the | ||
| 16 | * contents without knowledge about the structure elements. | ||
| 17 | */ | ||
| 18 | struct faultinfo { | ||
| 19 | int error_code; /* in ptrace_faultinfo misleadingly called is_write */ | ||
| 20 | unsigned long cr2; /* in ptrace_faultinfo called addr */ | ||
| 21 | int trap_no; /* missing in ptrace_faultinfo */ | ||
| 22 | }; | ||
| 23 | |||
| 24 | #define FAULT_WRITE(fi) ((fi).error_code & 2) | ||
| 25 | #define FAULT_ADDRESS(fi) ((fi).cr2) | ||
| 26 | |||
| 27 | #define PTRACE_FULL_FAULTINFO 0 | ||
| 28 | |||
| 29 | #endif | ||
diff --git a/arch/um/sys-i386/shared/sysdep/host_ldt.h b/arch/um/sys-i386/shared/sysdep/host_ldt.h new file mode 100644 index 00000000000..0953cc4df65 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/host_ldt.h | |||
| @@ -0,0 +1,34 @@ | |||
| 1 | #ifndef __ASM_HOST_LDT_I386_H | ||
| 2 | #define __ASM_HOST_LDT_I386_H | ||
| 3 | |||
| 4 | #include <asm/ldt.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * macros stolen from include/asm-i386/desc.h | ||
| 8 | */ | ||
| 9 | #define LDT_entry_a(info) \ | ||
| 10 | ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) | ||
| 11 | |||
| 12 | #define LDT_entry_b(info) \ | ||
| 13 | (((info)->base_addr & 0xff000000) | \ | ||
| 14 | (((info)->base_addr & 0x00ff0000) >> 16) | \ | ||
| 15 | ((info)->limit & 0xf0000) | \ | ||
| 16 | (((info)->read_exec_only ^ 1) << 9) | \ | ||
| 17 | ((info)->contents << 10) | \ | ||
| 18 | (((info)->seg_not_present ^ 1) << 15) | \ | ||
| 19 | ((info)->seg_32bit << 22) | \ | ||
| 20 | ((info)->limit_in_pages << 23) | \ | ||
| 21 | ((info)->useable << 20) | \ | ||
| 22 | 0x7000) | ||
| 23 | |||
| 24 | #define LDT_empty(info) (\ | ||
| 25 | (info)->base_addr == 0 && \ | ||
| 26 | (info)->limit == 0 && \ | ||
| 27 | (info)->contents == 0 && \ | ||
| 28 | (info)->read_exec_only == 1 && \ | ||
| 29 | (info)->seg_32bit == 0 && \ | ||
| 30 | (info)->limit_in_pages == 0 && \ | ||
| 31 | (info)->seg_not_present == 1 && \ | ||
| 32 | (info)->useable == 0 ) | ||
| 33 | |||
| 34 | #endif | ||
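LDT_entry_a()/LDT_entry_b() fold a user_desc-style description into the low and high 32-bit words of an x86 segment descriptor; the 0x7000 constant supplies the descriptor-type bit and DPL 3, while the present bit comes from seg_not_present. The hedged stand-alone sketch below reuses the same macros to print the two words for a flat 4 GB data segment; the struct mirrors user_desc but is local to the example.

```c
/* Hedged sketch: how LDT_entry_a()/LDT_entry_b() above pack a descriptor.
 * The struct mirrors the user_desc layout from <asm/ldt.h>; the values
 * are an example flat 32-bit segment, nothing this code installs. */
#include <stdio.h>

struct desc_info {
	unsigned int entry_number;
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};

#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

#define LDT_entry_b(info) \
	(((info)->base_addr & 0xff000000) | \
	 (((info)->base_addr & 0x00ff0000) >> 16) | \
	 ((info)->limit & 0xf0000) | \
	 (((info)->read_exec_only ^ 1) << 9) | \
	 ((info)->contents << 10) | \
	 (((info)->seg_not_present ^ 1) << 15) | \
	 ((info)->seg_32bit << 22) | \
	 ((info)->limit_in_pages << 23) | \
	 ((info)->useable << 20) | \
	 0x7000)

int main(void)
{
	struct desc_info d = {
		.limit          = 0xfffff,	/* 4 GB with limit_in_pages */
		.seg_32bit      = 1,
		.limit_in_pages = 1,
		.useable        = 1,
	};

	printf("a = %#010x, b = %#010x\n", LDT_entry_a(&d), LDT_entry_b(&d));
	return 0;
}
```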
diff --git a/arch/um/sys-i386/shared/sysdep/kernel-offsets.h b/arch/um/sys-i386/shared/sysdep/kernel-offsets.h new file mode 100644 index 00000000000..5868526b5ee --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/kernel-offsets.h | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | #include <linux/stddef.h> | ||
| 2 | #include <linux/sched.h> | ||
| 3 | #include <linux/elf.h> | ||
| 4 | #include <linux/crypto.h> | ||
| 5 | #include <asm/mman.h> | ||
| 6 | |||
| 7 | #define DEFINE(sym, val) \ | ||
| 8 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
| 9 | |||
| 10 | #define STR(x) #x | ||
| 11 | #define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : ) | ||
| 12 | |||
| 13 | #define BLANK() asm volatile("\n->" : : ) | ||
| 14 | |||
| 15 | #define OFFSET(sym, str, mem) \ | ||
| 16 | DEFINE(sym, offsetof(struct str, mem)); | ||
| 17 | |||
| 18 | void foo(void) | ||
| 19 | { | ||
| 20 | #include <common-offsets.h> | ||
| 21 | } | ||
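DEFINE() above smuggles compile-time constants into the compiler's assembly output: every invocation leaves a marker line beginning with "->" in the generated .s file, and the UML build scripts then turn those markers into plain #defines (the same trick the kernel's asm-offsets machinery uses). A hedged miniature of the mechanism, with a stand-in struct instead of a real kernel type:

```c
/* Hedged sketch: what DEFINE()/OFFSET() above boil down to.  Compiling
 * this file with "gcc -S" produces assembly containing a marker such as
 *     ->TASK_PID $<offset> offsetof(struct task, pid)
 * which a build-time sed pass can rewrite into "#define TASK_PID <offset>".
 * struct task here is a stand-in, not a kernel structure. */
#include <stddef.h>

struct task {
	long state;
	long flags;
	int pid;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void foo(void)
{
	DEFINE(TASK_PID, offsetof(struct task, pid));
}
```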
diff --git a/arch/um/sys-i386/shared/sysdep/ptrace.h b/arch/um/sys-i386/shared/sysdep/ptrace.h new file mode 100644 index 00000000000..c398a507611 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/ptrace.h | |||
| @@ -0,0 +1,171 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __SYSDEP_I386_PTRACE_H | ||
| 7 | #define __SYSDEP_I386_PTRACE_H | ||
| 8 | |||
| 9 | #include "user_constants.h" | ||
| 10 | #include "sysdep/faultinfo.h" | ||
| 11 | |||
| 12 | #define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long)) | ||
| 13 | #define MAX_REG_OFFSET (UM_FRAME_SIZE) | ||
| 14 | |||
| 15 | static inline void update_debugregs(int seq) {} | ||
| 16 | |||
| 17 | /* syscall emulation path in ptrace */ | ||
| 18 | |||
| 19 | #ifndef PTRACE_SYSEMU | ||
| 20 | #define PTRACE_SYSEMU 31 | ||
| 21 | #endif | ||
| 22 | |||
| 23 | void set_using_sysemu(int value); | ||
| 24 | int get_using_sysemu(void); | ||
| 25 | extern int sysemu_supported; | ||
| 26 | |||
| 27 | #include "skas_ptregs.h" | ||
| 28 | |||
| 29 | #define REGS_IP(r) ((r)[HOST_IP]) | ||
| 30 | #define REGS_SP(r) ((r)[HOST_SP]) | ||
| 31 | #define REGS_EFLAGS(r) ((r)[HOST_EFLAGS]) | ||
| 32 | #define REGS_EAX(r) ((r)[HOST_EAX]) | ||
| 33 | #define REGS_EBX(r) ((r)[HOST_EBX]) | ||
| 34 | #define REGS_ECX(r) ((r)[HOST_ECX]) | ||
| 35 | #define REGS_EDX(r) ((r)[HOST_EDX]) | ||
| 36 | #define REGS_ESI(r) ((r)[HOST_ESI]) | ||
| 37 | #define REGS_EDI(r) ((r)[HOST_EDI]) | ||
| 38 | #define REGS_EBP(r) ((r)[HOST_EBP]) | ||
| 39 | #define REGS_CS(r) ((r)[HOST_CS]) | ||
| 40 | #define REGS_SS(r) ((r)[HOST_SS]) | ||
| 41 | #define REGS_DS(r) ((r)[HOST_DS]) | ||
| 42 | #define REGS_ES(r) ((r)[HOST_ES]) | ||
| 43 | #define REGS_FS(r) ((r)[HOST_FS]) | ||
| 44 | #define REGS_GS(r) ((r)[HOST_GS]) | ||
| 45 | |||
| 46 | #define REGS_SET_SYSCALL_RETURN(r, res) REGS_EAX(r) = (res) | ||
| 47 | |||
| 48 | #define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r)) | ||
| 49 | |||
| 50 | #ifndef PTRACE_SYSEMU_SINGLESTEP | ||
| 51 | #define PTRACE_SYSEMU_SINGLESTEP 32 | ||
| 52 | #endif | ||
| 53 | |||
| 54 | struct uml_pt_regs { | ||
| 55 | unsigned long gp[MAX_REG_NR]; | ||
| 56 | unsigned long fp[HOST_FPX_SIZE]; | ||
| 57 | struct faultinfo faultinfo; | ||
| 58 | long syscall; | ||
| 59 | int is_user; | ||
| 60 | }; | ||
| 61 | |||
| 62 | #define EMPTY_UML_PT_REGS { } | ||
| 63 | |||
| 64 | #define UPT_IP(r) REGS_IP((r)->gp) | ||
| 65 | #define UPT_SP(r) REGS_SP((r)->gp) | ||
| 66 | #define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp) | ||
| 67 | #define UPT_EAX(r) REGS_EAX((r)->gp) | ||
| 68 | #define UPT_EBX(r) REGS_EBX((r)->gp) | ||
| 69 | #define UPT_ECX(r) REGS_ECX((r)->gp) | ||
| 70 | #define UPT_EDX(r) REGS_EDX((r)->gp) | ||
| 71 | #define UPT_ESI(r) REGS_ESI((r)->gp) | ||
| 72 | #define UPT_EDI(r) REGS_EDI((r)->gp) | ||
| 73 | #define UPT_EBP(r) REGS_EBP((r)->gp) | ||
| 74 | #define UPT_ORIG_EAX(r) ((r)->syscall) | ||
| 75 | #define UPT_CS(r) REGS_CS((r)->gp) | ||
| 76 | #define UPT_SS(r) REGS_SS((r)->gp) | ||
| 77 | #define UPT_DS(r) REGS_DS((r)->gp) | ||
| 78 | #define UPT_ES(r) REGS_ES((r)->gp) | ||
| 79 | #define UPT_FS(r) REGS_FS((r)->gp) | ||
| 80 | #define UPT_GS(r) REGS_GS((r)->gp) | ||
| 81 | |||
| 82 | #define UPT_SYSCALL_ARG1(r) UPT_EBX(r) | ||
| 83 | #define UPT_SYSCALL_ARG2(r) UPT_ECX(r) | ||
| 84 | #define UPT_SYSCALL_ARG3(r) UPT_EDX(r) | ||
| 85 | #define UPT_SYSCALL_ARG4(r) UPT_ESI(r) | ||
| 86 | #define UPT_SYSCALL_ARG5(r) UPT_EDI(r) | ||
| 87 | #define UPT_SYSCALL_ARG6(r) UPT_EBP(r) | ||
| 88 | |||
| 89 | extern int user_context(unsigned long sp); | ||
| 90 | |||
| 91 | #define UPT_IS_USER(r) ((r)->is_user) | ||
| 92 | |||
| 93 | struct syscall_args { | ||
| 94 | unsigned long args[6]; | ||
| 95 | }; | ||
| 96 | |||
| 97 | #define SYSCALL_ARGS(r) ((struct syscall_args) \ | ||
| 98 | { .args = { UPT_SYSCALL_ARG1(r), \ | ||
| 99 | UPT_SYSCALL_ARG2(r), \ | ||
| 100 | UPT_SYSCALL_ARG3(r), \ | ||
| 101 | UPT_SYSCALL_ARG4(r), \ | ||
| 102 | UPT_SYSCALL_ARG5(r), \ | ||
| 103 | UPT_SYSCALL_ARG6(r) } } ) | ||
| 104 | |||
| 105 | #define UPT_REG(regs, reg) \ | ||
| 106 | ({ unsigned long val; \ | ||
| 107 | switch(reg){ \ | ||
| 108 | case EIP: val = UPT_IP(regs); break; \ | ||
| 109 | case UESP: val = UPT_SP(regs); break; \ | ||
| 110 | case EAX: val = UPT_EAX(regs); break; \ | ||
| 111 | case EBX: val = UPT_EBX(regs); break; \ | ||
| 112 | case ECX: val = UPT_ECX(regs); break; \ | ||
| 113 | case EDX: val = UPT_EDX(regs); break; \ | ||
| 114 | case ESI: val = UPT_ESI(regs); break; \ | ||
| 115 | case EDI: val = UPT_EDI(regs); break; \ | ||
| 116 | case EBP: val = UPT_EBP(regs); break; \ | ||
| 117 | case ORIG_EAX: val = UPT_ORIG_EAX(regs); break; \ | ||
| 118 | case CS: val = UPT_CS(regs); break; \ | ||
| 119 | case SS: val = UPT_SS(regs); break; \ | ||
| 120 | case DS: val = UPT_DS(regs); break; \ | ||
| 121 | case ES: val = UPT_ES(regs); break; \ | ||
| 122 | case FS: val = UPT_FS(regs); break; \ | ||
| 123 | case GS: val = UPT_GS(regs); break; \ | ||
| 124 | case EFL: val = UPT_EFLAGS(regs); break; \ | ||
| 125 | default : \ | ||
| 126 | panic("Bad register in UPT_REG : %d\n", reg); \ | ||
| 127 | val = -1; \ | ||
| 128 | } \ | ||
| 129 | val; \ | ||
| 130 | }) | ||
| 131 | |||
| 132 | #define UPT_SET(regs, reg, val) \ | ||
| 133 | do { \ | ||
| 134 | switch(reg){ \ | ||
| 135 | case EIP: UPT_IP(regs) = val; break; \ | ||
| 136 | case UESP: UPT_SP(regs) = val; break; \ | ||
| 137 | case EAX: UPT_EAX(regs) = val; break; \ | ||
| 138 | case EBX: UPT_EBX(regs) = val; break; \ | ||
| 139 | case ECX: UPT_ECX(regs) = val; break; \ | ||
| 140 | case EDX: UPT_EDX(regs) = val; break; \ | ||
| 141 | case ESI: UPT_ESI(regs) = val; break; \ | ||
| 142 | case EDI: UPT_EDI(regs) = val; break; \ | ||
| 143 | case EBP: UPT_EBP(regs) = val; break; \ | ||
| 144 | case ORIG_EAX: UPT_ORIG_EAX(regs) = val; break; \ | ||
| 145 | case CS: UPT_CS(regs) = val; break; \ | ||
| 146 | case SS: UPT_SS(regs) = val; break; \ | ||
| 147 | case DS: UPT_DS(regs) = val; break; \ | ||
| 148 | case ES: UPT_ES(regs) = val; break; \ | ||
| 149 | case FS: UPT_FS(regs) = val; break; \ | ||
| 150 | case GS: UPT_GS(regs) = val; break; \ | ||
| 151 | case EFL: UPT_EFLAGS(regs) = val; break; \ | ||
| 152 | default : \ | ||
| 153 | panic("Bad register in UPT_SET : %d\n", reg); \ | ||
| 154 | break; \ | ||
| 155 | } \ | ||
| 156 | } while (0) | ||
| 157 | |||
| 158 | #define UPT_SET_SYSCALL_RETURN(r, res) \ | ||
| 159 | REGS_SET_SYSCALL_RETURN((r)->regs, (res)) | ||
| 160 | |||
| 161 | #define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp) | ||
| 162 | |||
| 163 | #define UPT_ORIG_SYSCALL(r) UPT_EAX(r) | ||
| 164 | #define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r) | ||
| 165 | #define UPT_SYSCALL_RET(r) UPT_EAX(r) | ||
| 166 | |||
| 167 | #define UPT_FAULTINFO(r) (&(r)->faultinfo) | ||
| 168 | |||
| 169 | extern void arch_init_registers(int pid); | ||
| 170 | |||
| 171 | #endif | ||
diff --git a/arch/um/sys-i386/shared/sysdep/ptrace_user.h b/arch/um/sys-i386/shared/sysdep/ptrace_user.h new file mode 100644 index 00000000000..ef56247e414 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/ptrace_user.h | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __SYSDEP_I386_PTRACE_USER_H__ | ||
| 7 | #define __SYSDEP_I386_PTRACE_USER_H__ | ||
| 8 | |||
| 9 | #include <sys/ptrace.h> | ||
| 10 | #include <linux/ptrace.h> | ||
| 11 | #include <asm/ptrace.h> | ||
| 12 | #include "user_constants.h" | ||
| 13 | |||
| 14 | #define PT_OFFSET(r) ((r) * sizeof(long)) | ||
| 15 | |||
| 16 | #define PT_SYSCALL_NR(regs) ((regs)[ORIG_EAX]) | ||
| 17 | #define PT_SYSCALL_NR_OFFSET PT_OFFSET(ORIG_EAX) | ||
| 18 | |||
| 19 | #define PT_SYSCALL_ARG1_OFFSET PT_OFFSET(EBX) | ||
| 20 | #define PT_SYSCALL_ARG2_OFFSET PT_OFFSET(ECX) | ||
| 21 | #define PT_SYSCALL_ARG3_OFFSET PT_OFFSET(EDX) | ||
| 22 | #define PT_SYSCALL_ARG4_OFFSET PT_OFFSET(ESI) | ||
| 23 | #define PT_SYSCALL_ARG5_OFFSET PT_OFFSET(EDI) | ||
| 24 | #define PT_SYSCALL_ARG6_OFFSET PT_OFFSET(EBP) | ||
| 25 | |||
| 26 | #define PT_SYSCALL_RET_OFFSET PT_OFFSET(EAX) | ||
| 27 | |||
| 28 | #define REGS_SYSCALL_NR EAX /* This is used before a system call */ | ||
| 29 | #define REGS_SYSCALL_ARG1 EBX | ||
| 30 | #define REGS_SYSCALL_ARG2 ECX | ||
| 31 | #define REGS_SYSCALL_ARG3 EDX | ||
| 32 | #define REGS_SYSCALL_ARG4 ESI | ||
| 33 | #define REGS_SYSCALL_ARG5 EDI | ||
| 34 | #define REGS_SYSCALL_ARG6 EBP | ||
| 35 | |||
| 36 | #define REGS_IP_INDEX EIP | ||
| 37 | #define REGS_SP_INDEX UESP | ||
| 38 | |||
| 39 | #define PT_IP_OFFSET PT_OFFSET(EIP) | ||
| 40 | #define PT_IP(regs) ((regs)[EIP]) | ||
| 41 | #define PT_SP_OFFSET PT_OFFSET(UESP) | ||
| 42 | #define PT_SP(regs) ((regs)[UESP]) | ||
| 43 | |||
| 44 | #define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE) | ||
| 45 | |||
| 46 | #ifndef FRAME_SIZE | ||
| 47 | #define FRAME_SIZE (17) | ||
| 48 | #endif | ||
| 49 | |||
| 50 | #endif | ||
diff --git a/arch/um/sys-i386/shared/sysdep/sc.h b/arch/um/sys-i386/shared/sysdep/sc.h new file mode 100644 index 00000000000..c57d1780ad3 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/sc.h | |||
| @@ -0,0 +1,44 @@ | |||
| 1 | #ifndef __SYSDEP_I386_SC_H | ||
| 2 | #define __SYSDEP_I386_SC_H | ||
| 3 | |||
| 4 | #include <user_constants.h> | ||
| 5 | |||
| 6 | #define SC_OFFSET(sc, field) \ | ||
| 7 | *((unsigned long *) &(((char *) (sc))[HOST_##field])) | ||
| 8 | #define SC_FP_OFFSET(sc, field) \ | ||
| 9 | *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field])) | ||
| 10 | #define SC_FP_OFFSET_PTR(sc, field, type) \ | ||
| 11 | ((type *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field])) | ||
| 12 | |||
| 13 | #define SC_IP(sc) SC_OFFSET(sc, SC_IP) | ||
| 14 | #define SC_SP(sc) SC_OFFSET(sc, SC_SP) | ||
| 15 | #define SC_FS(sc) SC_OFFSET(sc, SC_FS) | ||
| 16 | #define SC_GS(sc) SC_OFFSET(sc, SC_GS) | ||
| 17 | #define SC_DS(sc) SC_OFFSET(sc, SC_DS) | ||
| 18 | #define SC_ES(sc) SC_OFFSET(sc, SC_ES) | ||
| 19 | #define SC_SS(sc) SC_OFFSET(sc, SC_SS) | ||
| 20 | #define SC_CS(sc) SC_OFFSET(sc, SC_CS) | ||
| 21 | #define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS) | ||
| 22 | #define SC_EAX(sc) SC_OFFSET(sc, SC_EAX) | ||
| 23 | #define SC_EBX(sc) SC_OFFSET(sc, SC_EBX) | ||
| 24 | #define SC_ECX(sc) SC_OFFSET(sc, SC_ECX) | ||
| 25 | #define SC_EDX(sc) SC_OFFSET(sc, SC_EDX) | ||
| 26 | #define SC_EDI(sc) SC_OFFSET(sc, SC_EDI) | ||
| 27 | #define SC_ESI(sc) SC_OFFSET(sc, SC_ESI) | ||
| 28 | #define SC_EBP(sc) SC_OFFSET(sc, SC_EBP) | ||
| 29 | #define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO) | ||
| 30 | #define SC_ERR(sc) SC_OFFSET(sc, SC_ERR) | ||
| 31 | #define SC_CR2(sc) SC_OFFSET(sc, SC_CR2) | ||
| 32 | #define SC_FPSTATE(sc) SC_OFFSET(sc, SC_FPSTATE) | ||
| 33 | #define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK) | ||
| 34 | #define SC_FP_CW(sc) SC_FP_OFFSET(sc, SC_FP_CW) | ||
| 35 | #define SC_FP_SW(sc) SC_FP_OFFSET(sc, SC_FP_SW) | ||
| 36 | #define SC_FP_TAG(sc) SC_FP_OFFSET(sc, SC_FP_TAG) | ||
| 37 | #define SC_FP_IPOFF(sc) SC_FP_OFFSET(sc, SC_FP_IPOFF) | ||
| 38 | #define SC_FP_CSSEL(sc) SC_FP_OFFSET(sc, SC_FP_CSSEL) | ||
| 39 | #define SC_FP_DATAOFF(sc) SC_FP_OFFSET(sc, SC_FP_DATAOFF) | ||
| 40 | #define SC_FP_DATASEL(sc) SC_FP_OFFSET(sc, SC_FP_DATASEL) | ||
| 41 | #define SC_FP_ST(sc) SC_FP_OFFSET_PTR(sc, SC_FP_ST, struct _fpstate) | ||
| 42 | #define SC_FXSR_ENV(sc) SC_FP_OFFSET_PTR(sc, SC_FXSR_ENV, void) | ||
| 43 | |||
| 44 | #endif | ||
diff --git a/arch/um/sys-i386/shared/sysdep/sigcontext.h b/arch/um/sys-i386/shared/sysdep/sigcontext.h new file mode 100644 index 00000000000..f583c87111a --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/sigcontext.h | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __SYS_SIGCONTEXT_I386_H | ||
| 7 | #define __SYS_SIGCONTEXT_I386_H | ||
| 8 | |||
| 9 | #include "sysdep/sc.h" | ||
| 10 | |||
| 11 | #define IP_RESTART_SYSCALL(ip) ((ip) -= 2) | ||
| 12 | |||
| 13 | #define GET_FAULTINFO_FROM_SC(fi, sc) \ | ||
| 14 | { \ | ||
| 15 | (fi).cr2 = SC_CR2(sc); \ | ||
| 16 | (fi).error_code = SC_ERR(sc); \ | ||
| 17 | (fi).trap_no = SC_TRAPNO(sc); \ | ||
| 18 | } | ||
| 19 | |||
| 20 | /* This is Page Fault */ | ||
| 21 | #define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) | ||
| 22 | |||
| 23 | /* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */ | ||
| 24 | #define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo) | ||
| 25 | |||
| 26 | #endif | ||
diff --git a/arch/um/sys-i386/shared/sysdep/skas_ptrace.h b/arch/um/sys-i386/shared/sysdep/skas_ptrace.h new file mode 100644 index 00000000000..e27b8a79177 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/skas_ptrace.h | |||
| @@ -0,0 +1,22 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __SYSDEP_I386_SKAS_PTRACE_H | ||
| 7 | #define __SYSDEP_I386_SKAS_PTRACE_H | ||
| 8 | |||
| 9 | struct ptrace_faultinfo { | ||
| 10 | int is_write; | ||
| 11 | unsigned long addr; | ||
| 12 | }; | ||
| 13 | |||
| 14 | struct ptrace_ldt { | ||
| 15 | int func; | ||
| 16 | void *ptr; | ||
| 17 | unsigned long bytecount; | ||
| 18 | }; | ||
| 19 | |||
| 20 | #define PTRACE_LDT 54 | ||
| 21 | |||
| 22 | #endif | ||
diff --git a/arch/um/sys-i386/shared/sysdep/stub.h b/arch/um/sys-i386/shared/sysdep/stub.h new file mode 100644 index 00000000000..977dedd9221 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/stub.h | |||
| @@ -0,0 +1,101 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __SYSDEP_STUB_H | ||
| 7 | #define __SYSDEP_STUB_H | ||
| 8 | |||
| 9 | #include <sys/mman.h> | ||
| 10 | #include <asm/ptrace.h> | ||
| 11 | #include <asm/unistd.h> | ||
| 12 | #include "as-layout.h" | ||
| 13 | #include "stub-data.h" | ||
| 14 | #include "kern_constants.h" | ||
| 15 | |||
| 16 | extern void stub_segv_handler(int sig); | ||
| 17 | extern void stub_clone_handler(void); | ||
| 18 | |||
| 19 | #define STUB_SYSCALL_RET EAX | ||
| 20 | #define STUB_MMAP_NR __NR_mmap2 | ||
| 21 | #define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT) | ||
| 22 | |||
| 23 | static inline long stub_syscall0(long syscall) | ||
| 24 | { | ||
| 25 | long ret; | ||
| 26 | |||
| 27 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall)); | ||
| 28 | |||
| 29 | return ret; | ||
| 30 | } | ||
| 31 | |||
| 32 | static inline long stub_syscall1(long syscall, long arg1) | ||
| 33 | { | ||
| 34 | long ret; | ||
| 35 | |||
| 36 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1)); | ||
| 37 | |||
| 38 | return ret; | ||
| 39 | } | ||
| 40 | |||
| 41 | static inline long stub_syscall2(long syscall, long arg1, long arg2) | ||
| 42 | { | ||
| 43 | long ret; | ||
| 44 | |||
| 45 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1), | ||
| 46 | "c" (arg2)); | ||
| 47 | |||
| 48 | return ret; | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3) | ||
| 52 | { | ||
| 53 | long ret; | ||
| 54 | |||
| 55 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1), | ||
| 56 | "c" (arg2), "d" (arg3)); | ||
| 57 | |||
| 58 | return ret; | ||
| 59 | } | ||
| 60 | |||
| 61 | static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3, | ||
| 62 | long arg4) | ||
| 63 | { | ||
| 64 | long ret; | ||
| 65 | |||
| 66 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1), | ||
| 67 | "c" (arg2), "d" (arg3), "S" (arg4)); | ||
| 68 | |||
| 69 | return ret; | ||
| 70 | } | ||
| 71 | |||
| 72 | static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3, | ||
| 73 | long arg4, long arg5) | ||
| 74 | { | ||
| 75 | long ret; | ||
| 76 | |||
| 77 | __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1), | ||
| 78 | "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)); | ||
| 79 | |||
| 80 | return ret; | ||
| 81 | } | ||
| 82 | |||
| 83 | static inline void trap_myself(void) | ||
| 84 | { | ||
| 85 | __asm("int3"); | ||
| 86 | } | ||
| 87 | |||
| 88 | static inline void remap_stack(int fd, unsigned long offset) | ||
| 89 | { | ||
| 90 | __asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;" | ||
| 91 | "movl %7, %%ebx ; movl %%eax, (%%ebx)" | ||
| 92 | : : "g" (STUB_MMAP_NR), "b" (STUB_DATA), | ||
| 93 | "c" (UM_KERN_PAGE_SIZE), | ||
| 94 | "d" (PROT_READ | PROT_WRITE), | ||
| 95 | "S" (MAP_FIXED | MAP_SHARED), "D" (fd), | ||
| 96 | "a" (offset), | ||
| 97 | "i" (&((struct stub_data *) STUB_DATA)->err) | ||
| 98 | : "memory"); | ||
| 99 | } | ||
| 100 | |||
| 101 | #endif | ||
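The stub_syscallN() helpers encode the i386 int $0x80 convention: %eax carries the syscall number and %ebx, %ecx, %edx, %esi, %edi (plus %ebp for a sixth argument) carry the arguments. Below is a hedged, free-standing sketch of the same convention, using the three-argument form to issue write(2) directly; it only builds for 32-bit x86 (e.g. gcc -m32) and assumes Linux kernel headers provide __NR_write via <asm/unistd.h>.

```c
/* Hedged sketch: the int $0x80 calling convention the stub_syscallN()
 * helpers above rely on, reduced to a 32-bit-x86-only example that
 * bypasses libc for the actual write. */
#include <asm/unistd.h>		/* __NR_write (assumes kernel headers) */

static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret)
			  : "0" (syscall), "b" (arg1), "c" (arg2), "d" (arg3));
	return ret;
}

int main(void)
{
	static const char msg[] = "hello from int $0x80\n";

	/* write(1, msg, len) issued directly via the trap */
	stub_syscall3(__NR_write, 1, (long) msg, sizeof(msg) - 1);
	return 0;
}
```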
diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h new file mode 100644 index 00000000000..05cb796aecb --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/syscalls.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include "asm/unistd.h" | ||
| 7 | #include "sysdep/ptrace.h" | ||
| 8 | |||
| 9 | typedef long syscall_handler_t(struct pt_regs); | ||
| 10 | |||
| 11 | /* Not declared on x86, incompatible declarations on x86_64, so these have | ||
| 12 | * to go here rather than in sys_call_table.c | ||
| 13 | */ | ||
| 14 | extern syscall_handler_t sys_rt_sigaction; | ||
| 15 | |||
| 16 | extern syscall_handler_t *sys_call_table[]; | ||
| 17 | |||
| 18 | #define EXECUTE_SYSCALL(syscall, regs) \ | ||
| 19 | ((long (*)(struct syscall_args)) \ | ||
| 20 | (*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) | ||
diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h new file mode 100644 index 00000000000..d1b93c43620 --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/system.h | |||
| @@ -0,0 +1,132 @@ | |||
| 1 | #ifndef _ASM_X86_SYSTEM_H_ | ||
| 2 | #define _ASM_X86_SYSTEM_H_ | ||
| 3 | |||
| 4 | #include <asm/asm.h> | ||
| 5 | #include <asm/segment.h> | ||
| 6 | #include <asm/cpufeature.h> | ||
| 7 | #include <asm/cmpxchg.h> | ||
| 8 | #include <asm/nops.h> | ||
| 9 | |||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/irqflags.h> | ||
| 12 | |||
| 13 | /* entries in ARCH_DLINFO: */ | ||
| 14 | #ifdef CONFIG_IA32_EMULATION | ||
| 15 | # define AT_VECTOR_SIZE_ARCH 2 | ||
| 16 | #else | ||
| 17 | # define AT_VECTOR_SIZE_ARCH 1 | ||
| 18 | #endif | ||
| 19 | |||
| 20 | extern unsigned long arch_align_stack(unsigned long sp); | ||
| 21 | |||
| 22 | void default_idle(void); | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Force strict CPU ordering. | ||
| 26 | * And yes, this is required on UP too when we're talking | ||
| 27 | * to devices. | ||
| 28 | */ | ||
| 29 | #ifdef CONFIG_X86_32 | ||
| 30 | /* | ||
| 31 | * Some non-Intel clones support out of order store. wmb() ceases to be a | ||
| 32 | * nop for these. | ||
| 33 | */ | ||
| 34 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
| 35 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
| 36 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
| 37 | #else | ||
| 38 | #define mb() asm volatile("mfence":::"memory") | ||
| 39 | #define rmb() asm volatile("lfence":::"memory") | ||
| 40 | #define wmb() asm volatile("sfence" ::: "memory") | ||
| 41 | #endif | ||
| 42 | |||
| 43 | /** | ||
| 44 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
| 45 | * depend on. | ||
| 46 | * | ||
| 47 | * No data-dependent reads from memory-like regions are ever reordered | ||
| 48 | * over this barrier. All reads preceding this primitive are guaranteed | ||
| 49 | * to access memory (but not necessarily other CPUs' caches) before any | ||
| 50 | * reads following this primitive that depend on the data returned by | ||
| 51 | * any of the preceding reads. This primitive is much lighter weight than | ||
| 52 | * rmb() on most CPUs, and is never heavier weight than is | ||
| 53 | * rmb(). | ||
| 54 | * | ||
| 55 | * These ordering constraints are respected by both the local CPU | ||
| 56 | * and the compiler. | ||
| 57 | * | ||
| 58 | * Ordering is not guaranteed by anything other than these primitives, | ||
| 59 | * not even by data dependencies. See the documentation for | ||
| 60 | * memory_barrier() for examples and URLs to more information. | ||
| 61 | * | ||
| 62 | * For example, the following code would force ordering (the initial | ||
| 63 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
| 64 | * | ||
| 65 | * <programlisting> | ||
| 66 | * CPU 0 CPU 1 | ||
| 67 | * | ||
| 68 | * b = 2; | ||
| 69 | * memory_barrier(); | ||
| 70 | * p = &b; q = p; | ||
| 71 | * read_barrier_depends(); | ||
| 72 | * d = *q; | ||
| 73 | * </programlisting> | ||
| 74 | * | ||
| 75 | * because the read of "*q" depends on the read of "p" and these | ||
| 76 | * two reads are separated by a read_barrier_depends(). However, | ||
| 77 | * the following code, with the same initial values for "a" and "b": | ||
| 78 | * | ||
| 79 | * <programlisting> | ||
| 80 | * CPU 0 CPU 1 | ||
| 81 | * | ||
| 82 | * a = 2; | ||
| 83 | * memory_barrier(); | ||
| 84 | * b = 3; y = b; | ||
| 85 | * read_barrier_depends(); | ||
| 86 | * x = a; | ||
| 87 | * </programlisting> | ||
| 88 | * | ||
| 89 | * does not enforce ordering, since there is no data dependency between | ||
| 90 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
| 91 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
| 92 | * in cases like this where there are no data dependencies. | ||
| 93 | **/ | ||
| 94 | |||
| 95 | #define read_barrier_depends() do { } while (0) | ||
| 96 | |||
| 97 | #ifdef CONFIG_SMP | ||
| 98 | #define smp_mb() mb() | ||
| 99 | #ifdef CONFIG_X86_PPRO_FENCE | ||
| 100 | # define smp_rmb() rmb() | ||
| 101 | #else | ||
| 102 | # define smp_rmb() barrier() | ||
| 103 | #endif | ||
| 104 | #ifdef CONFIG_X86_OOSTORE | ||
| 105 | # define smp_wmb() wmb() | ||
| 106 | #else | ||
| 107 | # define smp_wmb() barrier() | ||
| 108 | #endif | ||
| 109 | #define smp_read_barrier_depends() read_barrier_depends() | ||
| 110 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | ||
| 111 | #else | ||
| 112 | #define smp_mb() barrier() | ||
| 113 | #define smp_rmb() barrier() | ||
| 114 | #define smp_wmb() barrier() | ||
| 115 | #define smp_read_barrier_depends() do { } while (0) | ||
| 116 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
| 117 | #endif | ||
| 118 | |||
| 119 | /* | ||
| 120 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | ||
| 121 | * (or get_cycles or vread that possibly accesses the TSC) in a defined | ||
| 122 | * code region. | ||
| 123 | * | ||
| 124 | * (Could use a three-way alternative() for this if there were one.) | ||
| 125 | */ | ||
| 126 | static inline void rdtsc_barrier(void) | ||
| 127 | { | ||
| 128 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | ||
| 129 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | ||
| 130 | } | ||
| 131 | |||
| 132 | #endif | ||
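The smp_wmb()/smp_rmb() pair above exists for the classic publish/consume pattern: the producer orders its data store before the flag store, and the consumer orders the flag load before the data load. A hedged user-space sketch of that pairing follows, using GCC's __sync_synchronize() as a stand-in barrier since the kernel macros are not available outside the tree.

```c
/* Hedged sketch: producer/consumer pairing that smp_wmb()/smp_rmb() above
 * are meant for, with __sync_synchronize() standing in for the kernel
 * barriers so it compiles as an ordinary program. */
#include <stdio.h>

static int data;
static volatile int ready;

static void producer(void)
{
	data = 42;
	__sync_synchronize();	/* kernel code would use smp_wmb() here */
	ready = 1;
}

static void consumer(void)
{
	if (ready) {
		__sync_synchronize();	/* and smp_rmb() here */
		printf("data = %d\n", data);	/* sees the value published above */
	}
}

int main(void)
{
	producer();
	consumer();
	return 0;
}
```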
diff --git a/arch/um/sys-i386/shared/sysdep/tls.h b/arch/um/sys-i386/shared/sysdep/tls.h new file mode 100644 index 00000000000..34550755b2a --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/tls.h | |||
| @@ -0,0 +1,32 @@ | |||
| 1 | #ifndef _SYSDEP_TLS_H | ||
| 2 | #define _SYSDEP_TLS_H | ||
| 3 | |||
| 4 | # ifndef __KERNEL__ | ||
| 5 | |||
| 6 | /* Change the name to avoid conflicts with the original one from <asm/ldt.h>, | ||
| 7 | * which may be named user_desc (in 2.4, and in the header matching its API, it | ||
| 8 | * was named modify_ldt_ldt_s). */ | ||
| 9 | |||
| 10 | typedef struct um_dup_user_desc { | ||
| 11 | unsigned int entry_number; | ||
| 12 | unsigned int base_addr; | ||
| 13 | unsigned int limit; | ||
| 14 | unsigned int seg_32bit:1; | ||
| 15 | unsigned int contents:2; | ||
| 16 | unsigned int read_exec_only:1; | ||
| 17 | unsigned int limit_in_pages:1; | ||
| 18 | unsigned int seg_not_present:1; | ||
| 19 | unsigned int useable:1; | ||
| 20 | } user_desc_t; | ||
| 21 | |||
| 22 | # else /* __KERNEL__ */ | ||
| 23 | |||
| 24 | # include <ldt.h> | ||
| 25 | typedef struct user_desc user_desc_t; | ||
| 26 | |||
| 27 | # endif /* __KERNEL__ */ | ||
| 28 | |||
| 29 | #define GDT_ENTRY_TLS_MIN_I386 6 | ||
| 30 | #define GDT_ENTRY_TLS_MIN_X86_64 12 | ||
| 31 | |||
| 32 | #endif /* _SYSDEP_TLS_H */ | ||
diff --git a/arch/um/sys-i386/shared/sysdep/vm-flags.h b/arch/um/sys-i386/shared/sysdep/vm-flags.h new file mode 100644 index 00000000000..e0d24c568db --- /dev/null +++ b/arch/um/sys-i386/shared/sysdep/vm-flags.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __VM_FLAGS_I386_H | ||
| 7 | #define __VM_FLAGS_I386_H | ||
| 8 | |||
| 9 | #define VM_DATA_DEFAULT_FLAGS \ | ||
| 10 | (VM_READ | VM_WRITE | \ | ||
| 11 | ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ | ||
| 12 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
| 13 | |||
| 14 | #endif | ||
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c new file mode 100644 index 00000000000..89a46626bfd --- /dev/null +++ b/arch/um/sys-i386/signal.c | |||
| @@ -0,0 +1,508 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/ptrace.h> | ||
| 7 | #include <asm/unistd.h> | ||
| 8 | #include <asm/uaccess.h> | ||
| 9 | #include <asm/ucontext.h> | ||
| 10 | #include "frame_kern.h" | ||
| 11 | #include "skas.h" | ||
| 12 | |||
| 13 | void copy_sc(struct uml_pt_regs *regs, void *from) | ||
| 14 | { | ||
| 15 | struct sigcontext *sc = from; | ||
| 16 | |||
| 17 | REGS_GS(regs->gp) = sc->gs; | ||
| 18 | REGS_FS(regs->gp) = sc->fs; | ||
| 19 | REGS_ES(regs->gp) = sc->es; | ||
| 20 | REGS_DS(regs->gp) = sc->ds; | ||
| 21 | REGS_EDI(regs->gp) = sc->di; | ||
| 22 | REGS_ESI(regs->gp) = sc->si; | ||
| 23 | REGS_EBP(regs->gp) = sc->bp; | ||
| 24 | REGS_SP(regs->gp) = sc->sp; | ||
| 25 | REGS_EBX(regs->gp) = sc->bx; | ||
| 26 | REGS_EDX(regs->gp) = sc->dx; | ||
| 27 | REGS_ECX(regs->gp) = sc->cx; | ||
| 28 | REGS_EAX(regs->gp) = sc->ax; | ||
| 29 | REGS_IP(regs->gp) = sc->ip; | ||
| 30 | REGS_CS(regs->gp) = sc->cs; | ||
| 31 | REGS_EFLAGS(regs->gp) = sc->flags; | ||
| 32 | REGS_SS(regs->gp) = sc->ss; | ||
| 33 | } | ||
| 34 | |||
| 35 | /* | ||
| 36 | * FPU tag word conversions. | ||
| 37 | */ | ||
| 38 | |||
| 39 | static inline unsigned short twd_i387_to_fxsr(unsigned short twd) | ||
| 40 | { | ||
| 41 | unsigned int tmp; /* to avoid 16 bit prefixes in the code */ | ||
| 42 | |||
| 43 | /* Transform each pair of bits into 01 (valid) or 00 (empty) */ | ||
| 44 | tmp = ~twd; | ||
| 45 | tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ | ||
| 46 | /* and move the valid bits to the lower byte. */ | ||
| 47 | tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ | ||
| 48 | tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ | ||
| 49 | tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ | ||
| 50 | return tmp; | ||
| 51 | } | ||
| 52 | |||
| 53 | static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave) | ||
| 54 | { | ||
| 55 | struct _fpxreg *st = NULL; | ||
| 56 | unsigned long twd = (unsigned long) fxsave->twd; | ||
| 57 | unsigned long tag; | ||
| 58 | unsigned long ret = 0xffff0000; | ||
| 59 | int i; | ||
| 60 | |||
| 61 | #define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16) | ||
| 62 | |||
| 63 | for (i = 0; i < 8; i++) { | ||
| 64 | if (twd & 0x1) { | ||
| 65 | st = (struct _fpxreg *) FPREG_ADDR(fxsave, i); | ||
| 66 | |||
| 67 | switch (st->exponent & 0x7fff) { | ||
| 68 | case 0x7fff: | ||
| 69 | tag = 2; /* Special */ | ||
| 70 | break; | ||
| 71 | case 0x0000: | ||
| 72 | if ( !st->significand[0] && | ||
| 73 | !st->significand[1] && | ||
| 74 | !st->significand[2] && | ||
| 75 | !st->significand[3] ) { | ||
| 76 | tag = 1; /* Zero */ | ||
| 77 | } else { | ||
| 78 | tag = 2; /* Special */ | ||
| 79 | } | ||
| 80 | break; | ||
| 81 | default: | ||
| 82 | if (st->significand[3] & 0x8000) { | ||
| 83 | tag = 0; /* Valid */ | ||
| 84 | } else { | ||
| 85 | tag = 2; /* Special */ | ||
| 86 | } | ||
| 87 | break; | ||
| 88 | } | ||
| 89 | } else { | ||
| 90 | tag = 3; /* Empty */ | ||
| 91 | } | ||
| 92 | ret |= (tag << (2 * i)); | ||
| 93 | twd = twd >> 1; | ||
| 94 | } | ||
| 95 | return ret; | ||
| 96 | } | ||
| 97 | |||
| 98 | static int convert_fxsr_to_user(struct _fpstate __user *buf, | ||
| 99 | struct user_fxsr_struct *fxsave) | ||
| 100 | { | ||
| 101 | unsigned long env[7]; | ||
| 102 | struct _fpreg __user *to; | ||
| 103 | struct _fpxreg *from; | ||
| 104 | int i; | ||
| 105 | |||
| 106 | env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul; | ||
| 107 | env[1] = (unsigned long)fxsave->swd | 0xffff0000ul; | ||
| 108 | env[2] = twd_fxsr_to_i387(fxsave); | ||
| 109 | env[3] = fxsave->fip; | ||
| 110 | env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16); | ||
| 111 | env[5] = fxsave->foo; | ||
| 112 | env[6] = fxsave->fos; | ||
| 113 | |||
| 114 | if (__copy_to_user(buf, env, 7 * sizeof(unsigned long))) | ||
| 115 | return 1; | ||
| 116 | |||
| 117 | to = &buf->_st[0]; | ||
| 118 | from = (struct _fpxreg *) &fxsave->st_space[0]; | ||
| 119 | for (i = 0; i < 8; i++, to++, from++) { | ||
| 120 | unsigned long __user *t = (unsigned long __user *)to; | ||
| 121 | unsigned long *f = (unsigned long *)from; | ||
| 122 | |||
| 123 | if (__put_user(*f, t) || | ||
| 124 | __put_user(*(f + 1), t + 1) || | ||
| 125 | __put_user(from->exponent, &to->exponent)) | ||
| 126 | return 1; | ||
| 127 | } | ||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave, | ||
| 132 | struct _fpstate __user *buf) | ||
| 133 | { | ||
| 134 | unsigned long env[7]; | ||
| 135 | struct _fpxreg *to; | ||
| 136 | struct _fpreg __user *from; | ||
| 137 | int i; | ||
| 138 | |||
| 139 | if (copy_from_user( env, buf, 7 * sizeof(long))) | ||
| 140 | return 1; | ||
| 141 | |||
| 142 | fxsave->cwd = (unsigned short)(env[0] & 0xffff); | ||
| 143 | fxsave->swd = (unsigned short)(env[1] & 0xffff); | ||
| 144 | fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff)); | ||
| 145 | fxsave->fip = env[3]; | ||
| 146 | fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16); | ||
| 147 | fxsave->fcs = (env[4] & 0xffff); | ||
| 148 | fxsave->foo = env[5]; | ||
| 149 | fxsave->fos = env[6]; | ||
| 150 | |||
| 151 | to = (struct _fpxreg *) &fxsave->st_space[0]; | ||
| 152 | from = &buf->_st[0]; | ||
| 153 | for (i = 0; i < 8; i++, to++, from++) { | ||
| 154 | unsigned long *t = (unsigned long *)to; | ||
| 155 | unsigned long __user *f = (unsigned long __user *)from; | ||
| 156 | |||
| 157 | if (__get_user(*t, f) || | ||
| 158 | __get_user(*(t + 1), f + 1) || | ||
| 159 | __get_user(to->exponent, &from->exponent)) | ||
| 160 | return 1; | ||
| 161 | } | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | extern int have_fpx_regs; | ||
| 166 | |||
| 167 | static int copy_sc_from_user(struct pt_regs *regs, | ||
| 168 | struct sigcontext __user *from) | ||
| 169 | { | ||
| 170 | struct sigcontext sc; | ||
| 171 | int err, pid; | ||
| 172 | |||
| 173 | err = copy_from_user(&sc, from, sizeof(sc)); | ||
| 174 | if (err) | ||
| 175 | return err; | ||
| 176 | |||
| 177 | pid = userspace_pid[current_thread_info()->cpu]; | ||
| 178 | copy_sc(®s->regs, &sc); | ||
| 179 | if (have_fpx_regs) { | ||
| 180 | struct user_fxsr_struct fpx; | ||
| 181 | |||
| 182 | err = copy_from_user(&fpx, | ||
| 183 | &((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0], | ||
| 184 | sizeof(struct user_fxsr_struct)); | ||
| 185 | if (err) | ||
| 186 | return 1; | ||
| 187 | |||
| 188 | err = convert_fxsr_from_user(&fpx, sc.fpstate); | ||
| 189 | if (err) | ||
| 190 | return 1; | ||
| 191 | |||
| 192 | err = restore_fpx_registers(pid, (unsigned long *) &fpx); | ||
| 193 | if (err < 0) { | ||
| 194 | printk(KERN_ERR "copy_sc_from_user - " | ||
| 195 | "restore_fpx_registers failed, errno = %d\n", | ||
| 196 | -err); | ||
| 197 | return 1; | ||
| 198 | } | ||
| 199 | } | ||
| 200 | else { | ||
| 201 | struct user_i387_struct fp; | ||
| 202 | |||
| 203 | err = copy_from_user(&fp, sc.fpstate, | ||
| 204 | sizeof(struct user_i387_struct)); | ||
| 205 | if (err) | ||
| 206 | return 1; | ||
| 207 | |||
| 208 | err = restore_fp_registers(pid, (unsigned long *) &fp); | ||
| 209 | if (err < 0) { | ||
| 210 | printk(KERN_ERR "copy_sc_from_user - " | ||
| 211 | "restore_fp_registers failed, errno = %d\n", | ||
| 212 | -err); | ||
| 213 | return 1; | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | return 0; | ||
| 218 | } | ||
| 219 | |||
| 220 | static int copy_sc_to_user(struct sigcontext __user *to, | ||
| 221 | struct _fpstate __user *to_fp, struct pt_regs *regs, | ||
| 222 | unsigned long sp) | ||
| 223 | { | ||
| 224 | struct sigcontext sc; | ||
| 225 | struct faultinfo * fi = ¤t->thread.arch.faultinfo; | ||
| 226 | int err, pid; | ||
| 227 | |||
| 228 | sc.gs = REGS_GS(regs->regs.gp); | ||
| 229 | sc.fs = REGS_FS(regs->regs.gp); | ||
| 230 | sc.es = REGS_ES(regs->regs.gp); | ||
| 231 | sc.ds = REGS_DS(regs->regs.gp); | ||
| 232 | sc.di = REGS_EDI(regs->regs.gp); | ||
| 233 | sc.si = REGS_ESI(regs->regs.gp); | ||
| 234 | sc.bp = REGS_EBP(regs->regs.gp); | ||
| 235 | sc.sp = sp; | ||
| 236 | sc.bx = REGS_EBX(regs->regs.gp); | ||
| 237 | sc.dx = REGS_EDX(regs->regs.gp); | ||
| 238 | sc.cx = REGS_ECX(regs->regs.gp); | ||
| 239 | sc.ax = REGS_EAX(regs->regs.gp); | ||
| 240 | sc.ip = REGS_IP(regs->regs.gp); | ||
| 241 | sc.cs = REGS_CS(regs->regs.gp); | ||
| 242 | sc.flags = REGS_EFLAGS(regs->regs.gp); | ||
| 243 | sc.sp_at_signal = regs->regs.gp[UESP]; | ||
| 244 | sc.ss = regs->regs.gp[SS]; | ||
| 245 | sc.cr2 = fi->cr2; | ||
| 246 | sc.err = fi->error_code; | ||
| 247 | sc.trapno = fi->trap_no; | ||
| 248 | |||
| 249 | to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1)); | ||
| 250 | sc.fpstate = to_fp; | ||
| 251 | |||
| 252 | pid = userspace_pid[current_thread_info()->cpu]; | ||
| 253 | if (have_fpx_regs) { | ||
| 254 | struct user_fxsr_struct fpx; | ||
| 255 | |||
| 256 | err = save_fpx_registers(pid, (unsigned long *) &fpx); | ||
| 257 | if (err < 0){ | ||
| 258 | printk(KERN_ERR "copy_sc_to_user - save_fpx_registers " | ||
| 259 | "failed, errno = %d\n", err); | ||
| 260 | return 1; | ||
| 261 | } | ||
| 262 | |||
| 263 | err = convert_fxsr_to_user(to_fp, &fpx); | ||
| 264 | if (err) | ||
| 265 | return 1; | ||
| 266 | |||
| 267 | err |= __put_user(fpx.swd, &to_fp->status); | ||
| 268 | err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic); | ||
| 269 | if (err) | ||
| 270 | return 1; | ||
| 271 | |||
| 272 | if (copy_to_user(&to_fp->_fxsr_env[0], &fpx, | ||
| 273 | sizeof(struct user_fxsr_struct))) | ||
| 274 | return 1; | ||
| 275 | } | ||
| 276 | else { | ||
| 277 | struct user_i387_struct fp; | ||
| 278 | |||
| 279 | err = save_fp_registers(pid, (unsigned long *) &fp); | ||
| 280 | if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct))) | ||
| 281 | return 1; | ||
| 282 | } | ||
| 283 | |||
| 284 | return copy_to_user(to, &sc, sizeof(sc)); | ||
| 285 | } | ||
| 286 | |||
| 287 | static int copy_ucontext_to_user(struct ucontext __user *uc, | ||
| 288 | struct _fpstate __user *fp, sigset_t *set, | ||
| 289 | unsigned long sp) | ||
| 290 | { | ||
| 291 | int err = 0; | ||
| 292 | |||
| 293 | err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp); | ||
| 294 | err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags); | ||
| 295 | err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size); | ||
| 296 | err |= copy_sc_to_user(&uc->uc_mcontext, fp, ¤t->thread.regs, sp); | ||
| 297 | err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set)); | ||
| 298 | return err; | ||
| 299 | } | ||
| 300 | |||
| 301 | struct sigframe | ||
| 302 | { | ||
| 303 | char __user *pretcode; | ||
| 304 | int sig; | ||
| 305 | struct sigcontext sc; | ||
| 306 | struct _fpstate fpstate; | ||
| 307 | unsigned long extramask[_NSIG_WORDS-1]; | ||
| 308 | char retcode[8]; | ||
| 309 | }; | ||
| 310 | |||
| 311 | struct rt_sigframe | ||
| 312 | { | ||
| 313 | char __user *pretcode; | ||
| 314 | int sig; | ||
| 315 | struct siginfo __user *pinfo; | ||
| 316 | void __user *puc; | ||
| 317 | struct siginfo info; | ||
| 318 | struct ucontext uc; | ||
| 319 | struct _fpstate fpstate; | ||
| 320 | char retcode[8]; | ||
| 321 | }; | ||
| 322 | |||
| 323 | int setup_signal_stack_sc(unsigned long stack_top, int sig, | ||
| 324 | struct k_sigaction *ka, struct pt_regs *regs, | ||
| 325 | sigset_t *mask) | ||
| 326 | { | ||
| 327 | struct sigframe __user *frame; | ||
| 328 | void __user *restorer; | ||
| 329 | unsigned long save_sp = PT_REGS_SP(regs); | ||
| 330 | int err = 0; | ||
| 331 | |||
| 332 | /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */ | ||
| 333 | stack_top = ((stack_top + 4) & -16UL) - 4; | ||
| 334 | frame = (struct sigframe __user *) stack_top - 1; | ||
| 335 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
| 336 | return 1; | ||
| 337 | |||
| 338 | restorer = frame->retcode; | ||
| 339 | if (ka->sa.sa_flags & SA_RESTORER) | ||
| 340 | restorer = ka->sa.sa_restorer; | ||
| 341 | |||
| 342 | /* Update SP now, because the page fault handler refuses to extend | ||
| 343 | * the stack if the faulting address is too far below the current | ||
| 344 | * SP - which the frame now certainly is. If there's an error, the | ||
| 345 | * original value is restored on the way out. | ||
| 346 | * When writing the sigcontext to the stack, we have to write the | ||
| 347 | * original SP value, so that's what is passed to copy_sc_to_user, | ||
| 348 | * which does the right thing with it. | ||
| 349 | */ | ||
| 350 | PT_REGS_SP(regs) = (unsigned long) frame; | ||
| 351 | |||
| 352 | err |= __put_user(restorer, &frame->pretcode); | ||
| 353 | err |= __put_user(sig, &frame->sig); | ||
| 354 | err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp); | ||
| 355 | err |= __put_user(mask->sig[0], &frame->sc.oldmask); | ||
| 356 | if (_NSIG_WORDS > 1) | ||
| 357 | err |= __copy_to_user(&frame->extramask, &mask->sig[1], | ||
| 358 | sizeof(frame->extramask)); | ||
| 359 | |||
| 360 | /* | ||
| 361 | * This is popl %eax ; movl $__NR_sigreturn,%eax ; int $0x80 | ||
| 362 | * | ||
| 363 | * WE DO NOT USE IT ANY MORE! It's only left here for historical | ||
| 364 | * reasons and because gdb uses it as a signature to notice | ||
| 365 | * signal handler stack frames. | ||
| 366 | */ | ||
| 367 | err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); | ||
| 368 | err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); | ||
| 369 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); | ||
| 370 | |||
| 371 | if (err) | ||
| 372 | goto err; | ||
| 373 | |||
| 374 | PT_REGS_SP(regs) = (unsigned long) frame; | ||
| 375 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; | ||
| 376 | PT_REGS_EAX(regs) = (unsigned long) sig; | ||
| 377 | PT_REGS_EDX(regs) = (unsigned long) 0; | ||
| 378 | PT_REGS_ECX(regs) = (unsigned long) 0; | ||
| 379 | |||
| 380 | if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) | ||
| 381 | ptrace_notify(SIGTRAP); | ||
| 382 | return 0; | ||
| 383 | |||
| 384 | err: | ||
| 385 | PT_REGS_SP(regs) = save_sp; | ||
| 386 | return err; | ||
| 387 | } | ||
| 388 | |||
| 389 | int setup_signal_stack_si(unsigned long stack_top, int sig, | ||
| 390 | struct k_sigaction *ka, struct pt_regs *regs, | ||
| 391 | siginfo_t *info, sigset_t *mask) | ||
| 392 | { | ||
| 393 | struct rt_sigframe __user *frame; | ||
| 394 | void __user *restorer; | ||
| 395 | unsigned long save_sp = PT_REGS_SP(regs); | ||
| 396 | int err = 0; | ||
| 397 | |||
| 398 | stack_top &= -8UL; | ||
| 399 | frame = (struct rt_sigframe __user *) stack_top - 1; | ||
| 400 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
| 401 | return 1; | ||
| 402 | |||
| 403 | restorer = frame->retcode; | ||
| 404 | if (ka->sa.sa_flags & SA_RESTORER) | ||
| 405 | restorer = ka->sa.sa_restorer; | ||
| 406 | |||
| 407 | /* See comment above about why this is here */ | ||
| 408 | PT_REGS_SP(regs) = (unsigned long) frame; | ||
| 409 | |||
| 410 | err |= __put_user(restorer, &frame->pretcode); | ||
| 411 | err |= __put_user(sig, &frame->sig); | ||
| 412 | err |= __put_user(&frame->info, &frame->pinfo); | ||
| 413 | err |= __put_user(&frame->uc, &frame->puc); | ||
| 414 | err |= copy_siginfo_to_user(&frame->info, info); | ||
| 415 | err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask, | ||
| 416 | save_sp); | ||
| 417 | |||
| 418 | /* | ||
| 419 | * This is movl $__NR_rt_sigreturn,%eax ; int $0x80 | ||
| 420 | * | ||
| 421 | * WE DO NOT USE IT ANY MORE! It's only left here for historical | ||
| 422 | * reasons and because gdb uses it as a signature to notice | ||
| 423 | * signal handler stack frames. | ||
| 424 | */ | ||
| 425 | err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); | ||
| 426 | err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); | ||
| 427 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); | ||
| 428 | |||
| 429 | if (err) | ||
| 430 | goto err; | ||
| 431 | |||
| 432 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; | ||
| 433 | PT_REGS_EAX(regs) = (unsigned long) sig; | ||
| 434 | PT_REGS_EDX(regs) = (unsigned long) &frame->info; | ||
| 435 | PT_REGS_ECX(regs) = (unsigned long) &frame->uc; | ||
| 436 | |||
| 437 | if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) | ||
| 438 | ptrace_notify(SIGTRAP); | ||
| 439 | return 0; | ||
| 440 | |||
| 441 | err: | ||
| 442 | PT_REGS_SP(regs) = save_sp; | ||
| 443 | return err; | ||
| 444 | } | ||
| 445 | |||
| 446 | long sys_sigreturn(struct pt_regs regs) | ||
| 447 | { | ||
| 448 | unsigned long sp = PT_REGS_SP(¤t->thread.regs); | ||
| 449 | struct sigframe __user *frame = (struct sigframe __user *)(sp - 8); | ||
| 450 | sigset_t set; | ||
| 451 | struct sigcontext __user *sc = &frame->sc; | ||
| 452 | unsigned long __user *oldmask = &sc->oldmask; | ||
| 453 | unsigned long __user *extramask = frame->extramask; | ||
| 454 | int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); | ||
| 455 | |||
| 456 | if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) || | ||
| 457 | copy_from_user(&set.sig[1], extramask, sig_size)) | ||
| 458 | goto segfault; | ||
| 459 | |||
| 460 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
| 461 | |||
| 462 | spin_lock_irq(¤t->sighand->siglock); | ||
| 463 | current->blocked = set; | ||
| 464 | recalc_sigpending(); | ||
| 465 | spin_unlock_irq(¤t->sighand->siglock); | ||
| 466 | |||
| 467 | if (copy_sc_from_user(¤t->thread.regs, sc)) | ||
| 468 | goto segfault; | ||
| 469 | |||
| 470 | /* Avoid ERESTART handling */ | ||
| 471 | PT_REGS_SYSCALL_NR(¤t->thread.regs) = -1; | ||
| 472 | return PT_REGS_SYSCALL_RET(¤t->thread.regs); | ||
| 473 | |||
| 474 | segfault: | ||
| 475 | force_sig(SIGSEGV, current); | ||
| 476 | return 0; | ||
| 477 | } | ||
| 478 | |||
| 479 | long sys_rt_sigreturn(struct pt_regs regs) | ||
| 480 | { | ||
| 481 | unsigned long sp = PT_REGS_SP(¤t->thread.regs); | ||
| 482 | struct rt_sigframe __user *frame = | ||
| 483 | (struct rt_sigframe __user *) (sp - 4); | ||
| 484 | sigset_t set; | ||
| 485 | struct ucontext __user *uc = &frame->uc; | ||
| 486 | int sig_size = _NSIG_WORDS * sizeof(unsigned long); | ||
| 487 | |||
| 488 | if (copy_from_user(&set, &uc->uc_sigmask, sig_size)) | ||
| 489 | goto segfault; | ||
| 490 | |||
| 491 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
| 492 | |||
| 493 | spin_lock_irq(¤t->sighand->siglock); | ||
| 494 | current->blocked = set; | ||
| 495 | recalc_sigpending(); | ||
| 496 | spin_unlock_irq(¤t->sighand->siglock); | ||
| 497 | |||
| 498 | if (copy_sc_from_user(¤t->thread.regs, &uc->uc_mcontext)) | ||
| 499 | goto segfault; | ||
| 500 | |||
| 501 | /* Avoid ERESTART handling */ | ||
| 502 | PT_REGS_SYSCALL_NR(¤t->thread.regs) = -1; | ||
| 503 | return PT_REGS_SYSCALL_RET(¤t->thread.regs); | ||
| 504 | |||
| 505 | segfault: | ||
| 506 | force_sig(SIGSEGV, current); | ||
| 507 | return 0; | ||
| 508 | } | ||
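Illustrative aside (not part of the patch): the least obvious piece of the sigcontext conversion above is twd_i387_to_fxsr(), which compresses the 16-bit i387 tag word (two bits per register, 11b meaning empty) into the 8-bit FXSR tag byte (one "not empty" bit per register). A standalone replica of the same bit trick, handy for eyeballing the intermediate masks in the comments; the sample tag word and expected output are illustrative values chosen here:

```c
/* Stand-alone replica of the twd_i387_to_fxsr() bit trick from signal.c. */
#include <stdio.h>

static unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp;

	/* Turn each 2-bit tag into 01 (in use) or 00 (empty)... */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* ...then squeeze the 8 flag bits down into the low byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
	return (unsigned short)tmp;
}

int main(void)
{
	/* st0 valid (00), st1 zero (01), st2 special (10), st3..st7 empty (11). */
	unsigned short twd = 0xffe4;
	printf("i387 twd 0x%04x -> fxsr tag byte 0x%02x\n",
	       twd, twd_i387_to_fxsr(twd));   /* prints 0x07: st0..st2 in use */
	return 0;
}
```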
diff --git a/arch/um/sys-i386/stub.S b/arch/um/sys-i386/stub.S new file mode 100644 index 00000000000..54a36ec20cb --- /dev/null +++ b/arch/um/sys-i386/stub.S | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | #include "as-layout.h" | ||
| 2 | |||
| 3 | .globl syscall_stub | ||
| 4 | .section .__syscall_stub, "ax" | ||
| 5 | |||
| 6 | .globl batch_syscall_stub | ||
| 7 | batch_syscall_stub: | ||
| 8 | /* load pointer to first operation */ | ||
| 9 | mov $(STUB_DATA+8), %esp | ||
| 10 | |||
| 11 | again: | ||
| 12 | /* load length of additional data */ | ||
| 13 | mov 0x0(%esp), %eax | ||
| 14 | |||
| 15 | /* if(length == 0) : end of list */ | ||
| 16 | /* write possible 0 to header */ | ||
| 17 | mov %eax, STUB_DATA+4 | ||
| 18 | cmpl $0, %eax | ||
| 19 | jz done | ||
| 20 | |||
| 21 | /* save current pointer */ | ||
| 22 | mov %esp, STUB_DATA+4 | ||
| 23 | |||
| 24 | /* skip additional data */ | ||
| 25 | add %eax, %esp | ||
| 26 | |||
| 27 | /* load syscall-# */ | ||
| 28 | pop %eax | ||
| 29 | |||
| 30 | /* load syscall params */ | ||
| 31 | pop %ebx | ||
| 32 | pop %ecx | ||
| 33 | pop %edx | ||
| 34 | pop %esi | ||
| 35 | pop %edi | ||
| 36 | pop %ebp | ||
| 37 | |||
| 38 | /* execute syscall */ | ||
| 39 | int $0x80 | ||
| 40 | |||
| 41 | /* check return value */ | ||
| 42 | pop %ebx | ||
| 43 | cmp %ebx, %eax | ||
| 44 | je again | ||
| 45 | |||
| 46 | done: | ||
| 47 | /* save return value */ | ||
| 48 | mov %eax, STUB_DATA | ||
| 49 | |||
| 50 | /* stop */ | ||
| 51 | int3 | ||
diff --git a/arch/um/sys-i386/stub_segv.c b/arch/um/sys-i386/stub_segv.c new file mode 100644 index 00000000000..28ccf737a79 --- /dev/null +++ b/arch/um/sys-i386/stub_segv.c | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include "sysdep/stub.h" | ||
| 7 | #include "sysdep/sigcontext.h" | ||
| 8 | |||
| 9 | void __attribute__ ((__section__ (".__syscall_stub"))) | ||
| 10 | stub_segv_handler(int sig) | ||
| 11 | { | ||
| 12 | struct sigcontext *sc = (struct sigcontext *) (&sig + 1); | ||
| 13 | |||
| 14 | GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA), sc); | ||
| 15 | |||
| 16 | trap_myself(); | ||
| 17 | } | ||
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S new file mode 100644 index 00000000000..de274071455 --- /dev/null +++ b/arch/um/sys-i386/sys_call_table.S | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | #include <linux/linkage.h> | ||
| 2 | /* Steal i386 syscall table for our purposes, but with some slight changes. */ | ||
| 3 | |||
| 4 | #define sys_iopl sys_ni_syscall | ||
| 5 | #define sys_ioperm sys_ni_syscall | ||
| 6 | |||
| 7 | #define sys_vm86old sys_ni_syscall | ||
| 8 | #define sys_vm86 sys_ni_syscall | ||
| 9 | |||
| 10 | #define old_mmap sys_old_mmap | ||
| 11 | |||
| 12 | #define ptregs_fork sys_fork | ||
| 13 | #define ptregs_execve sys_execve | ||
| 14 | #define ptregs_iopl sys_iopl | ||
| 15 | #define ptregs_vm86old sys_vm86old | ||
| 16 | #define ptregs_sigreturn sys_sigreturn | ||
| 17 | #define ptregs_clone sys_clone | ||
| 18 | #define ptregs_vm86 sys_vm86 | ||
| 19 | #define ptregs_rt_sigreturn sys_rt_sigreturn | ||
| 20 | #define ptregs_sigaltstack sys_sigaltstack | ||
| 21 | #define ptregs_vfork sys_vfork | ||
| 22 | |||
| 23 | .section .rodata,"a" | ||
| 24 | |||
| 25 | #include "../../x86/kernel/syscall_table_32.S" | ||
| 26 | |||
| 27 | ENTRY(syscall_table_size) | ||
| 28 | .long .-sys_call_table | ||
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c new file mode 100644 index 00000000000..70ca357393b --- /dev/null +++ b/arch/um/sys-i386/syscalls.c | |||
| @@ -0,0 +1,66 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include "linux/sched.h" | ||
| 7 | #include "linux/shm.h" | ||
| 8 | #include "linux/ipc.h" | ||
| 9 | #include "linux/syscalls.h" | ||
| 10 | #include "asm/mman.h" | ||
| 11 | #include "asm/uaccess.h" | ||
| 12 | #include "asm/unistd.h" | ||
| 13 | |||
| 14 | /* | ||
| 15 | * The prototype on i386 is: | ||
| 16 | * | ||
| 17 | * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr) | ||
| 18 | * | ||
| 19 | * and the "newtls" argument on i386 is read by copy_thread directly from the | ||
| 20 | * register saved on the stack. | ||
| 21 | */ | ||
| 22 | long sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
| 23 | int __user *parent_tid, void *newtls, int __user *child_tid) | ||
| 24 | { | ||
| 25 | long ret; | ||
| 26 | |||
| 27 | if (!newsp) | ||
| 28 | newsp = UPT_SP(¤t->thread.regs.regs); | ||
| 29 | |||
| 30 | current->thread.forking = 1; | ||
| 31 | ret = do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, | ||
| 32 | child_tid); | ||
| 33 | current->thread.forking = 0; | ||
| 34 | return ret; | ||
| 35 | } | ||
| 36 | |||
| 37 | long sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
| 38 | struct old_sigaction __user *oact) | ||
| 39 | { | ||
| 40 | struct k_sigaction new_ka, old_ka; | ||
| 41 | int ret; | ||
| 42 | |||
| 43 | if (act) { | ||
| 44 | old_sigset_t mask; | ||
| 45 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
| 46 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | ||
| 47 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | ||
| 48 | return -EFAULT; | ||
| 49 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
| 50 | __get_user(mask, &act->sa_mask); | ||
| 51 | siginitset(&new_ka.sa.sa_mask, mask); | ||
| 52 | } | ||
| 53 | |||
| 54 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
| 55 | |||
| 56 | if (!ret && oact) { | ||
| 57 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
| 58 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | ||
| 59 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | ||
| 60 | return -EFAULT; | ||
| 61 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
| 62 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
| 63 | } | ||
| 64 | |||
| 65 | return ret; | ||
| 66 | } | ||
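Illustrative aside (not part of the patch): sys_clone() above documents the raw i386 argument order (flags, child_stack, parent_tidptr, newtls, child_tidptr); the newtls descriptor itself is picked up later via arch_copy_tls() in tls.c. For orientation, here is a plain glibc clone(2) call; the wrapper hides the raw ordering and expects the top of the child stack (nothing here is UML-specific):

```c
/* Minimal glibc clone() usage; the raw i386 syscall behind it takes
 * (flags, child_stack, parent_tidptr, newtls, child_tidptr). */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int child_fn(void *arg)
{
	printf("child: pid=%d, arg=%s\n", getpid(), (const char *)arg);
	return 0;
}

int main(void)
{
	const size_t stack_size = 1024 * 1024;
	char *stack = malloc(stack_size);

	if (!stack)
		return 1;
	/* glibc wants the highest usable address; the stack grows down. */
	pid_t pid = clone(child_fn, stack + stack_size, SIGCHLD, (void *)"hello");
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}
```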
diff --git a/arch/um/sys-i386/sysrq.c b/arch/um/sys-i386/sysrq.c new file mode 100644 index 00000000000..171b3e9dc86 --- /dev/null +++ b/arch/um/sys-i386/sysrq.c | |||
| @@ -0,0 +1,101 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com) | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include "linux/kernel.h" | ||
| 7 | #include "linux/smp.h" | ||
| 8 | #include "linux/sched.h" | ||
| 9 | #include "linux/kallsyms.h" | ||
| 10 | #include "asm/ptrace.h" | ||
| 11 | #include "sysrq.h" | ||
| 12 | |||
| 13 | /* This is declared by <linux/sched.h> */ | ||
| 14 | void show_regs(struct pt_regs *regs) | ||
| 15 | { | ||
| 16 | printk("\n"); | ||
| 17 | printk("EIP: %04lx:[<%08lx>] CPU: %d %s", | ||
| 18 | 0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs), | ||
| 19 | smp_processor_id(), print_tainted()); | ||
| 20 | if (PT_REGS_CS(regs) & 3) | ||
| 21 | printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs), | ||
| 22 | PT_REGS_SP(regs)); | ||
| 23 | printk(" EFLAGS: %08lx\n %s\n", PT_REGS_EFLAGS(regs), | ||
| 24 | print_tainted()); | ||
| 25 | printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", | ||
| 26 | PT_REGS_EAX(regs), PT_REGS_EBX(regs), | ||
| 27 | PT_REGS_ECX(regs), | ||
| 28 | PT_REGS_EDX(regs)); | ||
| 29 | printk("ESI: %08lx EDI: %08lx EBP: %08lx", | ||
| 30 | PT_REGS_ESI(regs), PT_REGS_EDI(regs), | ||
| 31 | PT_REGS_EBP(regs)); | ||
| 32 | printk(" DS: %04lx ES: %04lx\n", | ||
| 33 | 0xffff & PT_REGS_DS(regs), | ||
| 34 | 0xffff & PT_REGS_ES(regs)); | ||
| 35 | |||
| 36 | show_trace(NULL, (unsigned long *) ®s); | ||
| 37 | } | ||
| 38 | |||
| 39 | /* Copied from i386. */ | ||
| 40 | static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) | ||
| 41 | { | ||
| 42 | return p > (void *)tinfo && | ||
| 43 | p < (void *)tinfo + THREAD_SIZE - 3; | ||
| 44 | } | ||
| 45 | |||
| 46 | /* Adapted from i386 (we also print the address we read from). */ | ||
| 47 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | ||
| 48 | unsigned long *stack, unsigned long ebp) | ||
| 49 | { | ||
| 50 | unsigned long addr; | ||
| 51 | |||
| 52 | #ifdef CONFIG_FRAME_POINTER | ||
| 53 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | ||
| 54 | addr = *(unsigned long *)(ebp + 4); | ||
| 55 | printk("%08lx: [<%08lx>]", ebp + 4, addr); | ||
| 56 | print_symbol(" %s", addr); | ||
| 57 | printk("\n"); | ||
| 58 | ebp = *(unsigned long *)ebp; | ||
| 59 | } | ||
| 60 | #else | ||
| 61 | while (valid_stack_ptr(tinfo, stack)) { | ||
| 62 | addr = *stack; | ||
| 63 | if (__kernel_text_address(addr)) { | ||
| 64 | printk("%08lx: [<%08lx>]", (unsigned long) stack, addr); | ||
| 65 | print_symbol(" %s", addr); | ||
| 66 | printk("\n"); | ||
| 67 | } | ||
| 68 | stack++; | ||
| 69 | } | ||
| 70 | #endif | ||
| 71 | return ebp; | ||
| 72 | } | ||
| 73 | |||
| 74 | void show_trace(struct task_struct* task, unsigned long * stack) | ||
| 75 | { | ||
| 76 | unsigned long ebp; | ||
| 77 | struct thread_info *context; | ||
| 78 | |||
| 79 | /* Turn this into BUG_ON if possible. */ | ||
| 80 | if (!stack) { | ||
| 81 | stack = (unsigned long*) &stack; | ||
| 82 | printk("show_trace: got NULL stack, implicit assumption task == current\n"); | ||
| 83 | WARN_ON(1); | ||
| 84 | } | ||
| 85 | |||
| 86 | if (!task) | ||
| 87 | task = current; | ||
| 88 | |||
| 89 | if (task != current) { | ||
| 90 | ebp = (unsigned long) KSTK_EBP(task); | ||
| 91 | } else { | ||
| 92 | asm ("movl %%ebp, %0" : "=r" (ebp) : ); | ||
| 93 | } | ||
| 94 | |||
| 95 | context = (struct thread_info *) | ||
| 96 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | ||
| 97 | print_context_stack(context, stack, ebp); | ||
| 98 | |||
| 99 | printk("\n"); | ||
| 100 | } | ||
| 101 | |||
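Illustrative aside (not part of the patch): with CONFIG_FRAME_POINTER, print_context_stack() above walks the saved-EBP chain, where each frame stores the caller's frame pointer at [ebp] and the return address right above it. A hedged userspace analogue of that walk, assuming the standard x86 frame layout and a build with -fno-omit-frame-pointer (the struct and the bound are illustration only, not a kernel API):

```c
/* Walk the frame-pointer chain the way print_context_stack() does. */
#include <stdio.h>

struct frame {
	struct frame *prev;   /* saved caller frame pointer        */
	void *ret;            /* return address pushed by the call */
};

/* 'top' plays the role of valid_stack_ptr()'s THREAD_SIZE bound. */
static __attribute__((noinline)) void show_trace(void *top)
{
	struct frame *fp = __builtin_frame_address(0);

	while (fp && (void *)fp < top) {
		printf("[<%p>]\n", fp->ret);
		fp = fp->prev;
	}
}

static __attribute__((noinline)) void level2(void *top) { show_trace(top); }
static __attribute__((noinline)) void level1(void *top) { level2(top); }

int main(void)
{
	int anchor;              /* a local in main bounds the walk */

	level1(&anchor);
	return 0;
}
```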
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c new file mode 100644 index 00000000000..c6c7131e563 --- /dev/null +++ b/arch/um/sys-i386/tls.c | |||
| @@ -0,0 +1,396 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> | ||
| 3 | * Licensed under the GPL | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include "linux/percpu.h" | ||
| 7 | #include "linux/sched.h" | ||
| 8 | #include "asm/uaccess.h" | ||
| 9 | #include "os.h" | ||
| 10 | #include "skas.h" | ||
| 11 | #include "sysdep/tls.h" | ||
| 12 | |||
| 13 | /* | ||
| 14 | * host_supports_tls starts at -1 so an uninitialized value can be | ||
| 15 | * detected if needed. | ||
| 16 | * These are initialized in an initcall and unchanged thereafter. | ||
| 17 | */ | ||
| 18 | static int host_supports_tls = -1; | ||
| 19 | int host_gdt_entry_tls_min; | ||
| 20 | |||
| 21 | int do_set_thread_area(struct user_desc *info) | ||
| 22 | { | ||
| 23 | int ret; | ||
| 24 | u32 cpu; | ||
| 25 | |||
| 26 | cpu = get_cpu(); | ||
| 27 | ret = os_set_thread_area(info, userspace_pid[cpu]); | ||
| 28 | put_cpu(); | ||
| 29 | |||
| 30 | if (ret) | ||
| 31 | printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, " | ||
| 32 | "index = %d\n", ret, info->entry_number); | ||
| 33 | |||
| 34 | return ret; | ||
| 35 | } | ||
| 36 | |||
| 37 | int do_get_thread_area(struct user_desc *info) | ||
| 38 | { | ||
| 39 | int ret; | ||
| 40 | u32 cpu; | ||
| 41 | |||
| 42 | cpu = get_cpu(); | ||
| 43 | ret = os_get_thread_area(info, userspace_pid[cpu]); | ||
| 44 | put_cpu(); | ||
| 45 | |||
| 46 | if (ret) | ||
| 47 | printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, " | ||
| 48 | "index = %d\n", ret, info->entry_number); | ||
| 49 | |||
| 50 | return ret; | ||
| 51 | } | ||
| 52 | |||
| 53 | /* | ||
| 54 | * get_free_idx: find a not yet used TLS descriptor index. | ||
| 55 | * XXX: Consider leaving one free slot for glibc usage in the first place. This | ||
| 56 | * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else. | ||
| 57 | * | ||
| 58 | * Also, this must be tested when compiling in SKAS mode with dynamic linking | ||
| 59 | * and running against NPTL. | ||
| 60 | */ | ||
| 61 | static int get_free_idx(struct task_struct* task) | ||
| 62 | { | ||
| 63 | struct thread_struct *t = &task->thread; | ||
| 64 | int idx; | ||
| 65 | |||
| 66 | if (!t->arch.tls_array) | ||
| 67 | return GDT_ENTRY_TLS_MIN; | ||
| 68 | |||
| 69 | for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) | ||
| 70 | if (!t->arch.tls_array[idx].present) | ||
| 71 | return idx + GDT_ENTRY_TLS_MIN; | ||
| 72 | return -ESRCH; | ||
| 73 | } | ||
| 74 | |||
| 75 | static inline void clear_user_desc(struct user_desc* info) | ||
| 76 | { | ||
| 77 | /* Postcondition: LDT_empty(info) returns true. */ | ||
| 78 | memset(info, 0, sizeof(*info)); | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Check LDT_empty or the i386 sys_get_thread_area code - this does | ||
| 82 | * indeed produce an empty user_desc. | ||
| 83 | */ | ||
| 84 | info->read_exec_only = 1; | ||
| 85 | info->seg_not_present = 1; | ||
| 86 | } | ||
| 87 | |||
| 88 | #define O_FORCE 1 | ||
| 89 | |||
| 90 | static int load_TLS(int flags, struct task_struct *to) | ||
| 91 | { | ||
| 92 | int ret = 0; | ||
| 93 | int idx; | ||
| 94 | |||
| 95 | for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) { | ||
| 96 | struct uml_tls_struct* curr = | ||
| 97 | &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN]; | ||
| 98 | |||
| 99 | /* | ||
| 100 | * If a non-present entry was never flushed, clear it here and | ||
| 101 | * flush it to the host, which clears the host descriptor too. | ||
| 102 | */ | ||
| 103 | if (!curr->present) { | ||
| 104 | if (!curr->flushed) { | ||
| 105 | clear_user_desc(&curr->tls); | ||
| 106 | curr->tls.entry_number = idx; | ||
| 107 | } else { | ||
| 108 | WARN_ON(!LDT_empty(&curr->tls)); | ||
| 109 | continue; | ||
| 110 | } | ||
| 111 | } | ||
| 112 | |||
| 113 | if (!(flags & O_FORCE) && curr->flushed) | ||
| 114 | continue; | ||
| 115 | |||
| 116 | ret = do_set_thread_area(&curr->tls); | ||
| 117 | if (ret) | ||
| 118 | goto out; | ||
| 119 | |||
| 120 | curr->flushed = 1; | ||
| 121 | } | ||
| 122 | out: | ||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | /* | ||
| 127 | * Check whether the new process needs a flush, i.e. whether any of its | ||
| 128 | * descriptors have not been flushed to the host yet. | ||
| 129 | */ | ||
| 130 | static inline int needs_TLS_update(struct task_struct *task) | ||
| 131 | { | ||
| 132 | int i; | ||
| 133 | int ret = 0; | ||
| 134 | |||
| 135 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { | ||
| 136 | struct uml_tls_struct* curr = | ||
| 137 | &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | ||
| 138 | |||
| 139 | /* | ||
| 140 | * Can't test curr->present, we may need to clear a descriptor | ||
| 141 | * which had a value. | ||
| 142 | */ | ||
| 143 | if (curr->flushed) | ||
| 144 | continue; | ||
| 145 | ret = 1; | ||
| 146 | break; | ||
| 147 | } | ||
| 148 | return ret; | ||
| 149 | } | ||
| 150 | |||
| 151 | /* | ||
| 152 | * On a newly forked process, the TLS descriptors haven't yet been flushed. So | ||
| 153 | * we mark them as such and the first switch_to will do the job. | ||
| 154 | */ | ||
| 155 | void clear_flushed_tls(struct task_struct *task) | ||
| 156 | { | ||
| 157 | int i; | ||
| 158 | |||
| 159 | for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) { | ||
| 160 | struct uml_tls_struct* curr = | ||
| 161 | &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; | ||
| 162 | |||
| 163 | /* | ||
| 164 | * Skipping non-present entries is still correct: if an entry was | ||
| 165 | * never present on the host, it stays as flushed as it was. | ||
| 166 | */ | ||
| 167 | if (!curr->present) | ||
| 168 | continue; | ||
| 169 | |||
| 170 | curr->flushed = 0; | ||
| 171 | } | ||
| 172 | } | ||
| 173 | |||
| 174 | /* | ||
| 175 | * In SKAS0 mode, multiple guest threads sharing the same ->mm currently share | ||
| 176 | * a common host process, so this is needed in SKAS0 too. | ||
| 177 | * | ||
| 178 | * However, if each thread had its own host process (as was discussed for SMP | ||
| 179 | * support), this would not be needed. | ||
| 180 | * | ||
| 181 | * It will also not be needed if (and when) support for the host SKAS patch is | ||
| 182 | * added. | ||
| 183 | */ | ||
| 184 | |||
| 185 | int arch_switch_tls(struct task_struct *to) | ||
| 186 | { | ||
| 187 | if (!host_supports_tls) | ||
| 188 | return 0; | ||
| 189 | |||
| 190 | /* | ||
| 191 | * We have no need whatsoever to switch TLS for kernel threads; beyond | ||
| 192 | * that, doing so would also result in us calling os_set_thread_area with | ||
| 193 | * userspace_pid[cpu] == 0, which gives an error. | ||
| 194 | */ | ||
| 195 | if (likely(to->mm)) | ||
| 196 | return load_TLS(O_FORCE, to); | ||
| 197 | |||
| 198 | return 0; | ||
| 199 | } | ||
| 200 | |||
| 201 | static int set_tls_entry(struct task_struct* task, struct user_desc *info, | ||
| 202 | int idx, int flushed) | ||
| 203 | { | ||
| 204 | struct thread_struct *t = &task->thread; | ||
| 205 | |||
| 206 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
| 207 | return -EINVAL; | ||
| 208 | |||
| 209 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info; | ||
| 210 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1; | ||
| 211 | t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed; | ||
| 212 | |||
| 213 | return 0; | ||
| 214 | } | ||
| 215 | |||
| 216 | int arch_copy_tls(struct task_struct *new) | ||
| 217 | { | ||
| 218 | struct user_desc info; | ||
| 219 | int idx, ret = -EFAULT; | ||
| 220 | |||
| 221 | if (copy_from_user(&info, | ||
| 222 | (void __user *) UPT_ESI(&new->thread.regs.regs), | ||
| 223 | sizeof(info))) | ||
| 224 | goto out; | ||
| 225 | |||
| 226 | ret = -EINVAL; | ||
| 227 | if (LDT_empty(&info)) | ||
| 228 | goto out; | ||
| 229 | |||
| 230 | idx = info.entry_number; | ||
| 231 | |||
| 232 | ret = set_tls_entry(new, &info, idx, 0); | ||
| 233 | out: | ||
| 234 | return ret; | ||
| 235 | } | ||
| 236 | |||
| 237 | /* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */ | ||
| 238 | static int get_tls_entry(struct task_struct *task, struct user_desc *info, | ||
| 239 | int idx) | ||
| 240 | { | ||
| 241 | struct thread_struct *t = &task->thread; | ||
| 242 | |||
| 243 | if (!t->arch.tls_array) | ||
| 244 | goto clear; | ||
| 245 | |||
| 246 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | ||
| 247 | return -EINVAL; | ||
| 248 | |||
| 249 | if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present) | ||
| 250 | goto clear; | ||
| 251 | |||
| 252 | *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls; | ||
| 253 | |||
| 254 | out: | ||
| 255 | /* | ||
| 256 | * Temporary debugging check, to make sure that things have been | ||
| 257 | * flushed. This could be triggered if load_TLS() failed. | ||
| 258 | */ | ||
| 259 | if (unlikely(task == current && | ||
| 260 | !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { | ||
| 261 | printk(KERN_ERR "get_tls_entry: task with pid %d got here " | ||
| 262 | "without flushed TLS.", current->pid); | ||
| 263 | } | ||
| 264 | |||
| 265 | return 0; | ||
| 266 | clear: | ||
| 267 | /* | ||
| 268 | * When the TLS entry has not been set, the values userspace reads back | ||
| 269 | * are 0, because the GDT is cleared at boot (see | ||
| 270 | * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that. | ||
| 271 | */ | ||
| 272 | clear_user_desc(info); | ||
| 273 | info->entry_number = idx; | ||
| 274 | goto out; | ||
| 275 | } | ||
| 276 | |||
| 277 | int sys_set_thread_area(struct user_desc __user *user_desc) | ||
| 278 | { | ||
| 279 | struct user_desc info; | ||
| 280 | int idx, ret; | ||
| 281 | |||
| 282 | if (!host_supports_tls) | ||
| 283 | return -ENOSYS; | ||
| 284 | |||
| 285 | if (copy_from_user(&info, user_desc, sizeof(info))) | ||
| 286 | return -EFAULT; | ||
| 287 | |||
| 288 | idx = info.entry_number; | ||
| 289 | |||
| 290 | if (idx == -1) { | ||
| 291 | idx = get_free_idx(current); | ||
| 292 | if (idx < 0) | ||
| 293 | return idx; | ||
| 294 | info.entry_number = idx; | ||
| 295 | /* Tell the user which slot we chose for him.*/ | ||
| 296 | if (put_user(idx, &user_desc->entry_number)) | ||
| 297 | return -EFAULT; | ||
| 298 | } | ||
| 299 | |||
| 300 | ret = do_set_thread_area(&info); | ||
| 301 | if (ret) | ||
| 302 | return ret; | ||
| 303 | return set_tls_entry(current, &info, idx, 1); | ||
| 304 | } | ||
| 305 | |||
| 306 | /* | ||
| 307 | * Perform set_thread_area on behalf of the traced child. | ||
| 308 | * Note: no error handling is done on the deferred load, which differs from | ||
| 309 | * i386. However, the only possible errors are caused by bugs. | ||
| 310 | */ | ||
| 311 | int ptrace_set_thread_area(struct task_struct *child, int idx, | ||
| 312 | struct user_desc __user *user_desc) | ||
| 313 | { | ||
| 314 | struct user_desc info; | ||
| 315 | |||
| 316 | if (!host_supports_tls) | ||
| 317 | return -EIO; | ||
| 318 | |||
| 319 | if (copy_from_user(&info, user_desc, sizeof(info))) | ||
| 320 | return -EFAULT; | ||
| 321 | |||
| 322 | return set_tls_entry(child, &info, idx, 0); | ||
| 323 | } | ||
| 324 | |||
| 325 | int sys_get_thread_area(struct user_desc __user *user_desc) | ||
| 326 | { | ||
| 327 | struct user_desc info; | ||
| 328 | int idx, ret; | ||
| 329 | |||
| 330 | if (!host_supports_tls) | ||
| 331 | return -ENOSYS; | ||
| 332 | |||
| 333 | if (get_user(idx, &user_desc->entry_number)) | ||
| 334 | return -EFAULT; | ||
| 335 | |||
| 336 | ret = get_tls_entry(current, &info, idx); | ||
| 337 | if (ret < 0) | ||
| 338 | goto out; | ||
| 339 | |||
| 340 | if (copy_to_user(user_desc, &info, sizeof(info))) | ||
| 341 | ret = -EFAULT; | ||
| 342 | |||
| 343 | out: | ||
| 344 | return ret; | ||
| 345 | } | ||
| 346 | |||
| 347 | /* | ||
| 348 | * Perform get_thread_area on behalf of the traced child. | ||
| 349 | */ | ||
| 350 | int ptrace_get_thread_area(struct task_struct *child, int idx, | ||
| 351 | struct user_desc __user *user_desc) | ||
| 352 | { | ||
| 353 | struct user_desc info; | ||
| 354 | int ret; | ||
| 355 | |||
| 356 | if (!host_supports_tls) | ||
| 357 | return -EIO; | ||
| 358 | |||
| 359 | ret = get_tls_entry(child, &info, idx); | ||
| 360 | if (ret < 0) | ||
| 361 | goto out; | ||
| 362 | |||
| 363 | if (copy_to_user(user_desc, &info, sizeof(info))) | ||
| 364 | ret = -EFAULT; | ||
| 365 | out: | ||
| 366 | return ret; | ||
| 367 | } | ||
| 368 | |||
| 369 | /* | ||
| 370 | * This code is really i386-only, but it detects and logs x86_64 GDT indexes | ||
| 371 | * if a 32-bit UML is running on a 64-bit host. | ||
| 372 | */ | ||
| 373 | static int __init __setup_host_supports_tls(void) | ||
| 374 | { | ||
| 375 | check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min); | ||
| 376 | if (host_supports_tls) { | ||
| 377 | printk(KERN_INFO "Host TLS support detected\n"); | ||
| 378 | printk(KERN_INFO "Detected host type: "); | ||
| 379 | switch (host_gdt_entry_tls_min) { | ||
| 380 | case GDT_ENTRY_TLS_MIN_I386: | ||
| 381 | printk(KERN_CONT "i386"); | ||
| 382 | break; | ||
| 383 | case GDT_ENTRY_TLS_MIN_X86_64: | ||
| 384 | printk(KERN_CONT "x86_64"); | ||
| 385 | break; | ||
| 386 | } | ||
| 387 | printk(KERN_CONT " (GDT indexes %d to %d)\n", | ||
| 388 | host_gdt_entry_tls_min, | ||
| 389 | host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES - 1); | ||
| 390 | } else | ||
| 391 | printk(KERN_ERR " Host TLS support NOT detected! " | ||
| 392 | "TLS support inside UML will not work\n"); | ||
| 393 | return 0; | ||
| 394 | } | ||
| 395 | |||
| 396 | __initcall(__setup_host_supports_tls); | ||
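Illustrative aside (not part of the patch): sys_set_thread_area() above accepts entry_number == -1 and writes the slot it picked back to userspace, just like native i386. A hedged sketch of that calling convention using the standard Linux set_thread_area(2) syscall and struct user_desc from `<asm/ldt.h>`; a 32-bit x86 build is assumed so the base address fits the descriptor's 32-bit field:

```c
/* Ask the kernel for a free TLS slot: entry_number == -1 on input, the
 * chosen GDT index is written back on success (see sys_set_thread_area). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <asm/ldt.h>        /* struct user_desc */
#include <sys/syscall.h>

int main(void)
{
	struct user_desc desc;
	static int tls_area[256];            /* memory the descriptor covers */

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;              /* "pick a slot for me"         */
	desc.base_addr = (unsigned int)(unsigned long)tls_area;
	desc.limit = sizeof(tls_area) - 1;   /* byte-granular limit          */
	desc.seg_32bit = 1;
	desc.useable = 1;

	if (syscall(SYS_set_thread_area, &desc) != 0) {
		perror("set_thread_area");
		return 1;
	}
	printf("kernel chose GDT entry %u\n", desc.entry_number);
	return 0;
}
```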
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c new file mode 100644 index 00000000000..5f883bfe773 --- /dev/null +++ b/arch/um/sys-i386/user-offsets.c | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | #include <stdio.h> | ||
| 2 | #include <stddef.h> | ||
| 3 | #include <signal.h> | ||
| 4 | #include <sys/poll.h> | ||
| 5 | #include <sys/user.h> | ||
| 6 | #include <sys/mman.h> | ||
| 7 | #include <asm/ptrace.h> | ||
| 8 | |||
| 9 | #define DEFINE(sym, val) \ | ||
| 10 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
| 11 | |||
| 12 | #define DEFINE_LONGS(sym, val) \ | ||
| 13 | asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) | ||
| 14 | |||
| 15 | #define OFFSET(sym, str, mem) \ | ||
| 16 | DEFINE(sym, offsetof(struct str, mem)); | ||
| 17 | |||
| 18 | void foo(void) | ||
| 19 | { | ||
| 20 | OFFSET(HOST_SC_TRAPNO, sigcontext, trapno); | ||
| 21 | OFFSET(HOST_SC_ERR, sigcontext, err); | ||
| 22 | OFFSET(HOST_SC_CR2, sigcontext, cr2); | ||
| 23 | |||
| 24 | DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct)); | ||
| 25 | DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct)); | ||
| 26 | |||
| 27 | DEFINE(HOST_IP, EIP); | ||
| 28 | DEFINE(HOST_SP, UESP); | ||
| 29 | DEFINE(HOST_EFLAGS, EFL); | ||
| 30 | DEFINE(HOST_EAX, EAX); | ||
| 31 | DEFINE(HOST_EBX, EBX); | ||
| 32 | DEFINE(HOST_ECX, ECX); | ||
| 33 | DEFINE(HOST_EDX, EDX); | ||
| 34 | DEFINE(HOST_ESI, ESI); | ||
| 35 | DEFINE(HOST_EDI, EDI); | ||
| 36 | DEFINE(HOST_EBP, EBP); | ||
| 37 | DEFINE(HOST_CS, CS); | ||
| 38 | DEFINE(HOST_SS, SS); | ||
| 39 | DEFINE(HOST_DS, DS); | ||
| 40 | DEFINE(HOST_FS, FS); | ||
| 41 | DEFINE(HOST_ES, ES); | ||
| 42 | DEFINE(HOST_GS, GS); | ||
| 43 | DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); | ||
| 44 | |||
| 45 | /* XXX Duplicated between i386 and x86_64 */ | ||
| 46 | DEFINE(UM_POLLIN, POLLIN); | ||
| 47 | DEFINE(UM_POLLPRI, POLLPRI); | ||
| 48 | DEFINE(UM_POLLOUT, POLLOUT); | ||
| 49 | |||
| 50 | DEFINE(UM_PROT_READ, PROT_READ); | ||
| 51 | DEFINE(UM_PROT_WRITE, PROT_WRITE); | ||
| 52 | DEFINE(UM_PROT_EXEC, PROT_EXEC); | ||
| 53 | } | ||
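Illustrative aside (not part of the patch): user-offsets.c is never linked into the kernel. It is compiled to assembly, and the "->NAME value" markers that DEFINE() leaves in the output are turned into a generated header of #defines by the UML build rules (the usual asm-offsets trick). A small, hypothetical demonstration of the mechanism; the sed pattern in the comment is an approximation for illustration, not the script from Makefile.rules:

```c
/*
 * Illustration of the "->" marker trick used by user-offsets.c above.
 * Compile with:   cc -S offsets_demo.c -o offsets_demo.s
 * Each DEFINE() leaves a line like   ->DEMO_OFF_A $0 offsetof(struct demo, a)
 * in the assembly, which a post-processing step can rewrite into
 *   #define DEMO_OFF_A 0
 * e.g.:  sed -n 's/^.*->\([^ ]*\) [$#]*\([-0-9]*\).*/#define \1 \2/p'
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo {
	int a;
	long b;
};

void foo(void)
{
	DEFINE(DEMO_SIZE,  sizeof(struct demo));
	DEFINE(DEMO_OFF_A, offsetof(struct demo, a));
	DEFINE(DEMO_OFF_B, offsetof(struct demo, b));
}
```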