path: root/arch/um
Diffstat (limited to 'arch/um')
-rw-r--r-- arch/um/Kconfig.x86 | 70
-rw-r--r-- arch/um/Makefile-i386 | 42
-rw-r--r-- arch/um/Makefile-x86_64 | 26
-rw-r--r-- arch/um/include/asm/apic.h | 4
-rw-r--r-- arch/um/include/asm/arch_hweight.h | 6
-rw-r--r-- arch/um/include/asm/auxvec.h | 4
-rw-r--r-- arch/um/include/asm/bug.h | 6
-rw-r--r-- arch/um/include/asm/checksum.h | 6
-rw-r--r-- arch/um/include/asm/cputime.h | 6
-rw-r--r-- arch/um/include/asm/current.h | 13
-rw-r--r-- arch/um/include/asm/delay.h | 18
-rw-r--r-- arch/um/include/asm/desc.h | 16
-rw-r--r-- arch/um/include/asm/device.h | 7
-rw-r--r-- arch/um/include/asm/emergency-restart.h | 6
-rw-r--r-- arch/um/include/asm/ftrace.h | 1
-rw-r--r-- arch/um/include/asm/futex.h | 6
-rw-r--r-- arch/um/include/asm/hardirq.h | 1
-rw-r--r-- arch/um/include/asm/hw_irq.h | 7
-rw-r--r-- arch/um/include/asm/io.h | 57
-rw-r--r-- arch/um/include/asm/irq_regs.h | 1
-rw-r--r-- arch/um/include/asm/irq_vectors.h | 10
-rw-r--r-- arch/um/include/asm/kdebug.h | 1
-rw-r--r-- arch/um/include/asm/mutex.h | 9
-rw-r--r-- arch/um/include/asm/page_offset.h | 1
-rw-r--r-- arch/um/include/asm/param.h | 20
-rw-r--r-- arch/um/include/asm/pci.h | 6
-rw-r--r-- arch/um/include/asm/pda.h | 21
-rw-r--r-- arch/um/include/asm/percpu.h | 6
-rw-r--r-- arch/um/include/asm/required-features.h | 9
-rw-r--r-- arch/um/include/asm/sections.h | 7
-rw-r--r-- arch/um/include/asm/segment.h | 10
-rw-r--r-- arch/um/include/asm/system.h | 47
-rw-r--r-- arch/um/include/asm/topology.h | 6
-rw-r--r-- arch/um/include/asm/xor.h | 6
-rw-r--r-- arch/um/include/shared/chan_kern.h | 50
-rw-r--r-- arch/um/include/shared/chan_user.h | 55
-rw-r--r-- arch/um/include/shared/initrd.h | 12
-rw-r--r-- arch/um/include/shared/ldt.h | 37
-rw-r--r-- arch/um/include/shared/line.h | 106
-rw-r--r-- arch/um/include/shared/mconsole.h | 98
-rw-r--r-- arch/um/include/shared/mconsole_kern.h | 52
-rw-r--r-- arch/um/include/shared/mem_kern.h | 20
-rw-r--r-- arch/um/include/shared/process.h | 17
-rw-r--r-- arch/um/include/shared/skas_ptregs.h | 6
-rw-r--r-- arch/um/include/shared/syscall.h | 12
-rw-r--r-- arch/um/include/shared/sysrq.h | 7
-rw-r--r-- arch/um/include/shared/task.h | 9
-rw-r--r-- arch/um/include/shared/tlb.h | 15
-rw-r--r-- arch/um/include/shared/ubd_user.h | 16
-rw-r--r-- arch/um/include/shared/um_mmu.h | 24
-rw-r--r-- arch/um/include/shared/um_uaccess.h | 97
-rw-r--r-- arch/um/kernel/init_task.c | 38
-rw-r--r-- arch/um/kernel/internal.h | 1
-rw-r--r-- arch/um/kernel/uaccess.c | 33
-rw-r--r-- arch/um/os-Linux/sys-i386/Makefile | 10
-rw-r--r-- arch/um/os-Linux/sys-i386/registers.c | 91
-rw-r--r-- arch/um/os-Linux/sys-i386/signal.c | 13
-rw-r--r-- arch/um/os-Linux/sys-i386/task_size.c | 139
-rw-r--r-- arch/um/os-Linux/sys-i386/tls.c | 36
-rw-r--r-- arch/um/os-Linux/sys-x86_64/Makefile | 10
-rw-r--r-- arch/um/os-Linux/sys-x86_64/prctl.c | 12
-rw-r--r-- arch/um/os-Linux/sys-x86_64/registers.c | 52
-rw-r--r-- arch/um/os-Linux/sys-x86_64/signal.c | 16
-rw-r--r-- arch/um/os-Linux/sys-x86_64/task_size.c | 5
-rw-r--r-- arch/um/os-Linux/tls.c | 35
-rw-r--r-- arch/um/os-Linux/uaccess.c | 32
-rw-r--r-- arch/um/sys-i386/Makefile | 24
-rw-r--r-- arch/um/sys-i386/asm/archparam.h | 16
-rw-r--r-- arch/um/sys-i386/asm/elf.h | 125
-rw-r--r-- arch/um/sys-i386/asm/module.h | 13
-rw-r--r-- arch/um/sys-i386/asm/processor.h | 78
-rw-r--r-- arch/um/sys-i386/asm/ptrace.h | 51
-rw-r--r-- arch/um/sys-i386/atomic64_cx8_32.S | 225
-rw-r--r-- arch/um/sys-i386/bug.c | 21
-rw-r--r-- arch/um/sys-i386/bugs.c | 76
-rw-r--r-- arch/um/sys-i386/checksum.S | 458
-rw-r--r-- arch/um/sys-i386/delay.c | 60
-rw-r--r-- arch/um/sys-i386/elfcore.c | 83
-rw-r--r-- arch/um/sys-i386/fault.c | 28
-rw-r--r-- arch/um/sys-i386/ksyms.c | 5
-rw-r--r-- arch/um/sys-i386/ldt.c | 502
-rw-r--r-- arch/um/sys-i386/mem.c | 62
-rw-r--r-- arch/um/sys-i386/ptrace.c | 228
-rw-r--r-- arch/um/sys-i386/ptrace_user.c | 21
-rw-r--r-- arch/um/sys-i386/setjmp.S | 58
-rw-r--r-- arch/um/sys-i386/shared/sysdep/archsetjmp.h | 22
-rw-r--r-- arch/um/sys-i386/shared/sysdep/barrier.h | 9
-rw-r--r-- arch/um/sys-i386/shared/sysdep/checksum.h | 201
-rw-r--r-- arch/um/sys-i386/shared/sysdep/faultinfo.h | 29
-rw-r--r-- arch/um/sys-i386/shared/sysdep/host_ldt.h | 34
-rw-r--r-- arch/um/sys-i386/shared/sysdep/kernel-offsets.h | 21
-rw-r--r-- arch/um/sys-i386/shared/sysdep/ptrace.h | 171
-rw-r--r-- arch/um/sys-i386/shared/sysdep/ptrace_user.h | 50
-rw-r--r-- arch/um/sys-i386/shared/sysdep/sc.h | 44
-rw-r--r-- arch/um/sys-i386/shared/sysdep/sigcontext.h | 26
-rw-r--r-- arch/um/sys-i386/shared/sysdep/skas_ptrace.h | 22
-rw-r--r-- arch/um/sys-i386/shared/sysdep/stub.h | 101
-rw-r--r-- arch/um/sys-i386/shared/sysdep/syscalls.h | 20
-rw-r--r-- arch/um/sys-i386/shared/sysdep/system.h | 132
-rw-r--r-- arch/um/sys-i386/shared/sysdep/tls.h | 32
-rw-r--r-- arch/um/sys-i386/shared/sysdep/vm-flags.h | 14
-rw-r--r-- arch/um/sys-i386/signal.c | 508
-rw-r--r-- arch/um/sys-i386/stub.S | 51
-rw-r--r-- arch/um/sys-i386/stub_segv.c | 17
-rw-r--r-- arch/um/sys-i386/sys_call_table.S | 28
-rw-r--r-- arch/um/sys-i386/syscalls.c | 66
-rw-r--r-- arch/um/sys-i386/sysrq.c | 101
-rw-r--r-- arch/um/sys-i386/tls.c | 396
-rw-r--r-- arch/um/sys-i386/user-offsets.c | 53
-rw-r--r-- arch/um/sys-x86_64/Makefile | 27
-rw-r--r-- arch/um/sys-x86_64/asm/archparam.h | 16
-rw-r--r-- arch/um/sys-x86_64/asm/elf.h | 132
-rw-r--r-- arch/um/sys-x86_64/asm/module.h | 20
-rw-r--r-- arch/um/sys-x86_64/asm/processor.h | 56
-rw-r--r-- arch/um/sys-x86_64/asm/ptrace.h | 72
-rw-r--r-- arch/um/sys-x86_64/bug.c | 21
-rw-r--r-- arch/um/sys-x86_64/bugs.c | 15
-rw-r--r-- arch/um/sys-x86_64/delay.c | 60
-rw-r--r-- arch/um/sys-x86_64/fault.c | 28
-rw-r--r-- arch/um/sys-x86_64/ksyms.c | 11
-rw-r--r-- arch/um/sys-x86_64/mem.c | 26
-rw-r--r-- arch/um/sys-x86_64/ptrace.c | 198
-rw-r--r-- arch/um/sys-x86_64/ptrace_user.c | 22
-rw-r--r-- arch/um/sys-x86_64/setjmp.S | 54
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/archsetjmp.h | 24
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/barrier.h | 7
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/checksum.h | 144
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/faultinfo.h | 29
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/host_ldt.h | 38
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/kernel-offsets.h | 23
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/ptrace.h | 240
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/ptrace_user.h | 77
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/sc.h | 45
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/sigcontext.h | 27
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/skas_ptrace.h | 22
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/stub.h | 107
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/syscalls.h | 33
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/system.h | 132
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/tls.h | 29
-rw-r--r-- arch/um/sys-x86_64/shared/sysdep/vm-flags.h | 15
-rw-r--r-- arch/um/sys-x86_64/signal.c | 290
-rw-r--r-- arch/um/sys-x86_64/stub.S | 66
-rw-r--r-- arch/um/sys-x86_64/stub_segv.c | 22
-rw-r--r-- arch/um/sys-x86_64/syscall_table.c | 65
-rw-r--r-- arch/um/sys-x86_64/syscalls.c | 102
-rw-r--r-- arch/um/sys-x86_64/sysrq.c | 41
-rw-r--r-- arch/um/sys-x86_64/tls.c | 17
-rw-r--r-- arch/um/sys-x86_64/user-offsets.c | 65
-rw-r--r-- arch/um/sys-x86_64/vdso/Makefile | 90
-rw-r--r-- arch/um/sys-x86_64/vdso/checkundef.sh | 10
-rw-r--r-- arch/um/sys-x86_64/vdso/um_vdso.c | 71
-rw-r--r-- arch/um/sys-x86_64/vdso/vdso-layout.lds.S | 64
-rw-r--r-- arch/um/sys-x86_64/vdso/vdso-note.S | 12
-rw-r--r-- arch/um/sys-x86_64/vdso/vdso.S | 10
-rw-r--r-- arch/um/sys-x86_64/vdso/vdso.lds.S | 32
-rw-r--r-- arch/um/sys-x86_64/vdso/vma.c | 74
156 files changed, 8681 insertions, 0 deletions
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
new file mode 100644
index 00000000000..21bebe63df6
--- /dev/null
+++ b/arch/um/Kconfig.x86
@@ -0,0 +1,70 @@
1mainmenu "User Mode Linux/$SUBARCH $KERNELVERSION Kernel Configuration"
2
3source "arch/um/Kconfig.common"
4
5menu "UML-specific options"
6
7menu "Host processor type and features"
8
9config CMPXCHG_LOCAL
10 bool
11 default n
12
13config CMPXCHG_DOUBLE
14 bool
15 default n
16
17source "arch/x86/Kconfig.cpu"
18
19endmenu
20
21config UML_X86
22 def_bool y
23 select GENERIC_FIND_FIRST_BIT
24
25config 64BIT
26 bool
27 default SUBARCH = "x86_64"
28
29config X86_32
30 def_bool !64BIT
31 select HAVE_AOUT
32
33config X86_64
34 def_bool 64BIT
35
36config RWSEM_XCHGADD_ALGORITHM
37 def_bool X86_XADD && 64BIT
38
39config RWSEM_GENERIC_SPINLOCK
40 def_bool !RWSEM_XCHGADD_ALGORITHM
41
42config 3_LEVEL_PGTABLES
43 bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT
44 default 64BIT
45 depends on EXPERIMENTAL
46 help
47 Three-level pagetables will let UML have more than 4G of physical
48 memory. All the memory that can't be mapped directly will be treated
49 as high memory.
50
51 However, this is experimental on 32-bit architectures, so if unsure say
52 N (on x86-64 it's enabled automatically, as it's safe there).
53
54config ARCH_HAS_SC_SIGNALS
55 def_bool !64BIT
56
57config ARCH_REUSE_HOST_VSYSCALL_AREA
58 def_bool !64BIT
59
60config SMP_BROKEN
61 def_bool 64BIT
62
63config GENERIC_HWEIGHT
64 def_bool y
65
66source "arch/um/Kconfig.um"
67
68endmenu
69
70source "arch/um/Kconfig.rest"
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
new file mode 100644
index 00000000000..302cbe50454
--- /dev/null
+++ b/arch/um/Makefile-i386
@@ -0,0 +1,42 @@
1core-y += arch/um/sys-i386/ arch/x86/crypto/
2
3TOP_ADDR := $(CONFIG_TOP_ADDR)
4
5START := 0x8048000
6
7LDFLAGS += -m elf_i386
8ELF_ARCH := $(SUBARCH)
9ELF_FORMAT := elf32-$(SUBARCH)
10OBJCOPYFLAGS := -O binary -R .note -R .comment -S
11HEADER_ARCH := x86
12CHECKFLAGS += -D__i386__
13
14ifeq ("$(origin SUBARCH)", "command line")
15ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)")
16KBUILD_CFLAGS += $(call cc-option,-m32)
17KBUILD_AFLAGS += $(call cc-option,-m32)
18LINK-y += $(call cc-option,-m32)
19UML_OBJCOPYFLAGS += -F $(ELF_FORMAT)
20
21export LDFLAGS HOSTCFLAGS HOSTLDFLAGS UML_OBJCOPYFLAGS
22endif
23endif
24
25# First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
26include $(srctree)/arch/x86/Makefile_32.cpu
27
28# Prevent gcc from keeping the stack 16-byte aligned. Taken from i386.
29cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
30
31# Prevent sprintf in nfsd from being converted to strcpy and resulting in
32# an unresolved reference.
33cflags-y += -ffreestanding
34
35# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
36# a lot more stack due to the lack of sharing of stacklots. Also, gcc
37# 4.3.0 needs -funit-at-a-time for extern inline functions.
38KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \
39 echo $(call cc-option,-fno-unit-at-a-time); \
40 else echo $(call cc-option,-funit-at-a-time); fi ;)
41
42KBUILD_CFLAGS += $(cflags-y)
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64
new file mode 100644
index 00000000000..a9cd7e77a7a
--- /dev/null
+++ b/arch/um/Makefile-x86_64
@@ -0,0 +1,26 @@
1# Copyright 2003 - 2004 Pathscale, Inc
2# Released under the GPL
3
4core-y += arch/um/sys-x86_64/ arch/x86/crypto/
5START := 0x60000000
6
7_extra_flags_ = -fno-builtin -m64
8
9KBUILD_CFLAGS += $(_extra_flags_)
10
11CHECKFLAGS += -m64 -D__x86_64__
12KBUILD_AFLAGS += -m64
13LDFLAGS += -m elf_x86_64
14KBUILD_CPPFLAGS += -m64
15
16ELF_ARCH := i386:x86-64
17ELF_FORMAT := elf64-x86-64
18HEADER_ARCH := x86
19
20# Not all 64-bit distros have /lib as a symlink to /lib64; PLD is an example.
21
22LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
23LINK-y += -m64
24
25# Do unit-at-a-time unconditionally on x86_64, following the host
26KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
diff --git a/arch/um/include/asm/apic.h b/arch/um/include/asm/apic.h
new file mode 100644
index 00000000000..876dee84ab1
--- /dev/null
+++ b/arch/um/include/asm/apic.h
@@ -0,0 +1,4 @@
1#ifndef __UM_APIC_H
2#define __UM_APIC_H
3
4#endif
diff --git a/arch/um/include/asm/arch_hweight.h b/arch/um/include/asm/arch_hweight.h
new file mode 100644
index 00000000000..c656cf443f4
--- /dev/null
+++ b/arch/um/include/asm/arch_hweight.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_UM_HWEIGHT_H
2#define _ASM_UM_HWEIGHT_H
3
4#include <asm-generic/bitops/arch_hweight.h>
5
6#endif
diff --git a/arch/um/include/asm/auxvec.h b/arch/um/include/asm/auxvec.h
new file mode 100644
index 00000000000..1e5e1c2fc9b
--- /dev/null
+++ b/arch/um/include/asm/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __UM_AUXVEC_H
2#define __UM_AUXVEC_H
3
4#endif
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h
new file mode 100644
index 00000000000..9e33b864c35
--- /dev/null
+++ b/arch/um/include/asm/bug.h
@@ -0,0 +1,6 @@
1#ifndef __UM_BUG_H
2#define __UM_BUG_H
3
4#include <asm-generic/bug.h>
5
6#endif
diff --git a/arch/um/include/asm/checksum.h b/arch/um/include/asm/checksum.h
new file mode 100644
index 00000000000..5b501361e36
--- /dev/null
+++ b/arch/um/include/asm/checksum.h
@@ -0,0 +1,6 @@
1#ifndef __UM_CHECKSUM_H
2#define __UM_CHECKSUM_H
3
4#include "sysdep/checksum.h"
5
6#endif
diff --git a/arch/um/include/asm/cputime.h b/arch/um/include/asm/cputime.h
new file mode 100644
index 00000000000..c84acbadfa2
--- /dev/null
+++ b/arch/um/include/asm/cputime.h
@@ -0,0 +1,6 @@
1#ifndef __UM_CPUTIME_H
2#define __UM_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __UM_CPUTIME_H */
diff --git a/arch/um/include/asm/current.h b/arch/um/include/asm/current.h
new file mode 100644
index 00000000000..c2191d9aa03
--- /dev/null
+++ b/arch/um/include/asm/current.h
@@ -0,0 +1,13 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_CURRENT_H
7#define __UM_CURRENT_H
8
9#include "linux/thread_info.h"
10
11#define current (current_thread_info()->task)
12
13#endif
diff --git a/arch/um/include/asm/delay.h b/arch/um/include/asm/delay.h
new file mode 100644
index 00000000000..8a5576d8eda
--- /dev/null
+++ b/arch/um/include/asm/delay.h
@@ -0,0 +1,18 @@
1#ifndef __UM_DELAY_H
2#define __UM_DELAY_H
3
4/* Undefined on purpose */
5extern void __bad_udelay(void);
6extern void __bad_ndelay(void);
7
8extern void __udelay(unsigned long usecs);
9extern void __ndelay(unsigned long nsecs);
10extern void __delay(unsigned long loops);
11
12#define udelay(n) ((__builtin_constant_p(n) && (n) > 20000) ? \
13 __bad_udelay() : __udelay(n))
14
15#define ndelay(n) ((__builtin_constant_p(n) && (n) > 20000) ? \
16 __bad_ndelay() : __ndelay(n))
17
18#endif
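The __bad_udelay()/__bad_ndelay() declarations above are deliberately never defined: when a compile-time-constant delay exceeds the limit, the call to the undefined symbol survives into the object file and the build fails at link time; for valid delays the dead branch is folded away. A minimal stand-alone sketch of the same pattern, with illustrative names (my_delay, __bad_my_delay, busy_wait) that are not part of this tree:

/* delay_trap_demo.c - build with: cc -O2 delay_trap_demo.c */
extern void __bad_my_delay(void);	/* deliberately left undefined */

static void busy_wait(unsigned long usecs)
{
	volatile unsigned long i;

	for (i = 0; i < usecs * 100; i++)
		;			/* stand-in for a calibrated loop */
}

#define MY_DELAY_MAX 20000UL
#define my_delay(n)							\
	((__builtin_constant_p(n) && (n) > MY_DELAY_MAX) ?		\
		__bad_my_delay() : busy_wait(n))

int main(void)
{
	my_delay(100);		/* constant under the limit: links fine */
	/* my_delay(50000); would leave __bad_my_delay() unresolved */
	return 0;
}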
diff --git a/arch/um/include/asm/desc.h b/arch/um/include/asm/desc.h
new file mode 100644
index 00000000000..4ec34a51b62
--- /dev/null
+++ b/arch/um/include/asm/desc.h
@@ -0,0 +1,16 @@
1#ifndef __UM_DESC_H
2#define __UM_DESC_H
3
4/* Taken from asm-i386/desc.h, it's the only thing we need. The rest wouldn't
5 * compile, and has never been used. */
6#define LDT_empty(info) (\
7 (info)->base_addr == 0 && \
8 (info)->limit == 0 && \
9 (info)->contents == 0 && \
10 (info)->read_exec_only == 1 && \
11 (info)->seg_32bit == 0 && \
12 (info)->limit_in_pages == 0 && \
13 (info)->seg_not_present == 1 && \
14 (info)->useable == 0 )
15
16#endif
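For reference, what LDT_empty() accepts: an all-zero descriptor does not qualify, because read_exec_only and seg_not_present must both be 1. A small host-side sketch (the local struct here mirrors only the fields the macro reads; the real layout is struct user_desc from the host's <asm/ldt.h>):

/* ldt_empty_demo.c */
#include <stdio.h>
#include <string.h>

struct demo_desc {
	unsigned int entry_number;
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};

#define LDT_empty(info) (\
	(info)->base_addr == 0 && \
	(info)->limit == 0 && \
	(info)->contents == 0 && \
	(info)->read_exec_only == 1 && \
	(info)->seg_32bit == 0 && \
	(info)->limit_in_pages == 0 && \
	(info)->seg_not_present == 1 && \
	(info)->useable == 0 )

int main(void)
{
	struct demo_desc d;

	memset(&d, 0, sizeof(d));
	printf("all zero:  %s\n", LDT_empty(&d) ? "empty" : "not empty");

	d.read_exec_only = 1;
	d.seg_not_present = 1;
	printf("canonical: %s\n", LDT_empty(&d) ? "empty" : "not empty");
	return 0;
}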
diff --git a/arch/um/include/asm/device.h b/arch/um/include/asm/device.h
new file mode 100644
index 00000000000..d8f9872b0e2
--- /dev/null
+++ b/arch/um/include/asm/device.h
@@ -0,0 +1,7 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#include <asm-generic/device.h>
7
diff --git a/arch/um/include/asm/emergency-restart.h b/arch/um/include/asm/emergency-restart.h
new file mode 100644
index 00000000000..108d8c48e42
--- /dev/null
+++ b/arch/um/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/um/include/asm/ftrace.h b/arch/um/include/asm/ftrace.h
new file mode 100644
index 00000000000..40a8c178f10
--- /dev/null
+++ b/arch/um/include/asm/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/um/include/asm/futex.h b/arch/um/include/asm/futex.h
new file mode 100644
index 00000000000..6a332a9f099
--- /dev/null
+++ b/arch/um/include/asm/futex.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#include <asm-generic/futex.h>
5
6#endif
diff --git a/arch/um/include/asm/hardirq.h b/arch/um/include/asm/hardirq.h
new file mode 100644
index 00000000000..fb3c05a0cbb
--- /dev/null
+++ b/arch/um/include/asm/hardirq.h
@@ -0,0 +1 @@
#include <asm-generic/hardirq.h>
diff --git a/arch/um/include/asm/hw_irq.h b/arch/um/include/asm/hw_irq.h
new file mode 100644
index 00000000000..1cf84cf5f21
--- /dev/null
+++ b/arch/um/include/asm/hw_irq.h
@@ -0,0 +1,7 @@
1#ifndef _ASM_UM_HW_IRQ_H
2#define _ASM_UM_HW_IRQ_H
3
4#include "asm/irq.h"
5#include "asm/archparam.h"
6
7#endif
diff --git a/arch/um/include/asm/io.h b/arch/um/include/asm/io.h
new file mode 100644
index 00000000000..44e8b8c772a
--- /dev/null
+++ b/arch/um/include/asm/io.h
@@ -0,0 +1,57 @@
1#ifndef __UM_IO_H
2#define __UM_IO_H
3
4#include "asm/page.h"
5
6#define IO_SPACE_LIMIT 0xdeadbeef /* Sure hope nothing uses this */
7
8static inline int inb(unsigned long i) { return(0); }
9static inline void outb(char c, unsigned long i) { }
10
11/*
12 * Change virtual addresses to physical addresses and vv.
13 * These are pretty trivial
14 */
15static inline unsigned long virt_to_phys(volatile void * address)
16{
17 return __pa((void *) address);
18}
19
20static inline void * phys_to_virt(unsigned long address)
21{
22 return __va(address);
23}
24
25/*
26 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
27 * access
28 */
29#define xlate_dev_mem_ptr(p) __va(p)
30
31/*
32 * Convert a virtual cached pointer to an uncached pointer
33 */
34#define xlate_dev_kmem_ptr(p) p
35
36static inline void writeb(unsigned char b, volatile void __iomem *addr)
37{
38 *(volatile unsigned char __force *) addr = b;
39}
40static inline void writew(unsigned short b, volatile void __iomem *addr)
41{
42 *(volatile unsigned short __force *) addr = b;
43}
44static inline void writel(unsigned int b, volatile void __iomem *addr)
45{
46 *(volatile unsigned int __force *) addr = b;
47}
48static inline void writeq(unsigned long long b, volatile void __iomem *addr)
49{
50 *(volatile unsigned long long __force *) addr = b;
51}
52#define __raw_writeb writeb
53#define __raw_writew writew
54#define __raw_writel writel
55#define __raw_writeq writeq
56
57#endif
diff --git a/arch/um/include/asm/irq_regs.h b/arch/um/include/asm/irq_regs.h
new file mode 100644
index 00000000000..3dd9c0b7027
--- /dev/null
+++ b/arch/um/include/asm/irq_regs.h
@@ -0,0 +1 @@
#include <asm-generic/irq_regs.h>
diff --git a/arch/um/include/asm/irq_vectors.h b/arch/um/include/asm/irq_vectors.h
new file mode 100644
index 00000000000..272a81e0ce1
--- /dev/null
+++ b/arch/um/include/asm/irq_vectors.h
@@ -0,0 +1,10 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_IRQ_VECTORS_H
7#define __UM_IRQ_VECTORS_H
8
9#endif
10
diff --git a/arch/um/include/asm/kdebug.h b/arch/um/include/asm/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/arch/um/include/asm/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/arch/um/include/asm/mutex.h b/arch/um/include/asm/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/arch/um/include/asm/mutex.h
@@ -0,0 +1,9 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/um/include/asm/page_offset.h b/arch/um/include/asm/page_offset.h
new file mode 100644
index 00000000000..1c168dfbf35
--- /dev/null
+++ b/arch/um/include/asm/page_offset.h
@@ -0,0 +1 @@
#define PAGE_OFFSET_RAW (uml_physmem)
diff --git a/arch/um/include/asm/param.h b/arch/um/include/asm/param.h
new file mode 100644
index 00000000000..e44f4e60d16
--- /dev/null
+++ b/arch/um/include/asm/param.h
@@ -0,0 +1,20 @@
1#ifndef _UM_PARAM_H
2#define _UM_PARAM_H
3
4#define EXEC_PAGESIZE 4096
5
6#ifndef NOGROUP
7#define NOGROUP (-1)
8#endif
9
10#define MAXHOSTNAMELEN 64 /* max length of hostname */
11
12#ifdef __KERNEL__
13#define HZ CONFIG_HZ
14#define USER_HZ 100 /* .. some user interfaces are in "ticks" */
15#define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
16#else
17#define HZ 100
18#endif
19
20#endif
diff --git a/arch/um/include/asm/pci.h b/arch/um/include/asm/pci.h
new file mode 100644
index 00000000000..b44cf59ede1
--- /dev/null
+++ b/arch/um/include/asm/pci.h
@@ -0,0 +1,6 @@
1#ifndef __UM_PCI_H
2#define __UM_PCI_H
3
4#define PCI_DMA_BUS_IS_PHYS (1)
5
6#endif
diff --git a/arch/um/include/asm/pda.h b/arch/um/include/asm/pda.h
new file mode 100644
index 00000000000..ddcd774fc2a
--- /dev/null
+++ b/arch/um/include/asm/pda.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_PDA_X86_64_H
8#define __UM_PDA_X86_64_H
9
10/* XXX */
11struct foo {
12 unsigned int __softirq_pending;
13 unsigned int __nmi_count;
14};
15
16extern struct foo me;
17
18#define read_pda(me) (&me)
19
20#endif
21
diff --git a/arch/um/include/asm/percpu.h b/arch/um/include/asm/percpu.h
new file mode 100644
index 00000000000..efe7508d8ab
--- /dev/null
+++ b/arch/um/include/asm/percpu.h
@@ -0,0 +1,6 @@
1#ifndef __UM_PERCPU_H
2#define __UM_PERCPU_H
3
4#include <asm-generic/percpu.h>
5
6#endif /* __UM_PERCPU_H */
diff --git a/arch/um/include/asm/required-features.h b/arch/um/include/asm/required-features.h
new file mode 100644
index 00000000000..dfb967b2d2f
--- /dev/null
+++ b/arch/um/include/asm/required-features.h
@@ -0,0 +1,9 @@
1#ifndef __UM_REQUIRED_FEATURES_H
2#define __UM_REQUIRED_FEATURES_H
3
4/*
5 * Nothing to see, just need something for the i386 and x86_64 asm
6 * headers to include.
7 */
8
9#endif
diff --git a/arch/um/include/asm/sections.h b/arch/um/include/asm/sections.h
new file mode 100644
index 00000000000..6b0231eefea
--- /dev/null
+++ b/arch/um/include/asm/sections.h
@@ -0,0 +1,7 @@
1#ifndef _UM_SECTIONS_H
2#define _UM_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7#endif
diff --git a/arch/um/include/asm/segment.h b/arch/um/include/asm/segment.h
new file mode 100644
index 00000000000..45183fcd10b
--- /dev/null
+++ b/arch/um/include/asm/segment.h
@@ -0,0 +1,10 @@
1#ifndef __UM_SEGMENT_H
2#define __UM_SEGMENT_H
3
4extern int host_gdt_entry_tls_min;
5
6#define GDT_ENTRY_TLS_ENTRIES 3
7#define GDT_ENTRY_TLS_MIN host_gdt_entry_tls_min
8#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9
10#endif
diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h
new file mode 100644
index 00000000000..68a90ecd145
--- /dev/null
+++ b/arch/um/include/asm/system.h
@@ -0,0 +1,47 @@
1#ifndef __UM_SYSTEM_GENERIC_H
2#define __UM_SYSTEM_GENERIC_H
3
4#include "sysdep/system.h"
5
6extern int get_signals(void);
7extern int set_signals(int enable);
8extern void block_signals(void);
9extern void unblock_signals(void);
10
11static inline unsigned long arch_local_save_flags(void)
12{
13 return get_signals();
14}
15
16static inline void arch_local_irq_restore(unsigned long flags)
17{
18 set_signals(flags);
19}
20
21static inline void arch_local_irq_enable(void)
22{
23 unblock_signals();
24}
25
26static inline void arch_local_irq_disable(void)
27{
28 block_signals();
29}
30
31static inline unsigned long arch_local_irq_save(void)
32{
33 unsigned long flags;
34 flags = arch_local_save_flags();
35 arch_local_irq_disable();
36 return flags;
37}
38
39static inline bool arch_irqs_disabled(void)
40{
41 return arch_local_save_flags() == 0;
42}
43
44extern void *_switch_to(void *prev, void *next, void *last);
45#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
46
47#endif
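These helpers show UML's interrupt model: the "IRQ flags" are really host signal state, with get_signals()/block_signals()/unblock_signals() implemented over the host signal mask in os-Linux code not shown here. A user-space analogue of the save/disable/restore discipline using plain sigprocmask() (the names save_and_block/restore and the choice of SIGIO/SIGALRM as the "interrupt" signals are illustrative):

/* irqsave_demo.c */
#include <signal.h>
#include <stdio.h>

static sigset_t irq_sigs;

static int save_and_block(void)		/* ~ arch_local_irq_save() */
{
	sigset_t old;

	sigprocmask(SIG_BLOCK, &irq_sigs, &old);
	return !sigismember(&old, SIGIO);	/* 1 == was enabled */
}

static void restore(int flags)		/* ~ arch_local_irq_restore() */
{
	if (flags)
		sigprocmask(SIG_UNBLOCK, &irq_sigs, NULL);
}

int main(void)
{
	int flags;

	sigemptyset(&irq_sigs);
	sigaddset(&irq_sigs, SIGIO);
	sigaddset(&irq_sigs, SIGALRM);

	flags = save_and_block();
	/* critical section: no SIGIO/SIGALRM delivery in here */
	restore(flags);
	printf("flags were %d\n", flags);
	return 0;
}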
diff --git a/arch/um/include/asm/topology.h b/arch/um/include/asm/topology.h
new file mode 100644
index 00000000000..0905e4f21d4
--- /dev/null
+++ b/arch/um/include/asm/topology.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_UM_TOPOLOGY_H
2#define _ASM_UM_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif
diff --git a/arch/um/include/asm/xor.h b/arch/um/include/asm/xor.h
new file mode 100644
index 00000000000..a19db3e1724
--- /dev/null
+++ b/arch/um/include/asm/xor.h
@@ -0,0 +1,6 @@
1#ifndef __UM_XOR_H
2#define __UM_XOR_H
3
4#include "asm-generic/xor.h"
5
6#endif
diff --git a/arch/um/include/shared/chan_kern.h b/arch/um/include/shared/chan_kern.h
new file mode 100644
index 00000000000..1e651457e04
--- /dev/null
+++ b/arch/um/include/shared/chan_kern.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __CHAN_KERN_H__
7#define __CHAN_KERN_H__
8
9#include "linux/tty.h"
10#include "linux/list.h"
11#include "linux/console.h"
12#include "chan_user.h"
13#include "line.h"
14
15struct chan {
16 struct list_head list;
17 struct list_head free_list;
18 struct line *line;
19 char *dev;
20 unsigned int primary:1;
21 unsigned int input:1;
22 unsigned int output:1;
23 unsigned int opened:1;
24 unsigned int enabled:1;
25 int fd;
26 const struct chan_ops *ops;
27 void *data;
28};
29
30extern void chan_interrupt(struct list_head *chans, struct delayed_work *task,
31 struct tty_struct *tty, int irq);
32extern int parse_chan_pair(char *str, struct line *line, int device,
33 const struct chan_opts *opts, char **error_out);
34extern int write_chan(struct list_head *chans, const char *buf, int len,
35 int write_irq);
36extern int console_write_chan(struct list_head *chans, const char *buf,
37 int len);
38extern int console_open_chan(struct line *line, struct console *co);
39extern void deactivate_chan(struct list_head *chans, int irq);
40extern void reactivate_chan(struct list_head *chans, int irq);
41extern void chan_enable_winch(struct list_head *chans, struct tty_struct *tty);
42extern int enable_chan(struct line *line);
43extern void close_chan(struct list_head *chans, int delay_free_irq);
44extern int chan_window_size(struct list_head *chans,
45 unsigned short *rows_out,
46 unsigned short *cols_out);
47extern int chan_config_string(struct list_head *chans, char *str, int size,
48 char **error_out);
49
50#endif
diff --git a/arch/um/include/shared/chan_user.h b/arch/um/include/shared/chan_user.h
new file mode 100644
index 00000000000..9b9ced85b70
--- /dev/null
+++ b/arch/um/include/shared/chan_user.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __CHAN_USER_H__
7#define __CHAN_USER_H__
8
9#include "init.h"
10
11struct chan_opts {
12 void (*const announce)(char *dev_name, int dev);
13 char *xterm_title;
14 const int raw;
15};
16
17enum chan_init_pri { INIT_STATIC, INIT_ALL, INIT_ONE };
18
19struct chan_ops {
20 char *type;
21 void *(*init)(char *, int, const struct chan_opts *);
22 int (*open)(int, int, int, void *, char **);
23 void (*close)(int, void *);
24 int (*read)(int, char *, void *);
25 int (*write)(int, const char *, int, void *);
26 int (*console_write)(int, const char *, int);
27 int (*window_size)(int, void *, unsigned short *, unsigned short *);
28 void (*free)(void *);
29 int winch;
30};
31
32extern const struct chan_ops fd_ops, null_ops, port_ops, pts_ops, pty_ops,
33 tty_ops, xterm_ops;
34
35extern void generic_close(int fd, void *unused);
36extern int generic_read(int fd, char *c_out, void *unused);
37extern int generic_write(int fd, const char *buf, int n, void *unused);
38extern int generic_console_write(int fd, const char *buf, int n);
39extern int generic_window_size(int fd, void *unused, unsigned short *rows_out,
40 unsigned short *cols_out);
41extern void generic_free(void *data);
42
43struct tty_struct;
44extern void register_winch(int fd, struct tty_struct *tty);
45extern void register_winch_irq(int fd, int tty_fd, int pid,
46 struct tty_struct *tty, unsigned long stack);
47
48#define __channel_help(fn, prefix) \
49__uml_help(fn, prefix "[0-9]*=<channel description>\n" \
50" Attach a console or serial line to a host channel. See\n" \
51" http://user-mode-linux.sourceforge.net/old/input.html for a complete\n" \
52" description of this switch.\n\n" \
53);
54
55#endif
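struct chan_ops is a plain function-pointer table; each channel type (fd, pty, xterm, ...) fills in the slots it supports. A stripped-down stand-alone version of the same idea with a do-nothing "null" channel (all names here are illustrative and the table carries only a subset of the slots above):

/* chan_ops_demo.c */
#include <stdio.h>

struct demo_chan_ops {
	const char *type;
	int (*write)(int fd, const char *buf, int n, void *data);
	void (*close)(int fd, void *data);
};

static int null_write(int fd, const char *buf, int n, void *data)
{
	(void) fd; (void) buf; (void) data;
	return n;			/* pretend everything was written */
}

static void null_close(int fd, void *data)
{
	(void) fd; (void) data;
}

static const struct demo_chan_ops null_demo_ops = {
	.type	= "null",
	.write	= null_write,
	.close	= null_close,
};

int main(void)
{
	const struct demo_chan_ops *ops = &null_demo_ops;
	int n = ops->write(1, "discarded\n", 10, NULL);

	printf("%s channel accepted %d bytes\n", ops->type, n);
	ops->close(1, NULL);
	return 0;
}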
diff --git a/arch/um/include/shared/initrd.h b/arch/um/include/shared/initrd.h
new file mode 100644
index 00000000000..22673bcc273
--- /dev/null
+++ b/arch/um/include/shared/initrd.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __INITRD_USER_H__
7#define __INITRD_USER_H__
8
9extern int load_initrd(char *filename, void *buf, int size);
10
11#endif
12
diff --git a/arch/um/include/shared/ldt.h b/arch/um/include/shared/ldt.h
new file mode 100644
index 00000000000..a7f999a5877
--- /dev/null
+++ b/arch/um/include/shared/ldt.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
3 * Licensed under the GPL
4 *
5 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
6 */
7
8#ifndef __ASM_LDT_H
9#define __ASM_LDT_H
10
11#include <linux/mutex.h>
12#include <sysdep/host_ldt.h>
13
14extern void ldt_host_info(void);
15
16#define LDT_PAGES_MAX \
17 ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
18#define LDT_ENTRIES_PER_PAGE \
19 (PAGE_SIZE/LDT_ENTRY_SIZE)
20#define LDT_DIRECT_ENTRIES \
21 ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
22
23struct ldt_entry {
24 __u32 a;
25 __u32 b;
26};
27
28typedef struct uml_ldt {
29 int entry_count;
30 struct mutex lock;
31 union {
32 struct ldt_entry * pages[LDT_PAGES_MAX];
33 struct ldt_entry entries[LDT_DIRECT_ENTRIES];
34 } u;
35} uml_ldt_t;
36
37#endif
diff --git a/arch/um/include/shared/line.h b/arch/um/include/shared/line.h
new file mode 100644
index 00000000000..63df3ca02ac
--- /dev/null
+++ b/arch/um/include/shared/line.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __LINE_H__
7#define __LINE_H__
8
9#include "linux/list.h"
10#include "linux/workqueue.h"
11#include "linux/tty.h"
12#include "linux/interrupt.h"
13#include "linux/spinlock.h"
14#include "linux/mutex.h"
15#include "chan_user.h"
16#include "mconsole_kern.h"
17
18/* There's only one modifiable field in this - .mc.list */
19struct line_driver {
20 const char *name;
21 const char *device_name;
22 const short major;
23 const short minor_start;
24 const short type;
25 const short subtype;
26 const int read_irq;
27 const char *read_irq_name;
28 const int write_irq;
29 const char *write_irq_name;
30 struct mc_device mc;
31};
32
33struct line {
34 struct tty_struct *tty;
35 spinlock_t count_lock;
36 unsigned long count;
37 int valid;
38
39 char *init_str;
40 int init_pri;
41 struct list_head chan_list;
42
43 /*This lock is actually, mostly, local to*/
44 spinlock_t lock;
45 int throttled;
46 /* Yes, this is a real circular buffer.
47 * XXX: And this should become a struct kfifo!
48 *
49 * buffer points to a buffer allocated on demand, of length
50 * LINE_BUFSIZE, head to the start of the ring, tail to the end.*/
51 char *buffer;
52 char *head;
53 char *tail;
54
55 int sigio;
56 struct delayed_work task;
57 const struct line_driver *driver;
58 int have_irq;
59};
60
61#define LINE_INIT(str, d) \
62 { .count_lock = __SPIN_LOCK_UNLOCKED((str).count_lock), \
63 .init_str = str, \
64 .init_pri = INIT_STATIC, \
65 .valid = 1, \
66 .lock = __SPIN_LOCK_UNLOCKED((str).lock), \
67 .driver = d }
68
69extern void line_close(struct tty_struct *tty, struct file * filp);
70extern int line_open(struct line *lines, struct tty_struct *tty);
71extern int line_setup(struct line *lines, unsigned int sizeof_lines,
72 char *init, char **error_out);
73extern int line_write(struct tty_struct *tty, const unsigned char *buf,
74 int len);
75extern int line_put_char(struct tty_struct *tty, unsigned char ch);
76extern void line_set_termios(struct tty_struct *tty, struct ktermios * old);
77extern int line_chars_in_buffer(struct tty_struct *tty);
78extern void line_flush_buffer(struct tty_struct *tty);
79extern void line_flush_chars(struct tty_struct *tty);
80extern int line_write_room(struct tty_struct *tty);
81extern int line_ioctl(struct tty_struct *tty, unsigned int cmd,
82 unsigned long arg);
83extern void line_throttle(struct tty_struct *tty);
84extern void line_unthrottle(struct tty_struct *tty);
85
86extern char *add_xterm_umid(char *base);
87extern int line_setup_irq(int fd, int input, int output, struct line *line,
88 void *data);
89extern void line_close_chan(struct line *line);
90extern struct tty_driver *register_lines(struct line_driver *line_driver,
91 const struct tty_operations *driver,
92 struct line *lines, int nlines);
93extern void lines_init(struct line *lines, int nlines, struct chan_opts *opts);
94extern void close_lines(struct line *lines, int nlines);
95
96extern int line_config(struct line *lines, unsigned int sizeof_lines,
97 char *str, const struct chan_opts *opts,
98 char **error_out);
99extern int line_id(char **str, int *start_out, int *end_out);
100extern int line_remove(struct line *lines, unsigned int sizeof_lines, int n,
101 char **error_out);
102extern int line_get_config(char *dev, struct line *lines,
103 unsigned int sizeof_lines, char *str,
104 int size, char **error_out);
105
106#endif
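The buffer/head/tail trio above is the classic byte ring that the XXX comment wants turned into a kfifo. A minimal stand-alone ring with the same wrap-around behaviour (RING_SIZE and the function names are illustrative; the driver's LINE_BUFSIZE and its locking are not modelled):

/* ring_demo.c */
#include <stdio.h>

#define RING_SIZE 8

static char ring[RING_SIZE];
static char *head = ring;		/* next byte to consume */
static char *tail = ring;		/* next free slot */
static int used;

static int ring_put(char c)
{
	if (used == RING_SIZE)
		return 0;		/* full: the tty would be throttled */
	*tail = c;
	if (++tail == ring + RING_SIZE)
		tail = ring;		/* wrap */
	used++;
	return 1;
}

static int ring_get(char *c)
{
	if (used == 0)
		return 0;
	*c = *head;
	if (++head == ring + RING_SIZE)
		head = ring;
	used--;
	return 1;
}

int main(void)
{
	const char *msg = "ring";
	char c;

	while (*msg && ring_put(*msg))
		msg++;
	while (ring_get(&c))
		putchar(c);
	putchar('\n');
	return 0;
}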
diff --git a/arch/um/include/shared/mconsole.h b/arch/um/include/shared/mconsole.h
new file mode 100644
index 00000000000..c139ae1d682
--- /dev/null
+++ b/arch/um/include/shared/mconsole.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org)
3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Licensed under the GPL
5 */
6
7#ifndef __MCONSOLE_H__
8#define __MCONSOLE_H__
9
10#ifndef __KERNEL__
11#include <stdint.h>
12#define u32 uint32_t
13#endif
14
15#include "sysdep/ptrace.h"
16
17#define MCONSOLE_MAGIC (0xcafebabe)
18#define MCONSOLE_MAX_DATA (512)
19#define MCONSOLE_VERSION 2
20
21struct mconsole_request {
22 u32 magic;
23 u32 version;
24 u32 len;
25 char data[MCONSOLE_MAX_DATA];
26};
27
28struct mconsole_reply {
29 u32 err;
30 u32 more;
31 u32 len;
32 char data[MCONSOLE_MAX_DATA];
33};
34
35struct mconsole_notify {
36 u32 magic;
37 u32 version;
38 enum { MCONSOLE_SOCKET, MCONSOLE_PANIC, MCONSOLE_HANG,
39 MCONSOLE_USER_NOTIFY } type;
40 u32 len;
41 char data[MCONSOLE_MAX_DATA];
42};
43
44struct mc_request;
45
46enum mc_context { MCONSOLE_INTR, MCONSOLE_PROC };
47
48struct mconsole_command
49{
50 char *command;
51 void (*handler)(struct mc_request *req);
52 enum mc_context context;
53};
54
55struct mc_request
56{
57 int len;
58 int as_interrupt;
59
60 int originating_fd;
61 unsigned int originlen;
62 unsigned char origin[128]; /* sockaddr_un */
63
64 struct mconsole_request request;
65 struct mconsole_command *cmd;
66 struct uml_pt_regs regs;
67};
68
69extern char mconsole_socket_name[];
70
71extern int mconsole_unlink_socket(void);
72extern int mconsole_reply_len(struct mc_request *req, const char *reply,
73 int len, int err, int more);
74extern int mconsole_reply(struct mc_request *req, const char *str, int err,
75 int more);
76
77extern void mconsole_version(struct mc_request *req);
78extern void mconsole_help(struct mc_request *req);
79extern void mconsole_halt(struct mc_request *req);
80extern void mconsole_reboot(struct mc_request *req);
81extern void mconsole_config(struct mc_request *req);
82extern void mconsole_remove(struct mc_request *req);
83extern void mconsole_sysrq(struct mc_request *req);
84extern void mconsole_cad(struct mc_request *req);
85extern void mconsole_stop(struct mc_request *req);
86extern void mconsole_go(struct mc_request *req);
87extern void mconsole_log(struct mc_request *req);
88extern void mconsole_proc(struct mc_request *req);
89extern void mconsole_stack(struct mc_request *req);
90
91extern int mconsole_get_request(int fd, struct mc_request *req);
92extern int mconsole_notify(char *sock_name, int type, const void *data,
93 int len);
94extern char *mconsole_notify_socket(void);
95extern void lock_notify(void);
96extern void unlock_notify(void);
97
98#endif
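On the wire, a management request is just this fixed-size struct sent over a SOCK_DGRAM UNIX socket, and the reply comes back as a struct mconsole_reply on the same socket. A rough sketch of a client sending the "version" command (the socket path is illustrative - a running UML normally creates it under ~/.uml/<umid>/mconsole - and error handling is minimal):

/* mconsole_client_demo.c */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#define MCONSOLE_MAGIC    0xcafebabe
#define MCONSOLE_VERSION  2
#define MCONSOLE_MAX_DATA 512

struct mconsole_request {
	uint32_t magic;
	uint32_t version;
	uint32_t len;
	char data[MCONSOLE_MAX_DATA];
};

int main(void)
{
	struct mconsole_request req;
	struct sockaddr_un addr;
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.magic = MCONSOLE_MAGIC;
	req.version = MCONSOLE_VERSION;
	strcpy(req.data, "version");	/* ask the UML instance for its version */
	req.len = strlen(req.data);

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	strncpy(addr.sun_path, "/tmp/uml/mconsole", sizeof(addr.sun_path) - 1);

	if (sendto(fd, &req, sizeof(req), 0,
		   (struct sockaddr *) &addr, sizeof(addr)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}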
diff --git a/arch/um/include/shared/mconsole_kern.h b/arch/um/include/shared/mconsole_kern.h
new file mode 100644
index 00000000000..d2fe07e7895
--- /dev/null
+++ b/arch/um/include/shared/mconsole_kern.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __MCONSOLE_KERN_H__
7#define __MCONSOLE_KERN_H__
8
9#include "linux/list.h"
10#include "mconsole.h"
11
12struct mconsole_entry {
13 struct list_head list;
14 struct mc_request request;
15};
16
17/* All these methods are called in process context. */
18struct mc_device {
19 struct list_head list;
20 char *name;
21 int (*config)(char *, char **);
22 int (*get_config)(char *, char *, int, char **);
23 int (*id)(char **, int *, int *);
24 int (*remove)(int, char **);
25};
26
27#define CONFIG_CHUNK(str, size, current, chunk, end) \
28do { \
29 current += strlen(chunk); \
30 if(current >= size) \
31 str = NULL; \
32 if(str != NULL){ \
33 strcpy(str, chunk); \
34 str += strlen(chunk); \
35 } \
36 if(end) \
37 current++; \
38} while(0)
39
40#ifdef CONFIG_MCONSOLE
41
42extern void mconsole_register_dev(struct mc_device *new);
43
44#else
45
46static inline void mconsole_register_dev(struct mc_device *new)
47{
48}
49
50#endif
51
52#endif
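CONFIG_CHUNK() is how a device's get_config handler streams pieces of its configuration into a caller-supplied buffer while keeping a running byte count, so the caller can detect truncation and retry with a bigger buffer. A small user-space sketch of that bookkeeping (the "ubd0=" strings are just sample chunks):

/* config_chunk_demo.c */
#include <stdio.h>
#include <string.h>

#define CONFIG_CHUNK(str, size, current, chunk, end) \
do { \
	current += strlen(chunk); \
	if(current >= size) \
		str = NULL; \
	if(str != NULL){ \
		strcpy(str, chunk); \
		str += strlen(chunk); \
	} \
	if(end) \
		current++; \
} while(0)

int main(void)
{
	char buf[32], *str = buf;
	int n = 0;

	CONFIG_CHUNK(str, sizeof(buf), n, "ubd0=", 0);
	CONFIG_CHUNK(str, sizeof(buf), n, "root_fs.img", 1);

	/* n is the number of bytes needed (including the NUL when end != 0);
	 * str has gone NULL if buf was too small. */
	printf("needed %d bytes: \"%s\"\n", n, buf);
	return 0;
}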
diff --git a/arch/um/include/shared/mem_kern.h b/arch/um/include/shared/mem_kern.h
new file mode 100644
index 00000000000..69be0fd0ce4
--- /dev/null
+++ b/arch/um/include/shared/mem_kern.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright (C) 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __MEM_KERN_H__
7#define __MEM_KERN_H__
8
9#include "linux/list.h"
10#include "linux/types.h"
11
12struct remapper {
13 struct list_head list;
14 int (*proc)(int, unsigned long, int, __u64);
15};
16
17extern void register_remapper(struct remapper *info);
18
19#endif
20
diff --git a/arch/um/include/shared/process.h b/arch/um/include/shared/process.h
new file mode 100644
index 00000000000..bb873a51262
--- /dev/null
+++ b/arch/um/include/shared/process.h
@@ -0,0 +1,17 @@
1/*
2 * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __PROCESS_H__
7#define __PROCESS_H__
8
9#include <signal.h>
10
11/* Copied from linux/compiler-gcc.h since we can't include it directly */
12#define barrier() __asm__ __volatile__("": : :"memory")
13
14extern void sig_handler(int sig, struct sigcontext *sc);
15extern void alarm_handler(int sig, struct sigcontext *sc);
16
17#endif
diff --git a/arch/um/include/shared/skas_ptregs.h b/arch/um/include/shared/skas_ptregs.h
new file mode 100644
index 00000000000..73db19e9c07
--- /dev/null
+++ b/arch/um/include/shared/skas_ptregs.h
@@ -0,0 +1,6 @@
1#ifndef __SKAS_PT_REGS_
2#define __SKAS_PT_REGS_
3
4#include <user_constants.h>
5
6#endif
diff --git a/arch/um/include/shared/syscall.h b/arch/um/include/shared/syscall.h
new file mode 100644
index 00000000000..dda1df901a0
--- /dev/null
+++ b/arch/um/include/shared/syscall.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSCALL_USER_H
7#define __SYSCALL_USER_H
8
9extern int record_syscall_start(int syscall);
10extern void record_syscall_end(int index, long result);
11
12#endif
diff --git a/arch/um/include/shared/sysrq.h b/arch/um/include/shared/sysrq.h
new file mode 100644
index 00000000000..c8d332b56b9
--- /dev/null
+++ b/arch/um/include/shared/sysrq.h
@@ -0,0 +1,7 @@
1#ifndef __UM_SYSRQ_H
2#define __UM_SYSRQ_H
3
4struct task_struct;
5extern void show_trace(struct task_struct* task, unsigned long *stack);
6
7#endif
diff --git a/arch/um/include/shared/task.h b/arch/um/include/shared/task.h
new file mode 100644
index 00000000000..3fe726b3cf4
--- /dev/null
+++ b/arch/um/include/shared/task.h
@@ -0,0 +1,9 @@
1#ifndef __TASK_H
2#define __TASK_H
3
4#include <kern_constants.h>
5
6#define TASK_REGS(task) ((struct uml_pt_regs *) &(((char *) (task))[HOST_TASK_REGS]))
7#define TASK_PID(task) *((int *) &(((char *) (task))[HOST_TASK_PID]))
8
9#endif
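TASK_REGS() and TASK_PID() reach into struct task_struct purely by byte offset: HOST_TASK_REGS and HOST_TASK_PID are generated into kern_constants.h by the user-offsets/kernel-offsets build step, so userspace-side code never needs the kernel struct definition. The same trick in miniature, with offsetof standing in for the generated constants (struct demo_task is invented for the example):

/* offset_access_demo.c */
#include <stdio.h>
#include <stddef.h>

struct demo_task {
	long state;
	int pid;
	double regs[4];
};

#define DEMO_TASK_PID	offsetof(struct demo_task, pid)
#define DEMO_TASK_REGS	offsetof(struct demo_task, regs)

#define TASK_PID(task)	(*((int *) &(((char *) (task))[DEMO_TASK_PID])))
#define TASK_REGS(task)	((double *) &(((char *) (task))[DEMO_TASK_REGS]))

int main(void)
{
	struct demo_task t = { .state = 0, .pid = 42, .regs = { 1.0, 2.0 } };

	printf("pid = %d, regs[1] = %g\n", TASK_PID(&t), TASK_REGS(&t)[1]);
	return 0;
}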
diff --git a/arch/um/include/shared/tlb.h b/arch/um/include/shared/tlb.h
new file mode 100644
index 00000000000..ecd2265b301
--- /dev/null
+++ b/arch/um/include/shared/tlb.h
@@ -0,0 +1,15 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __TLB_H__
7#define __TLB_H__
8
9#include "um_mmu.h"
10
11extern void force_flush_all(void);
12extern int flush_tlb_kernel_range_common(unsigned long start,
13 unsigned long end);
14
15#endif
diff --git a/arch/um/include/shared/ubd_user.h b/arch/um/include/shared/ubd_user.h
new file mode 100644
index 00000000000..3845051f1b1
--- /dev/null
+++ b/arch/um/include/shared/ubd_user.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
3 * Copyright (C) 2001 RidgeRun, Inc (glonnon@ridgerun.com)
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_UBD_USER_H
8#define __UM_UBD_USER_H
9
10extern void ignore_sigwinch_sig(void);
11extern int start_io_thread(unsigned long sp, int *fds_out);
12extern int io_thread(void *arg);
13extern int kernel_fd;
14
15#endif
16
diff --git a/arch/um/include/shared/um_mmu.h b/arch/um/include/shared/um_mmu.h
new file mode 100644
index 00000000000..b1a7e47d102
--- /dev/null
+++ b/arch/um/include/shared/um_mmu.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __ARCH_UM_MMU_H
7#define __ARCH_UM_MMU_H
8
9#include "mm_id.h"
10#include "ldt.h"
11
12typedef struct mm_context {
13 struct mm_id id;
14 struct uml_ldt ldt;
15 struct page **stub_pages;
16} mm_context_t;
17
18extern void __switch_mm(struct mm_id * mm_idp);
19
20/* Avoid tangled inclusion with asm/ldt.h */
21extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
22extern void free_ldt(struct mm_context *mm);
23
24#endif
diff --git a/arch/um/include/shared/um_uaccess.h b/arch/um/include/shared/um_uaccess.h
new file mode 100644
index 00000000000..45c04999d67
--- /dev/null
+++ b/arch/um/include/shared/um_uaccess.h
@@ -0,0 +1,97 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __ARCH_UM_UACCESS_H
7#define __ARCH_UM_UACCESS_H
8
9#include <asm/elf.h>
10#include <asm/fixmap.h>
11#include "sysdep/archsetjmp.h"
12
13#define __under_task_size(addr, size) \
14 (((unsigned long) (addr) < TASK_SIZE) && \
15 (((unsigned long) (addr) + (size)) < TASK_SIZE))
16
17#define __access_ok_vsyscall(type, addr, size) \
18 ((type == VERIFY_READ) && \
19 ((unsigned long) (addr) >= FIXADDR_USER_START) && \
20 ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
21 ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
22
23#define __addr_range_nowrap(addr, size) \
24 ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
25
26#define access_ok(type, addr, size) \
27 (__addr_range_nowrap(addr, size) && \
28 (__under_task_size(addr, size) || \
29 __access_ok_vsyscall(type, addr, size) || \
30 segment_eq(get_fs(), KERNEL_DS)))
31
32extern int copy_from_user(void *to, const void __user *from, int n);
33extern int copy_to_user(void __user *to, const void *from, int n);
34
35extern int __do_copy_to_user(void *to, const void *from, int n,
36 void **fault_addr, jmp_buf **fault_catcher);
37
38/*
39 * strncpy_from_user: - Copy a NUL terminated string from userspace.
40 * @dst: Destination address, in kernel space. This buffer must be at
41 * least @count bytes long.
42 * @src: Source address, in user space.
43 * @count: Maximum number of bytes to copy, including the trailing NUL.
44 *
45 * Copies a NUL-terminated string from userspace to kernel space.
46 *
47 * On success, returns the length of the string (not including the trailing
48 * NUL).
49 *
50 * If access to userspace fails, returns -EFAULT (some data may have been
51 * copied).
52 *
53 * If @count is smaller than the length of the string, copies @count bytes
54 * and returns @count.
55 */
56
57extern int strncpy_from_user(char *dst, const char __user *src, int count);
58
59/*
60 * __clear_user: - Zero a block of memory in user space, with less checking.
61 * @to: Destination address, in user space.
62 * @n: Number of bytes to zero.
63 *
64 * Zero a block of memory in user space. Caller must check
65 * the specified block with access_ok() before calling this function.
66 *
67 * Returns number of bytes that could not be cleared.
68 * On success, this will be zero.
69 */
70extern int __clear_user(void __user *mem, int len);
71
72/*
73 * clear_user: - Zero a block of memory in user space.
74 * @to: Destination address, in user space.
75 * @n: Number of bytes to zero.
76 *
77 * Zero a block of memory in user space.
78 *
79 * Returns number of bytes that could not be cleared.
80 * On success, this will be zero.
81 */
82extern int clear_user(void __user *mem, int len);
83
84/*
85 * strnlen_user: - Get the size of a string in user space.
86 * @str: The string to measure.
87 * @n: The maximum valid length
88 *
89 * Get the size of a NUL-terminated string in user space.
90 *
91 * Returns the size of the string INCLUDING the terminating NUL.
92 * On exception, returns 0.
93 * If the string is too long, returns a value greater than @n.
94 */
95extern int strnlen_user(const void __user *str, int len);
96
97#endif
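access_ok() above accepts a range on any of three grounds: it lies entirely below TASK_SIZE, it is a read inside the host vsyscall window the kernel re-exports (FIXADDR_USER_START..FIXADDR_USER_END), or the thread is running with KERNEL_DS. A stand-alone restatement of that predicate with made-up constants, useful for seeing which branch admits which range:

/* access_ok_demo.c - all constants are illustrative stand-ins */
#include <stdio.h>

#define VERIFY_READ  0
#define VERIFY_WRITE 1

#define TASK_SIZE          0xc0000000UL
#define FIXADDR_USER_START 0xffffe000UL
#define FIXADDR_USER_END   0xfffff000UL

static int kernel_ds;	/* stand-in for segment_eq(get_fs(), KERNEL_DS) */

static int demo_access_ok(int type, unsigned long addr, unsigned long size)
{
	int nowrap = addr <= addr + size;
	int under_task = addr < TASK_SIZE && addr + size < TASK_SIZE;
	int vsyscall = type == VERIFY_READ &&
		       addr >= FIXADDR_USER_START &&
		       addr + size <= FIXADDR_USER_END &&
		       addr + size >= addr;

	return nowrap && (under_task || vsyscall || kernel_ds);
}

int main(void)
{
	printf("user buffer     %d\n", demo_access_ok(VERIFY_WRITE, 0x08048000UL, 64));
	printf("vsyscall read   %d\n", demo_access_ok(VERIFY_READ, 0xffffe400UL, 16));
	printf("vsyscall write  %d\n", demo_access_ok(VERIFY_WRITE, 0xffffe400UL, 16));
	printf("kernel address  %d\n", demo_access_ok(VERIFY_WRITE, 0xd0000000UL, 16));
	return 0;
}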
diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c
new file mode 100644
index 00000000000..ddc9698b66e
--- /dev/null
+++ b/arch/um/kernel/init_task.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,intel.linux}.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/sched.h"
7#include "linux/init_task.h"
8#include "linux/fs.h"
9#include "linux/module.h"
10#include "linux/mqueue.h"
11#include "asm/uaccess.h"
12
13static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
14static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15/*
16 * Initial task structure.
17 *
18 * All other task structs will be allocated on slabs in fork.c
19 */
20
21struct task_struct init_task = INIT_TASK(init_task);
22
23EXPORT_SYMBOL(init_task);
24
25/*
26 * Initial thread structure.
27 *
28 * We need to make sure that this is aligned due to the
29 * way process stacks are handled. This is done by having a special
30 * "init_task" linker map entry..
31 */
32
33union thread_union init_thread_union __init_task_data =
34 { INIT_THREAD_INFO(init_task) };
35
36union thread_union cpu0_irqstack
37 __attribute__((__section__(".data..init_irqstack"))) =
38 { INIT_THREAD_INFO(init_task) };
diff --git a/arch/um/kernel/internal.h b/arch/um/kernel/internal.h
new file mode 100644
index 00000000000..5bf97db24a0
--- /dev/null
+++ b/arch/um/kernel/internal.h
@@ -0,0 +1 @@
extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
diff --git a/arch/um/kernel/uaccess.c b/arch/um/kernel/uaccess.c
new file mode 100644
index 00000000000..dd33f040c52
--- /dev/null
+++ b/arch/um/kernel/uaccess.c
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Licensed under the GPL
5 */
6
7/*
8 * These are here rather than tt/uaccess.c because skas mode needs them in
9 * order to do SIGBUS recovery when a tmpfs mount runs out of room.
10 */
11
12#include <linux/string.h>
13#include "os.h"
14
15static void __do_copy(void *to, const void *from, int n)
16{
17 memcpy(to, from, n);
18}
19
20
21int __do_copy_to_user(void *to, const void *from, int n,
22 void **fault_addr, jmp_buf **fault_catcher)
23{
24 unsigned long fault;
25 int faulted;
26
27 fault = __do_user_copy(to, from, n, fault_addr, fault_catcher,
28 __do_copy, &faulted);
29 if (!faulted)
30 return 0;
31 else
32 return n - (fault - (unsigned long) to);
33}
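__do_copy_to_user() leans on __do_user_copy() (arch/um/os-Linux/uaccess.c, further down) to run the copy under a setjmp-based fault catcher, so a SIGSEGV in the middle of the memcpy turns into a partial-copy return value instead of a crash. The same pattern reduced to plain user space (fault detection only, without the faulting-address bookkeeping the kernel version does):

/* fault_copy_demo.c */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf fault_catcher;

static void segv_handler(int sig)
{
	(void) sig;
	siglongjmp(fault_catcher, 1);
}

static int checked_copy(void *to, const void *from, size_t n)
{
	if (sigsetjmp(fault_catcher, 1) == 0) {
		memcpy(to, from, n);
		return 0;		/* no fault */
	}
	return -1;			/* faulted somewhere in the copy */
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *bad = mmap(NULL, page, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char buf[16];
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = segv_handler;
	sa.sa_flags = SA_NODEFER;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	printf("good copy: %d\n", checked_copy(buf, "hello", 6));
	printf("bad copy:  %d\n", checked_copy(buf, bad, 6));	/* PROT_NONE page faults */
	return 0;
}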
diff --git a/arch/um/os-Linux/sys-i386/Makefile b/arch/um/os-Linux/sys-i386/Makefile
new file mode 100644
index 00000000000..b4bc6ac4f30
--- /dev/null
+++ b/arch/um/os-Linux/sys-i386/Makefile
@@ -0,0 +1,10 @@
1#
2# Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3# Licensed under the GPL
4#
5
6obj-y = registers.o signal.o task_size.o tls.o
7
8USER_OBJS := $(obj-y)
9
10include arch/um/scripts/Makefile.rules
diff --git a/arch/um/os-Linux/sys-i386/registers.c b/arch/um/os-Linux/sys-i386/registers.c
new file mode 100644
index 00000000000..229f7a53d8d
--- /dev/null
+++ b/arch/um/os-Linux/sys-i386/registers.c
@@ -0,0 +1,91 @@
1/*
2 * Copyright (C) 2004 PathScale, Inc
3 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Licensed under the GPL
5 */
6
7#include <errno.h>
8#include <sys/ptrace.h>
9#include <sys/user.h>
10#include "kern_constants.h"
11#include "longjmp.h"
12#include "user.h"
13#include "sysdep/ptrace_user.h"
14
15int save_fp_registers(int pid, unsigned long *fp_regs)
16{
17 if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
18 return -errno;
19 return 0;
20}
21
22int restore_fp_registers(int pid, unsigned long *fp_regs)
23{
24 if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
25 return -errno;
26 return 0;
27}
28
29int save_fpx_registers(int pid, unsigned long *fp_regs)
30{
31 if (ptrace(PTRACE_GETFPXREGS, pid, 0, fp_regs) < 0)
32 return -errno;
33 return 0;
34}
35
36int restore_fpx_registers(int pid, unsigned long *fp_regs)
37{
38 if (ptrace(PTRACE_SETFPXREGS, pid, 0, fp_regs) < 0)
39 return -errno;
40 return 0;
41}
42
43unsigned long get_thread_reg(int reg, jmp_buf *buf)
44{
45 switch (reg) {
46 case EIP:
47 return buf[0]->__eip;
48 case UESP:
49 return buf[0]->__esp;
50 case EBP:
51 return buf[0]->__ebp;
52 default:
53 printk(UM_KERN_ERR "get_thread_regs - unknown register %d\n",
54 reg);
55 return 0;
56 }
57}
58
59int have_fpx_regs = 1;
60
61int get_fp_registers(int pid, unsigned long *regs)
62{
63 if (have_fpx_regs)
64 return save_fpx_registers(pid, regs);
65 else
66 return save_fp_registers(pid, regs);
67}
68
69int put_fp_registers(int pid, unsigned long *regs)
70{
71 if (have_fpx_regs)
72 return restore_fpx_registers(pid, regs);
73 else
74 return restore_fp_registers(pid, regs);
75}
76
77void arch_init_registers(int pid)
78{
79 struct user_fpxregs_struct fpx_regs;
80 int err;
81
82 err = ptrace(PTRACE_GETFPXREGS, pid, 0, &fpx_regs);
83 if (!err)
84 return;
85
86 if (errno != EIO)
87 panic("check_ptrace : PTRACE_GETFPXREGS failed, errno = %d",
88 errno);
89
90 have_fpx_regs = 0;
91}
diff --git a/arch/um/os-Linux/sys-i386/signal.c b/arch/um/os-Linux/sys-i386/signal.c
new file mode 100644
index 00000000000..f311609f93d
--- /dev/null
+++ b/arch/um/os-Linux/sys-i386/signal.c
@@ -0,0 +1,13 @@
1/*
2 * Copyright (C) 2006 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <signal.h>
7
8extern void handle_signal(int sig, struct sigcontext *sc);
9
10void hard_handler(int sig)
11{
12 handle_signal(sig, (struct sigcontext *) (&sig + 1));
13}
diff --git a/arch/um/os-Linux/sys-i386/task_size.c b/arch/um/os-Linux/sys-i386/task_size.c
new file mode 100644
index 00000000000..be04c1e183b
--- /dev/null
+++ b/arch/um/os-Linux/sys-i386/task_size.c
@@ -0,0 +1,139 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <signal.h>
4#include <sys/mman.h>
5#include "longjmp.h"
6#include "kern_constants.h"
7
8static jmp_buf buf;
9
10static void segfault(int sig)
11{
12 longjmp(buf, 1);
13}
14
15static int page_ok(unsigned long page)
16{
17 unsigned long *address = (unsigned long *) (page << UM_KERN_PAGE_SHIFT);
18 unsigned long n = ~0UL;
19 void *mapped = NULL;
20 int ok = 0;
21
22 /*
23 * First see if the page is readable. If it is, it may still
24 * be a VDSO, so we go on to see if it's writable. If not
25 * then try mapping memory there. If that fails, then we're
26 * still in the kernel area. As a sanity check, we'll fail if
27 * the mmap succeeds, but gives us an address different from
28 * what we wanted.
29 */
30 if (setjmp(buf) == 0)
31 n = *address;
32 else {
33 mapped = mmap(address, UM_KERN_PAGE_SIZE,
34 PROT_READ | PROT_WRITE,
35 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
36 if (mapped == MAP_FAILED)
37 return 0;
38 if (mapped != address)
39 goto out;
40 }
41
42 /*
43 * Now, is it writeable? If so, then we're in user address
44 * space. If not, then try mprotecting it and try the write
45 * again.
46 */
47 if (setjmp(buf) == 0) {
48 *address = n;
49 ok = 1;
50 goto out;
51 } else if (mprotect(address, UM_KERN_PAGE_SIZE,
52 PROT_READ | PROT_WRITE) != 0)
53 goto out;
54
55 if (setjmp(buf) == 0) {
56 *address = n;
57 ok = 1;
58 }
59
60 out:
61 if (mapped != NULL)
62 munmap(mapped, UM_KERN_PAGE_SIZE);
63 return ok;
64}
65
66unsigned long os_get_top_address(void)
67{
68 struct sigaction sa, old;
69 unsigned long bottom = 0;
70 /*
71 * A 32-bit UML on a 64-bit host gets confused about the VDSO at
72 * 0xffffe000. It is mapped, is readable, can be reprotected writeable
73 * and written. However, exec discovers later that it can't be
74 * unmapped. So, just set the highest address to be checked to just
75 * below it. This might waste some address space on 4G/4G 32-bit
76 * hosts, but shouldn't hurt otherwise.
77 */
78 unsigned long top = 0xffffd000 >> UM_KERN_PAGE_SHIFT;
79 unsigned long test, original;
80
81 printf("Locating the bottom of the address space ... ");
82 fflush(stdout);
83
84 /*
85 * We're going to be longjmping out of the signal handler, so
86 * SA_NODEFER needs to be set.
87 */
88 sa.sa_handler = segfault;
89 sigemptyset(&sa.sa_mask);
90 sa.sa_flags = SA_NODEFER;
91 if (sigaction(SIGSEGV, &sa, &old)) {
92 perror("os_get_top_address");
93 exit(1);
94 }
95
96 /* Manually scan the address space, bottom-up, until we find
97 * the first valid page (or run out of them).
98 */
99 for (bottom = 0; bottom < top; bottom++) {
100 if (page_ok(bottom))
101 break;
102 }
103
104 /* If we've got this far, we ran out of pages. */
105 if (bottom == top) {
106 fprintf(stderr, "Unable to determine bottom of address "
107 "space.\n");
108 exit(1);
109 }
110
111 printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT);
112 printf("Locating the top of the address space ... ");
113 fflush(stdout);
114
115 original = bottom;
116
117 /* This could happen with a 4G/4G split */
118 if (page_ok(top))
119 goto out;
120
121 do {
122 test = bottom + (top - bottom) / 2;
123 if (page_ok(test))
124 bottom = test;
125 else
126 top = test;
127 } while (top - bottom > 1);
128
129out:
130 /* Restore the old SIGSEGV handling */
131 if (sigaction(SIGSEGV, &old, NULL)) {
132 perror("os_get_top_address");
133 exit(1);
134 }
135 top <<= UM_KERN_PAGE_SHIFT;
136 printf("0x%x\n", top);
137
138 return top;
139}
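After the linear scan finds one usable page, the top of the address space is located by bisection: keep the invariant that page_ok(bottom) is true and page_ok(top) is false, and halve the gap until they are adjacent. The loop in isolation, with a fake predicate (the 300000-page cut-off is arbitrary):

/* bisect_demo.c */
#include <stdio.h>

static int ok(unsigned long page)
{
	return page < 300000;		/* pretend pages below this are usable */
}

static unsigned long find_top(unsigned long bottom, unsigned long top)
{
	unsigned long test;

	while (top - bottom > 1) {
		test = bottom + (top - bottom) / 2;
		if (ok(test))
			bottom = test;	/* still usable: raise the floor */
		else
			top = test;	/* unusable: lower the ceiling */
	}
	return top;			/* first page that is not usable */
}

int main(void)
{
	printf("top = %lu\n", find_top(0, 1UL << 20));	/* prints 300000 */
	return 0;
}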
diff --git a/arch/um/os-Linux/sys-i386/tls.c b/arch/um/os-Linux/sys-i386/tls.c
new file mode 100644
index 00000000000..32ed41ec1a3
--- /dev/null
+++ b/arch/um/os-Linux/sys-i386/tls.c
@@ -0,0 +1,36 @@
1#include <errno.h>
2#include <linux/unistd.h>
3
4#include <sys/syscall.h>
5#include <unistd.h>
6
7#include "sysdep/tls.h"
8#include "user.h"
9
10/* Checks whether host supports TLS, and sets *tls_min according to the value
11 * valid on the host.
12 * An i386 host has it == 6; an x86_64 host has it == 12, for i386 emulation. */
13void check_host_supports_tls(int *supports_tls, int *tls_min) {
14 /* Values for x86 and x86_64.*/
15 int val[] = {GDT_ENTRY_TLS_MIN_I386, GDT_ENTRY_TLS_MIN_X86_64};
16 int i;
17
18 for (i = 0; i < ARRAY_SIZE(val); i++) {
19 user_desc_t info;
20 info.entry_number = val[i];
21
22 if (syscall(__NR_get_thread_area, &info) == 0) {
23 *tls_min = val[i];
24 *supports_tls = 1;
25 return;
26 } else {
27 if (errno == EINVAL)
28 continue;
29 else if (errno == ENOSYS)
30 *supports_tls = 0;
31 return;
32 }
33 }
34
35 *supports_tls = 0;
36}
diff --git a/arch/um/os-Linux/sys-x86_64/Makefile b/arch/um/os-Linux/sys-x86_64/Makefile
new file mode 100644
index 00000000000..a44a47f8f57
--- /dev/null
+++ b/arch/um/os-Linux/sys-x86_64/Makefile
@@ -0,0 +1,10 @@
1#
2# Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3# Licensed under the GPL
4#
5
6obj-y = registers.o prctl.o signal.o task_size.o
7
8USER_OBJS := $(obj-y)
9
10include arch/um/scripts/Makefile.rules
diff --git a/arch/um/os-Linux/sys-x86_64/prctl.c b/arch/um/os-Linux/sys-x86_64/prctl.c
new file mode 100644
index 00000000000..9d34eddb517
--- /dev/null
+++ b/arch/um/os-Linux/sys-x86_64/prctl.c
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) 2007 Jeff Dike (jdike@{addtoit.com,linux.intel.com})
3 * Licensed under the GPL
4 */
5
6#include <sys/ptrace.h>
7#include <linux/ptrace.h>
8
9int os_arch_prctl(int pid, int code, unsigned long *addr)
10{
11 return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) addr, code);
12}
diff --git a/arch/um/os-Linux/sys-x86_64/registers.c b/arch/um/os-Linux/sys-x86_64/registers.c
new file mode 100644
index 00000000000..594d97ad02b
--- /dev/null
+++ b/arch/um/os-Linux/sys-x86_64/registers.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright (C) 2006 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <errno.h>
7#include <sys/ptrace.h>
8#define __FRAME_OFFSETS
9#include <asm/ptrace.h>
10#include "kern_constants.h"
11#include "longjmp.h"
12#include "user.h"
13
14int save_fp_registers(int pid, unsigned long *fp_regs)
15{
16 if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
17 return -errno;
18 return 0;
19}
20
21int restore_fp_registers(int pid, unsigned long *fp_regs)
22{
23 if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
24 return -errno;
25 return 0;
26}
27
28unsigned long get_thread_reg(int reg, jmp_buf *buf)
29{
30 switch (reg) {
31 case RIP:
32 return buf[0]->__rip;
33 case RSP:
34 return buf[0]->__rsp;
35 case RBP:
36 return buf[0]->__rbp;
37 default:
38 printk(UM_KERN_ERR "get_thread_regs - unknown register %d\n",
39 reg);
40 return 0;
41 }
42}
43
44int get_fp_registers(int pid, unsigned long *regs)
45{
46 return save_fp_registers(pid, regs);
47}
48
49int put_fp_registers(int pid, unsigned long *regs)
50{
51 return restore_fp_registers(pid, regs);
52}
diff --git a/arch/um/os-Linux/sys-x86_64/signal.c b/arch/um/os-Linux/sys-x86_64/signal.c
new file mode 100644
index 00000000000..82a388822cd
--- /dev/null
+++ b/arch/um/os-Linux/sys-x86_64/signal.c
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2006 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <signal.h>
7
8extern void handle_signal(int sig, struct sigcontext *sc);
9
10void hard_handler(int sig)
11{
12 struct ucontext *uc;
13 asm("movq %%rdx, %0" : "=r" (uc));
14
15 handle_signal(sig, (struct sigcontext *) &uc->uc_mcontext);
16}
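
hard_handler() above relies on the x86_64 calling convention: the kernel passes the signal number, siginfo pointer and ucontext pointer in %rdi, %rsi and %rdx, so the ucontext can be fished out of %rdx even from a one-argument handler. The portable way to get the same pointer is a three-argument SA_SIGINFO handler; a hedged equivalent:

#include <signal.h>
#include <ucontext.h>

static void info_handler(int sig, siginfo_t *si, void *uc_void)
{
        ucontext_t *uc = uc_void;

        /* uc->uc_mcontext is what the asm above reaches via %rdx. */
        (void)sig; (void)si; (void)uc;
}

static void install(int sig)
{
        struct sigaction sa;

        sa.sa_sigaction = info_handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(sig, &sa, NULL);
}
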
diff --git a/arch/um/os-Linux/sys-x86_64/task_size.c b/arch/um/os-Linux/sys-x86_64/task_size.c
new file mode 100644
index 00000000000..26a0dd1f349
--- /dev/null
+++ b/arch/um/os-Linux/sys-x86_64/task_size.c
@@ -0,0 +1,5 @@
1unsigned long os_get_top_address(unsigned long shift)
2{
3 /* The old value of CONFIG_TOP_ADDR */
4 return 0x7fc0000000;
5}
diff --git a/arch/um/os-Linux/tls.c b/arch/um/os-Linux/tls.c
new file mode 100644
index 00000000000..73277801ef1
--- /dev/null
+++ b/arch/um/os-Linux/tls.c
@@ -0,0 +1,35 @@
1#include <errno.h>
2#include <sys/ptrace.h>
3#include "sysdep/tls.h"
4
5/* TLS support - we basically rely on the host's implementation. */
6
7#ifndef PTRACE_GET_THREAD_AREA
8#define PTRACE_GET_THREAD_AREA 25
9#endif
10
11#ifndef PTRACE_SET_THREAD_AREA
12#define PTRACE_SET_THREAD_AREA 26
13#endif
14
15int os_set_thread_area(user_desc_t *info, int pid)
16{
17 int ret;
18
19 ret = ptrace(PTRACE_SET_THREAD_AREA, pid, info->entry_number,
20 (unsigned long) info);
21 if (ret < 0)
22 ret = -errno;
23 return ret;
24}
25
26int os_get_thread_area(user_desc_t *info, int pid)
27{
28 int ret;
29
30 ret = ptrace(PTRACE_GET_THREAD_AREA, pid, info->entry_number,
31 (unsigned long) info);
32 if (ret < 0)
33 ret = -errno;
34 return ret;
35}
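
os_set_thread_area()/os_get_thread_area() drive the host's PTRACE_{SET,GET}_THREAD_AREA requests against a ptrace-stopped child. A hedged sketch of filling a descriptor before installing it; user_desc_t comes from sysdep/tls.h in this tree, and the field names below follow the host's struct user_desc, so treat them as assumptions:

#include "sysdep/tls.h"         /* user_desc_t */

extern int os_set_thread_area(user_desc_t *info, int pid);

/* Sketch: install a flat 32-bit data segment in GDT slot "entry" of a
 * ptrace-stopped child. */
static int install_tls_entry(int pid, int entry, unsigned long base)
{
        user_desc_t info = {
                .entry_number    = entry,
                .base_addr       = base,
                .limit           = 0xfffff,
                .seg_32bit       = 1,
                .contents        = 0,
                .read_exec_only  = 0,
                .limit_in_pages  = 1,
                .seg_not_present = 0,
                .useable         = 1,
        };

        return os_set_thread_area(&info, pid);
}
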
diff --git a/arch/um/os-Linux/uaccess.c b/arch/um/os-Linux/uaccess.c
new file mode 100644
index 00000000000..087ed74ffca
--- /dev/null
+++ b/arch/um/os-Linux/uaccess.c
@@ -0,0 +1,32 @@
1/*
2 * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Licensed under the GPL
5 */
6
7#include <stddef.h>
8#include "longjmp.h"
9
10unsigned long __do_user_copy(void *to, const void *from, int n,
11 void **fault_addr, jmp_buf **fault_catcher,
12 void (*op)(void *to, const void *from,
13 int n), int *faulted_out)
14{
15 unsigned long *faddrp = (unsigned long *) fault_addr, ret;
16
17 jmp_buf jbuf;
18 *fault_catcher = &jbuf;
19 if (UML_SETJMP(&jbuf) == 0) {
20 (*op)(to, from, n);
21 ret = 0;
22 *faulted_out = 0;
23 }
24 else {
25 ret = *faddrp;
26 *faulted_out = 1;
27 }
28 *fault_addr = NULL;
29 *fault_catcher = NULL;
30 return ret;
31}
32
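__do_user_copy() runs an arbitrary copy routine under a setjmp()-based fault catcher: if the copy touches an unmapped address, the SIGSEGV path longjmps back through *fault_catcher and the function returns the faulting address while setting *faulted_out. A rough sketch of a caller; in the real kernel the fault_addr/fault_catcher pointers live in the current thread's state and are consulted by the segfault handler, so the plain locals below are purely illustrative:

#include <string.h>
#include "longjmp.h"            /* this tree's jmp_buf and UML_SETJMP */

extern unsigned long __do_user_copy(void *to, const void *from, int n,
                                    void **fault_addr, jmp_buf **fault_catcher,
                                    void (*op)(void *to, const void *from, int n),
                                    int *faulted_out);

static void plain_copy(void *to, const void *from, int n)
{
        memcpy(to, from, n);
}

/* Returns 0 on success or the faulting address if the copy trapped. */
static unsigned long try_copy(void *to, const void *from, int n)
{
        void *fault_addr = NULL;
        jmp_buf *fault_catcher = NULL;
        int faulted;

        return __do_user_copy(to, from, n, &fault_addr, &fault_catcher,
                              plain_copy, &faulted);
}
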
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
new file mode 100644
index 00000000000..3923cfb8764
--- /dev/null
+++ b/arch/um/sys-i386/Makefile
@@ -0,0 +1,24 @@
1#
2# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3#
4
5obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
6 ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
7 sys_call_table.o tls.o atomic64_cx8_32.o mem.o
8
9obj-$(CONFIG_BINFMT_ELF) += elfcore.o
10
11subarch-obj-y = lib/string_32.o
12subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
13subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
14subarch-obj-$(CONFIG_MODULES) += kernel/module.o
15
16USER_OBJS := bugs.o ptrace_user.o fault.o
17
18USER_OBJS += user-offsets.s
19extra-y += user-offsets.s
20
21UNPROFILE_OBJS := stub_segv.o
22CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
23
24include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-i386/asm/archparam.h b/arch/um/sys-i386/asm/archparam.h
new file mode 100644
index 00000000000..2a18a884ca1
--- /dev/null
+++ b/arch/um/sys-i386/asm/archparam.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_ARCHPARAM_I386_H
7#define __UM_ARCHPARAM_I386_H
8
9#ifdef CONFIG_X86_PAE
10#define LAST_PKMAP 512
11#else
12#define LAST_PKMAP 1024
13#endif
14
15#endif
16
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h
new file mode 100644
index 00000000000..42305551d20
--- /dev/null
+++ b/arch/um/sys-i386/asm/elf.h
@@ -0,0 +1,125 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5#ifndef __UM_ELF_I386_H
6#define __UM_ELF_I386_H
7
8#include <asm/user.h>
9#include "skas.h"
10
11#define R_386_NONE 0
12#define R_386_32 1
13#define R_386_PC32 2
14#define R_386_GOT32 3
15#define R_386_PLT32 4
16#define R_386_COPY 5
17#define R_386_GLOB_DAT 6
18#define R_386_JMP_SLOT 7
19#define R_386_RELATIVE 8
20#define R_386_GOTOFF 9
21#define R_386_GOTPC 10
22#define R_386_NUM 11
23
24typedef unsigned long elf_greg_t;
25
26#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
27typedef elf_greg_t elf_gregset_t[ELF_NGREG];
28
29typedef struct user_i387_struct elf_fpregset_t;
30
31/*
32 * This is used to ensure we don't load something for the wrong architecture.
33 */
34#define elf_check_arch(x) \
35 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
36
37#define ELF_CLASS ELFCLASS32
38#define ELF_DATA ELFDATA2LSB
39#define ELF_ARCH EM_386
40
41#define ELF_PLAT_INIT(regs, load_addr) do { \
42 PT_REGS_EBX(regs) = 0; \
43 PT_REGS_ECX(regs) = 0; \
44 PT_REGS_EDX(regs) = 0; \
45 PT_REGS_ESI(regs) = 0; \
46 PT_REGS_EDI(regs) = 0; \
47 PT_REGS_EBP(regs) = 0; \
48 PT_REGS_EAX(regs) = 0; \
49} while (0)
50
51#define ELF_EXEC_PAGESIZE 4096
52
53#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
54
55/* Shamelessly stolen from include/asm-i386/elf.h */
56
57#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
58 pr_reg[0] = PT_REGS_EBX(regs); \
59 pr_reg[1] = PT_REGS_ECX(regs); \
60 pr_reg[2] = PT_REGS_EDX(regs); \
61 pr_reg[3] = PT_REGS_ESI(regs); \
62 pr_reg[4] = PT_REGS_EDI(regs); \
63 pr_reg[5] = PT_REGS_EBP(regs); \
64 pr_reg[6] = PT_REGS_EAX(regs); \
65 pr_reg[7] = PT_REGS_DS(regs); \
66 pr_reg[8] = PT_REGS_ES(regs); \
67 /* fake once used fs and gs selectors? */ \
68 pr_reg[9] = PT_REGS_DS(regs); \
69 pr_reg[10] = PT_REGS_DS(regs); \
70 pr_reg[11] = PT_REGS_SYSCALL_NR(regs); \
71 pr_reg[12] = PT_REGS_IP(regs); \
72 pr_reg[13] = PT_REGS_CS(regs); \
73 pr_reg[14] = PT_REGS_EFLAGS(regs); \
74 pr_reg[15] = PT_REGS_SP(regs); \
75 pr_reg[16] = PT_REGS_SS(regs); \
76} while (0);
77
78#define task_pt_regs(t) (&(t)->thread.regs)
79
80struct task_struct;
81
82extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
83
84#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
85
86extern long elf_aux_hwcap;
87#define ELF_HWCAP (elf_aux_hwcap)
88
89extern char * elf_aux_platform;
90#define ELF_PLATFORM (elf_aux_platform)
91
92#define SET_PERSONALITY(ex) do { } while (0)
93
94extern unsigned long vsyscall_ehdr;
95extern unsigned long vsyscall_end;
96extern unsigned long __kernel_vsyscall;
97
98#define VSYSCALL_BASE vsyscall_ehdr
99#define VSYSCALL_END vsyscall_end
100
101/*
102 * This is the range that is readable by user mode, and things
103 * acting like user mode such as get_user_pages.
104 */
105#define FIXADDR_USER_START VSYSCALL_BASE
106#define FIXADDR_USER_END VSYSCALL_END
107
108#define __HAVE_ARCH_GATE_AREA 1
109
110/*
111 * Architecture-neutral AT_ values in 0-17, leave some room
112 * for more of them, start the x86-specific ones at 32.
113 */
114#define AT_SYSINFO 32
115#define AT_SYSINFO_EHDR 33
116
117#define ARCH_DLINFO \
118do { \
119 if ( vsyscall_ehdr ) { \
120 NEW_AUX_ENT(AT_SYSINFO, __kernel_vsyscall); \
121 NEW_AUX_ENT(AT_SYSINFO_EHDR, vsyscall_ehdr); \
122 } \
123} while (0)
124
125#endif
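
When a vsyscall page was found, ARCH_DLINFO above exports its entry point and ELF header through the auxiliary vector as AT_SYSINFO/AT_SYSINFO_EHDR. From inside a guest process those values can be read back with glibc's getauxval(); a hedged sketch (getauxval() is a glibc call, not something defined in this tree):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        /* 32 and 33 match AT_SYSINFO and AT_SYSINFO_EHDR defined above. */
        unsigned long sysinfo = getauxval(32);
        unsigned long ehdr = getauxval(33);

        printf("AT_SYSINFO      = 0x%lx\n", sysinfo);
        printf("AT_SYSINFO_EHDR = 0x%lx\n", ehdr);
        return 0;
}
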
diff --git a/arch/um/sys-i386/asm/module.h b/arch/um/sys-i386/asm/module.h
new file mode 100644
index 00000000000..5ead4a0b2e3
--- /dev/null
+++ b/arch/um/sys-i386/asm/module.h
@@ -0,0 +1,13 @@
1#ifndef __UM_MODULE_I386_H
2#define __UM_MODULE_I386_H
3
4/* UML is simple */
5struct mod_arch_specific
6{
7};
8
9#define Elf_Shdr Elf32_Shdr
10#define Elf_Sym Elf32_Sym
11#define Elf_Ehdr Elf32_Ehdr
12
13#endif
diff --git a/arch/um/sys-i386/asm/processor.h b/arch/um/sys-i386/asm/processor.h
new file mode 100644
index 00000000000..82a9061ab5b
--- /dev/null
+++ b/arch/um/sys-i386/asm/processor.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_PROCESSOR_I386_H
7#define __UM_PROCESSOR_I386_H
8
9#include "linux/string.h"
10#include <sysdep/host_ldt.h>
11#include "asm/segment.h"
12
13extern int host_has_cmov;
14
15/* include faultinfo structure */
16#include "sysdep/faultinfo.h"
17
18struct uml_tls_struct {
19 struct user_desc tls;
20 unsigned flushed:1;
21 unsigned present:1;
22};
23
24struct arch_thread {
25 struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
26 unsigned long debugregs[8];
27 int debugregs_seq;
28 struct faultinfo faultinfo;
29};
30
31#define INIT_ARCH_THREAD { \
32 .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \
33 { .present = 0, .flushed = 0 } }, \
34 .debugregs = { [ 0 ... 7 ] = 0 }, \
35 .debugregs_seq = 0, \
36 .faultinfo = { 0, 0, 0 } \
37}
38
39static inline void arch_flush_thread(struct arch_thread *thread)
40{
41 /* Clear any TLS still hanging */
42 memset(&thread->tls_array, 0, sizeof(thread->tls_array));
43}
44
45static inline void arch_copy_thread(struct arch_thread *from,
46 struct arch_thread *to)
47{
48 memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
49}
50
51#include <asm/user.h>
52
53/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
54static inline void rep_nop(void)
55{
56 __asm__ __volatile__("rep;nop": : :"memory");
57}
58
59#define cpu_relax() rep_nop()
60
61/*
62 * Default implementation of macro that returns current
63 * instruction pointer ("program counter"). Stolen
64 * from asm-i386/processor.h
65 */
66#define current_text_addr() \
67 ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
68
69#define ARCH_IS_STACKGROW(address) \
70 (address + 32 >= UPT_SP(&current->thread.regs.regs))
71
72#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP)
73#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP)
74#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP)
75
76#include "asm/processor-generic.h"
77
78#endif
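
cpu_relax() above expands to rep;nop (the PAUSE instruction), which is meant to sit inside spin-wait loops. A hedged usage sketch, assuming this header is included; the flag is a made-up example, not something from this file:

/* Illustrative spin-wait only; "ready" is hypothetical. */
static volatile int ready;

static void wait_for_ready(void)
{
        while (!ready)
                cpu_relax();    /* PAUSE: cheaper busy-wait, friendlier to SMT */
}
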
diff --git a/arch/um/sys-i386/asm/ptrace.h b/arch/um/sys-i386/asm/ptrace.h
new file mode 100644
index 00000000000..5d2a5911253
--- /dev/null
+++ b/arch/um/sys-i386/asm/ptrace.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __UM_PTRACE_I386_H
7#define __UM_PTRACE_I386_H
8
9#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
10
11#include "linux/compiler.h"
12#include "asm/ptrace-generic.h"
13
14#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs)
15#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs)
16#define PT_REGS_ECX(r) UPT_ECX(&(r)->regs)
17#define PT_REGS_EDX(r) UPT_EDX(&(r)->regs)
18#define PT_REGS_ESI(r) UPT_ESI(&(r)->regs)
19#define PT_REGS_EDI(r) UPT_EDI(&(r)->regs)
20#define PT_REGS_EBP(r) UPT_EBP(&(r)->regs)
21
22#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
23#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
24#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
25#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
26#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
27#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
28
29#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
30
31#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_EAX(r)
32#define PT_REGS_SYSCALL_RET(r) PT_REGS_EAX(r)
33#define PT_FIX_EXEC_STACK(sp) do ; while(0)
34
35#define profile_pc(regs) PT_REGS_IP(regs)
36
37#define user_mode(r) UPT_IS_USER(&(r)->regs)
38
39/*
40 * Forward declaration to avoid including sysdep/tls.h, which causes a
41 * circular include, and compilation failures.
42 */
43struct user_desc;
44
45extern int ptrace_get_thread_area(struct task_struct *child, int idx,
46 struct user_desc __user *user_desc);
47
48extern int ptrace_set_thread_area(struct task_struct *child, int idx,
49 struct user_desc __user *user_desc);
50
51#endif
diff --git a/arch/um/sys-i386/atomic64_cx8_32.S b/arch/um/sys-i386/atomic64_cx8_32.S
new file mode 100644
index 00000000000..1e901d3d4a9
--- /dev/null
+++ b/arch/um/sys-i386/atomic64_cx8_32.S
@@ -0,0 +1,225 @@
1/*
2 * atomic64_t for 586+
3 *
4 * Copied from arch/x86/lib/atomic64_cx8_32.S
5 *
6 * Copyright © 2010 Luca Barbieri
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 */
14
15#include <linux/linkage.h>
16#include <asm/alternative-asm.h>
17#include <asm/dwarf2.h>
18
19.macro SAVE reg
20 pushl_cfi %\reg
21 CFI_REL_OFFSET \reg, 0
22.endm
23
24.macro RESTORE reg
25 popl_cfi %\reg
26 CFI_RESTORE \reg
27.endm
28
29.macro read64 reg
30 movl %ebx, %eax
31 movl %ecx, %edx
32/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
33 LOCK_PREFIX
34 cmpxchg8b (\reg)
35.endm
36
37ENTRY(atomic64_read_cx8)
38 CFI_STARTPROC
39
40 read64 %ecx
41 ret
42 CFI_ENDPROC
43ENDPROC(atomic64_read_cx8)
44
45ENTRY(atomic64_set_cx8)
46 CFI_STARTPROC
47
481:
49/* we don't need LOCK_PREFIX since aligned 64-bit writes
50 * are atomic on 586 and newer */
51 cmpxchg8b (%esi)
52 jne 1b
53
54 ret
55 CFI_ENDPROC
56ENDPROC(atomic64_set_cx8)
57
58ENTRY(atomic64_xchg_cx8)
59 CFI_STARTPROC
60
61 movl %ebx, %eax
62 movl %ecx, %edx
631:
64 LOCK_PREFIX
65 cmpxchg8b (%esi)
66 jne 1b
67
68 ret
69 CFI_ENDPROC
70ENDPROC(atomic64_xchg_cx8)
71
72.macro addsub_return func ins insc
73ENTRY(atomic64_\func\()_return_cx8)
74 CFI_STARTPROC
75 SAVE ebp
76 SAVE ebx
77 SAVE esi
78 SAVE edi
79
80 movl %eax, %esi
81 movl %edx, %edi
82 movl %ecx, %ebp
83
84 read64 %ebp
851:
86 movl %eax, %ebx
87 movl %edx, %ecx
88 \ins\()l %esi, %ebx
89 \insc\()l %edi, %ecx
90 LOCK_PREFIX
91 cmpxchg8b (%ebp)
92 jne 1b
93
9410:
95 movl %ebx, %eax
96 movl %ecx, %edx
97 RESTORE edi
98 RESTORE esi
99 RESTORE ebx
100 RESTORE ebp
101 ret
102 CFI_ENDPROC
103ENDPROC(atomic64_\func\()_return_cx8)
104.endm
105
106addsub_return add add adc
107addsub_return sub sub sbb
108
109.macro incdec_return func ins insc
110ENTRY(atomic64_\func\()_return_cx8)
111 CFI_STARTPROC
112 SAVE ebx
113
114 read64 %esi
1151:
116 movl %eax, %ebx
117 movl %edx, %ecx
118 \ins\()l $1, %ebx
119 \insc\()l $0, %ecx
120 LOCK_PREFIX
121 cmpxchg8b (%esi)
122 jne 1b
123
12410:
125 movl %ebx, %eax
126 movl %ecx, %edx
127 RESTORE ebx
128 ret
129 CFI_ENDPROC
130ENDPROC(atomic64_\func\()_return_cx8)
131.endm
132
133incdec_return inc add adc
134incdec_return dec sub sbb
135
136ENTRY(atomic64_dec_if_positive_cx8)
137 CFI_STARTPROC
138 SAVE ebx
139
140 read64 %esi
1411:
142 movl %eax, %ebx
143 movl %edx, %ecx
144 subl $1, %ebx
145 sbb $0, %ecx
146 js 2f
147 LOCK_PREFIX
148 cmpxchg8b (%esi)
149 jne 1b
150
1512:
152 movl %ebx, %eax
153 movl %ecx, %edx
154 RESTORE ebx
155 ret
156 CFI_ENDPROC
157ENDPROC(atomic64_dec_if_positive_cx8)
158
159ENTRY(atomic64_add_unless_cx8)
160 CFI_STARTPROC
161 SAVE ebp
162 SAVE ebx
163/* these just push these two parameters on the stack */
164 SAVE edi
165 SAVE esi
166
167 movl %ecx, %ebp
168 movl %eax, %esi
169 movl %edx, %edi
170
171 read64 %ebp
1721:
173 cmpl %eax, 0(%esp)
174 je 4f
1752:
176 movl %eax, %ebx
177 movl %edx, %ecx
178 addl %esi, %ebx
179 adcl %edi, %ecx
180 LOCK_PREFIX
181 cmpxchg8b (%ebp)
182 jne 1b
183
184 movl $1, %eax
1853:
186 addl $8, %esp
187 CFI_ADJUST_CFA_OFFSET -8
188 RESTORE ebx
189 RESTORE ebp
190 ret
1914:
192 cmpl %edx, 4(%esp)
193 jne 2b
194 xorl %eax, %eax
195 jmp 3b
196 CFI_ENDPROC
197ENDPROC(atomic64_add_unless_cx8)
198
199ENTRY(atomic64_inc_not_zero_cx8)
200 CFI_STARTPROC
201 SAVE ebx
202
203 read64 %esi
2041:
205 testl %eax, %eax
206 je 4f
2072:
208 movl %eax, %ebx
209 movl %edx, %ecx
210 addl $1, %ebx
211 adcl $0, %ecx
212 LOCK_PREFIX
213 cmpxchg8b (%esi)
214 jne 1b
215
216 movl $1, %eax
2173:
218 RESTORE ebx
219 ret
2204:
221 testl %edx, %edx
222 jne 2b
223 jmp 3b
224 CFI_ENDPROC
225ENDPROC(atomic64_inc_not_zero_cx8)
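
Every entry point in this file follows the same shape: read the 64-bit value via cmpxchg8b, compute the new value in ebx:ecx, then retry LOCK cmpxchg8b until no other CPU has changed the memory in between. A hedged C rendering of the add_return loop, using GCC's __sync builtin as a stand-in for the locked cmpxchg8b (this is only the shape of the algorithm, not a replacement for the assembly):

/* Sketch of the retry loop in atomic64_add_return_cx8 above. */
static long long atomic64_add_return_sketch(long long delta, long long *v)
{
        long long old, new;

        do {
                old = *v;       /* the asm does this read with cmpxchg8b */
                new = old + delta;
        } while (__sync_val_compare_and_swap(v, old, new) != old);

        return new;
}
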
diff --git a/arch/um/sys-i386/bug.c b/arch/um/sys-i386/bug.c
new file mode 100644
index 00000000000..8d4f273f121
--- /dev/null
+++ b/arch/um/sys-i386/bug.c
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL V2
4 */
5
6#include <linux/uaccess.h>
7#include <asm/errno.h>
8
9/* Mostly copied from i386/x86_64 - eliminated the eip < PAGE_OFFSET because
10 * that's not relevant in skas mode.
11 */
12
13int is_valid_bugaddr(unsigned long eip)
14{
15 unsigned short ud2;
16
17 if (probe_kernel_address((unsigned short __user *)eip, ud2))
18 return 0;
19
20 return ud2 == 0x0b0f;
21}
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c
new file mode 100644
index 00000000000..2c6d0d731c1
--- /dev/null
+++ b/arch/um/sys-i386/bugs.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <signal.h>
7#include "kern_constants.h"
8#include "kern_util.h"
9#include "longjmp.h"
10#include "task.h"
11#include "user.h"
12#include "sysdep/ptrace.h"
13
14/* Set during early boot */
15static int host_has_cmov = 1;
16static jmp_buf cmov_test_return;
17
18static void cmov_sigill_test_handler(int sig)
19{
20 host_has_cmov = 0;
21 longjmp(cmov_test_return, 1);
22}
23
24void arch_check_bugs(void)
25{
26 struct sigaction old, new;
27
28 printk(UM_KERN_INFO "Checking for host processor cmov support...");
29 new.sa_handler = cmov_sigill_test_handler;
30
31 /* Make sure that SIGILL is enabled after the handler longjmps back */
32 new.sa_flags = SA_NODEFER;
33 sigemptyset(&new.sa_mask);
34 sigaction(SIGILL, &new, &old);
35
36 if (setjmp(cmov_test_return) == 0) {
37 unsigned long foo = 0;
38 __asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
39 printk(UM_KERN_CONT "Yes\n");
40 } else
41 printk(UM_KERN_CONT "No\n");
42
43 sigaction(SIGILL, &old, &new);
44}
45
46void arch_examine_signal(int sig, struct uml_pt_regs *regs)
47{
48 unsigned char tmp[2];
49
50 /*
51 * This is testing for a cmov (0x0f 0x4x) instruction causing a
52 * SIGILL in init.
53 */
54 if ((sig != SIGILL) || (TASK_PID(get_current()) != 1))
55 return;
56
57 if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) {
58 printk(UM_KERN_ERR "SIGILL in init, could not read "
59 "instructions!\n");
60 return;
61 }
62
63 if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
64 return;
65
66 if (host_has_cmov == 0)
67 printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
68 "processor doesn't implement. Boot a filesystem "
69 "compiled for older processors");
70 else if (host_has_cmov == 1)
71 printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
72 "processor claims to implement");
73 else
74 printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)",
75 host_has_cmov);
76}
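
arch_check_bugs() detects cmov by executing one and catching the resulting SIGILL with a setjmp()/longjmp() pair (SA_NODEFER keeps SIGILL deliverable after the longjmp). The same probe works as a small stand-alone host program; a hedged sketch, not the UML code path:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static jmp_buf probe_return;

static void sigill_handler(int sig)
{
        (void)sig;
        longjmp(probe_return, 1);
}

int main(void)
{
        struct sigaction sa;
        unsigned long tmp = 0;

        sa.sa_handler = sigill_handler;
        sa.sa_flags = SA_NODEFER;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGILL, &sa, NULL);

        if (setjmp(probe_return) == 0) {
                __asm__ __volatile__("cmovz %0, %1" : "=r" (tmp) : "0" (tmp));
                printf("cmov: yes\n");
        } else {
                printf("cmov: no\n");
        }
        return 0;
}
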
diff --git a/arch/um/sys-i386/checksum.S b/arch/um/sys-i386/checksum.S
new file mode 100644
index 00000000000..f058d2f82e1
--- /dev/null
+++ b/arch/um/sys-i386/checksum.S
@@ -0,0 +1,458 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IP/TCP/UDP checksumming routines
7 *
8 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
9 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
10 * Tom May, <ftom@netcom.com>
11 * Pentium Pro/II routines:
12 * Alexander Kjeldaas <astor@guardian.no>
13 * Finn Arne Gangstad <finnag@guardian.no>
14 * Lots of code moved from tcp.c and ip.c; see those files
15 * for more names.
16 *
17 * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
18 * handling.
19 * Andi Kleen, add zeroing on error
20 * converted to pure assembler
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28#include <asm/errno.h>
29
30/*
31 * computes a partial checksum, e.g. for TCP/UDP fragments
32 */
33
34/*
35unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
36 */
37
38.text
39.align 4
40.globl csum_partial
41
42#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
43
44 /*
45 * Experiments with Ethernet and SLIP connections show that buff
46 * is aligned on either a 2-byte or 4-byte boundary. We get at
47 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
48 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
49 * alignment for the unrolled loop.
50 */
51csum_partial:
52 pushl %esi
53 pushl %ebx
54 movl 20(%esp),%eax # Function arg: unsigned int sum
55 movl 16(%esp),%ecx # Function arg: int len
56 movl 12(%esp),%esi # Function arg: unsigned char *buff
57 testl $2, %esi # Check alignment.
58 jz 2f # Jump if alignment is ok.
59 subl $2, %ecx # Alignment uses up two bytes.
60 jae 1f # Jump if we had at least two bytes.
61 addl $2, %ecx # ecx was < 2. Deal with it.
62 jmp 4f
631: movw (%esi), %bx
64 addl $2, %esi
65 addw %bx, %ax
66 adcl $0, %eax
672:
68 movl %ecx, %edx
69 shrl $5, %ecx
70 jz 2f
71 testl %esi, %esi
721: movl (%esi), %ebx
73 adcl %ebx, %eax
74 movl 4(%esi), %ebx
75 adcl %ebx, %eax
76 movl 8(%esi), %ebx
77 adcl %ebx, %eax
78 movl 12(%esi), %ebx
79 adcl %ebx, %eax
80 movl 16(%esi), %ebx
81 adcl %ebx, %eax
82 movl 20(%esi), %ebx
83 adcl %ebx, %eax
84 movl 24(%esi), %ebx
85 adcl %ebx, %eax
86 movl 28(%esi), %ebx
87 adcl %ebx, %eax
88 lea 32(%esi), %esi
89 dec %ecx
90 jne 1b
91 adcl $0, %eax
922: movl %edx, %ecx
93 andl $0x1c, %edx
94 je 4f
95 shrl $2, %edx # This clears CF
963: adcl (%esi), %eax
97 lea 4(%esi), %esi
98 dec %edx
99 jne 3b
100 adcl $0, %eax
1014: andl $3, %ecx
102 jz 7f
103 cmpl $2, %ecx
104 jb 5f
105 movw (%esi),%cx
106 leal 2(%esi),%esi
107 je 6f
108 shll $16,%ecx
1095: movb (%esi),%cl
1106: addl %ecx,%eax
111 adcl $0, %eax
1127:
113 popl %ebx
114 popl %esi
115 ret
116
117#else
118
119/* Version for PentiumII/PPro */
120
121csum_partial:
122 pushl %esi
123 pushl %ebx
124 movl 20(%esp),%eax # Function arg: unsigned int sum
125 movl 16(%esp),%ecx # Function arg: int len
126 movl 12(%esp),%esi # Function arg: const unsigned char *buf
127
128 testl $2, %esi
129 jnz 30f
13010:
131 movl %ecx, %edx
132 movl %ecx, %ebx
133 andl $0x7c, %ebx
134 shrl $7, %ecx
135 addl %ebx,%esi
136 shrl $2, %ebx
137 negl %ebx
138 lea 45f(%ebx,%ebx,2), %ebx
139 testl %esi, %esi
140 jmp *%ebx
141
142 # Handle 2-byte-aligned regions
14320: addw (%esi), %ax
144 lea 2(%esi), %esi
145 adcl $0, %eax
146 jmp 10b
147
14830: subl $2, %ecx
149 ja 20b
150 je 32f
151 movzbl (%esi),%ebx # csumming 1 byte, 2-aligned
152 addl %ebx, %eax
153 adcl $0, %eax
154 jmp 80f
15532:
156 addw (%esi), %ax # csumming 2 bytes, 2-aligned
157 adcl $0, %eax
158 jmp 80f
159
16040:
161 addl -128(%esi), %eax
162 adcl -124(%esi), %eax
163 adcl -120(%esi), %eax
164 adcl -116(%esi), %eax
165 adcl -112(%esi), %eax
166 adcl -108(%esi), %eax
167 adcl -104(%esi), %eax
168 adcl -100(%esi), %eax
169 adcl -96(%esi), %eax
170 adcl -92(%esi), %eax
171 adcl -88(%esi), %eax
172 adcl -84(%esi), %eax
173 adcl -80(%esi), %eax
174 adcl -76(%esi), %eax
175 adcl -72(%esi), %eax
176 adcl -68(%esi), %eax
177 adcl -64(%esi), %eax
178 adcl -60(%esi), %eax
179 adcl -56(%esi), %eax
180 adcl -52(%esi), %eax
181 adcl -48(%esi), %eax
182 adcl -44(%esi), %eax
183 adcl -40(%esi), %eax
184 adcl -36(%esi), %eax
185 adcl -32(%esi), %eax
186 adcl -28(%esi), %eax
187 adcl -24(%esi), %eax
188 adcl -20(%esi), %eax
189 adcl -16(%esi), %eax
190 adcl -12(%esi), %eax
191 adcl -8(%esi), %eax
192 adcl -4(%esi), %eax
19345:
194 lea 128(%esi), %esi
195 adcl $0, %eax
196 dec %ecx
197 jge 40b
198 movl %edx, %ecx
19950: andl $3, %ecx
200 jz 80f
201
202 # Handle the last 1-3 bytes without jumping
203 notl %ecx # 1->2, 2->1, 3->0, higher bits are masked
204 movl $0xffffff,%ebx # by the shll and shrl instructions
205 shll $3,%ecx
206 shrl %cl,%ebx
207 andl -128(%esi),%ebx # esi is 4-aligned so should be ok
208 addl %ebx,%eax
209 adcl $0,%eax
21080:
211 popl %ebx
212 popl %esi
213 ret
214
215#endif
216
217/*
218unsigned int csum_partial_copy_generic (const char *src, char *dst,
219 int len, int sum, int *src_err_ptr, int *dst_err_ptr)
220 */
221
222/*
223 * Copy from ds while checksumming, otherwise like csum_partial
224 *
225 * The macros SRC and DST specify the type of access for the instruction,
226 * thus we can call a custom exception handler for all access types.
227 *
228 * FIXME: could someone double-check whether I haven't mixed up some SRC and
229 * DST definitions? It's damn hard to trigger all cases. I hope I got
230 * them all but there's no guarantee.
231 */
232
233#define SRC(y...) \
234 9999: y; \
235 .section __ex_table, "a"; \
236 .long 9999b, 6001f ; \
237 .previous
238
239#define DST(y...) \
240 9999: y; \
241 .section __ex_table, "a"; \
242 .long 9999b, 6002f ; \
243 .previous
244
245.align 4
246
247#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
248
249#define ARGBASE 16
250#define FP 12
251
252csum_partial_copy_generic_i386:
253 subl $4,%esp
254 pushl %edi
255 pushl %esi
256 pushl %ebx
257 movl ARGBASE+16(%esp),%eax # sum
258 movl ARGBASE+12(%esp),%ecx # len
259 movl ARGBASE+4(%esp),%esi # src
260 movl ARGBASE+8(%esp),%edi # dst
261
262 testl $2, %edi # Check alignment.
263 jz 2f # Jump if alignment is ok.
264 subl $2, %ecx # Alignment uses up two bytes.
265 jae 1f # Jump if we had at least two bytes.
266 addl $2, %ecx # ecx was < 2. Deal with it.
267 jmp 4f
268SRC(1: movw (%esi), %bx )
269 addl $2, %esi
270DST( movw %bx, (%edi) )
271 addl $2, %edi
272 addw %bx, %ax
273 adcl $0, %eax
2742:
275 movl %ecx, FP(%esp)
276 shrl $5, %ecx
277 jz 2f
278 testl %esi, %esi
279SRC(1: movl (%esi), %ebx )
280SRC( movl 4(%esi), %edx )
281 adcl %ebx, %eax
282DST( movl %ebx, (%edi) )
283 adcl %edx, %eax
284DST( movl %edx, 4(%edi) )
285
286SRC( movl 8(%esi), %ebx )
287SRC( movl 12(%esi), %edx )
288 adcl %ebx, %eax
289DST( movl %ebx, 8(%edi) )
290 adcl %edx, %eax
291DST( movl %edx, 12(%edi) )
292
293SRC( movl 16(%esi), %ebx )
294SRC( movl 20(%esi), %edx )
295 adcl %ebx, %eax
296DST( movl %ebx, 16(%edi) )
297 adcl %edx, %eax
298DST( movl %edx, 20(%edi) )
299
300SRC( movl 24(%esi), %ebx )
301SRC( movl 28(%esi), %edx )
302 adcl %ebx, %eax
303DST( movl %ebx, 24(%edi) )
304 adcl %edx, %eax
305DST( movl %edx, 28(%edi) )
306
307 lea 32(%esi), %esi
308 lea 32(%edi), %edi
309 dec %ecx
310 jne 1b
311 adcl $0, %eax
3122: movl FP(%esp), %edx
313 movl %edx, %ecx
314 andl $0x1c, %edx
315 je 4f
316 shrl $2, %edx # This clears CF
317SRC(3: movl (%esi), %ebx )
318 adcl %ebx, %eax
319DST( movl %ebx, (%edi) )
320 lea 4(%esi), %esi
321 lea 4(%edi), %edi
322 dec %edx
323 jne 3b
324 adcl $0, %eax
3254: andl $3, %ecx
326 jz 7f
327 cmpl $2, %ecx
328 jb 5f
329SRC( movw (%esi), %cx )
330 leal 2(%esi), %esi
331DST( movw %cx, (%edi) )
332 leal 2(%edi), %edi
333 je 6f
334 shll $16,%ecx
335SRC(5: movb (%esi), %cl )
336DST( movb %cl, (%edi) )
3376: addl %ecx, %eax
338 adcl $0, %eax
3397:
3405000:
341
342# Exception handler:
343.section .fixup, "ax"
344
3456001:
346 movl ARGBASE+20(%esp), %ebx # src_err_ptr
347 movl $-EFAULT, (%ebx)
348
349 # zero the complete destination - computing the rest
350 # is too much work
351 movl ARGBASE+8(%esp), %edi # dst
352 movl ARGBASE+12(%esp), %ecx # len
353 xorl %eax,%eax
354 rep ; stosb
355
356 jmp 5000b
357
3586002:
359 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
360 movl $-EFAULT,(%ebx)
361 jmp 5000b
362
363.previous
364
365 popl %ebx
366 popl %esi
367 popl %edi
368 popl %ecx # equivalent to addl $4,%esp
369 ret
370
371#else
372
373/* Version for PentiumII/PPro */
374
375#define ROUND1(x) \
376 SRC(movl x(%esi), %ebx ) ; \
377 addl %ebx, %eax ; \
378 DST(movl %ebx, x(%edi) ) ;
379
380#define ROUND(x) \
381 SRC(movl x(%esi), %ebx ) ; \
382 adcl %ebx, %eax ; \
383 DST(movl %ebx, x(%edi) ) ;
384
385#define ARGBASE 12
386
387csum_partial_copy_generic_i386:
388 pushl %ebx
389 pushl %edi
390 pushl %esi
391 movl ARGBASE+4(%esp),%esi #src
392 movl ARGBASE+8(%esp),%edi #dst
393 movl ARGBASE+12(%esp),%ecx #len
394 movl ARGBASE+16(%esp),%eax #sum
395# movl %ecx, %edx
396 movl %ecx, %ebx
397 movl %esi, %edx
398 shrl $6, %ecx
399 andl $0x3c, %ebx
400 negl %ebx
401 subl %ebx, %esi
402 subl %ebx, %edi
403 lea -1(%esi),%edx
404 andl $-32,%edx
405 lea 3f(%ebx,%ebx), %ebx
406 testl %esi, %esi
407 jmp *%ebx
4081: addl $64,%esi
409 addl $64,%edi
410 SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
411 ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
412 ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
413 ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
414 ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4)
4153: adcl $0,%eax
416 addl $64, %edx
417 dec %ecx
418 jge 1b
4194: movl ARGBASE+12(%esp),%edx #len
420 andl $3, %edx
421 jz 7f
422 cmpl $2, %edx
423 jb 5f
424SRC( movw (%esi), %dx )
425 leal 2(%esi), %esi
426DST( movw %dx, (%edi) )
427 leal 2(%edi), %edi
428 je 6f
429 shll $16,%edx
4305:
431SRC( movb (%esi), %dl )
432DST( movb %dl, (%edi) )
4336: addl %edx, %eax
434 adcl $0, %eax
4357:
436.section .fixup, "ax"
4376001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
438 movl $-EFAULT, (%ebx)
439 # zero the complete destination (computing the rest is too much work)
440 movl ARGBASE+8(%esp),%edi # dst
441 movl ARGBASE+12(%esp),%ecx # len
442 xorl %eax,%eax
443 rep; stosb
444 jmp 7b
4456002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
446 movl $-EFAULT, (%ebx)
447 jmp 7b
448.previous
449
450 popl %esi
451 popl %edi
452 popl %ebx
453 ret
454
455#undef ROUND
456#undef ROUND1
457
458#endif
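
csum_partial computes the Internet (ones'-complement) checksum: 32-bit chunks are added with end-around carry, and the caller later folds the result down to 16 bits. An unoptimized C reference of the same arithmetic, hedged as a textbook version rather than a drop-in for the assembly (which also handles alignment and the copy-with-checksum variants):

#include <stddef.h>
#include <stdint.h>

/* RFC 1071 style accumulation; after folding, the result matches what the
 * 32-bit-at-a-time assembly above produces. */
static uint32_t csum_partial_ref(const void *buff, size_t len, uint32_t sum)
{
        const uint8_t *p = buff;

        while (len > 1) {
                sum += p[0] | (p[1] << 8);      /* native little-endian load */
                p += 2;
                len -= 2;
        }
        if (len)                                /* trailing odd byte */
                sum += p[0];

        while (sum >> 16)                       /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);

        return sum;
}
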
diff --git a/arch/um/sys-i386/delay.c b/arch/um/sys-i386/delay.c
new file mode 100644
index 00000000000..f3fe1a688f7
--- /dev/null
+++ b/arch/um/sys-i386/delay.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 * Mostly copied from arch/x86/lib/delay.c
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/delay.h>
13#include <asm/param.h>
14
15void __delay(unsigned long loops)
16{
17 asm volatile(
18 "test %0,%0\n"
19 "jz 3f\n"
20 "jmp 1f\n"
21
22 ".align 16\n"
23 "1: jmp 2f\n"
24
25 ".align 16\n"
26 "2: dec %0\n"
27 " jnz 2b\n"
28 "3: dec %0\n"
29
30 : /* we don't need output */
31 : "a" (loops)
32 );
33}
34EXPORT_SYMBOL(__delay);
35
36inline void __const_udelay(unsigned long xloops)
37{
38 int d0;
39
40 xloops *= 4;
41 asm("mull %%edx"
42 : "=d" (xloops), "=&a" (d0)
43 : "1" (xloops), "0"
44 (loops_per_jiffy * (HZ/4)));
45
46 __delay(++xloops);
47}
48EXPORT_SYMBOL(__const_udelay);
49
50void __udelay(unsigned long usecs)
51{
52 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
53}
54EXPORT_SYMBOL(__udelay);
55
56void __ndelay(unsigned long nsecs)
57{
58 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
59}
60EXPORT_SYMBOL(__ndelay);
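
The magic number in __const_udelay() is fixed-point scaling: 0x10c7 is 2^32 / 10^6 rounded up, so usecs * 0x10c7 is usecs/10^6 in 32.32 fixed point; the *= 4 and the loops_per_jiffy * (HZ/4) factor then give usecs * loops_per_jiffy * HZ / 10^6, with mull's high half supplying the >>32. A hedged arithmetic check; the loops_per_jiffy and HZ values below are invented for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t loops_per_jiffy = 4000000;     /* assumed calibration value */
        unsigned int hz = 100;                  /* assumed tick rate */
        unsigned int usecs = 250;

        uint64_t xloops = (uint64_t)usecs * 0x10c7;     /* ~usecs * 2^32 / 1e6 */
        uint64_t loops = (xloops * 4 * (loops_per_jiffy * (hz / 4))) >> 32;

        /* Expect roughly usecs * loops_per_jiffy * hz / 1e6 = 100000 */
        printf("delay loops ~= %llu\n", (unsigned long long)loops);
        return 0;
}
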
diff --git a/arch/um/sys-i386/elfcore.c b/arch/um/sys-i386/elfcore.c
new file mode 100644
index 00000000000..6bb49b687c9
--- /dev/null
+++ b/arch/um/sys-i386/elfcore.c
@@ -0,0 +1,83 @@
1#include <linux/elf.h>
2#include <linux/coredump.h>
3#include <linux/fs.h>
4#include <linux/mm.h>
5
6#include <asm/elf.h>
7
8
9Elf32_Half elf_core_extra_phdrs(void)
10{
11 return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
12}
13
14int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
15 unsigned long limit)
16{
17 if ( vsyscall_ehdr ) {
18 const struct elfhdr *const ehdrp =
19 (struct elfhdr *) vsyscall_ehdr;
20 const struct elf_phdr *const phdrp =
21 (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
22 int i;
23 Elf32_Off ofs = 0;
24
25 for (i = 0; i < ehdrp->e_phnum; ++i) {
26 struct elf_phdr phdr = phdrp[i];
27
28 if (phdr.p_type == PT_LOAD) {
29 ofs = phdr.p_offset = offset;
30 offset += phdr.p_filesz;
31 } else {
32 phdr.p_offset += ofs;
33 }
34 phdr.p_paddr = 0; /* match other core phdrs */
35 *size += sizeof(phdr);
36 if (*size > limit
37 || !dump_write(file, &phdr, sizeof(phdr)))
38 return 0;
39 }
40 }
41 return 1;
42}
43
44int elf_core_write_extra_data(struct file *file, size_t *size,
45 unsigned long limit)
46{
47 if ( vsyscall_ehdr ) {
48 const struct elfhdr *const ehdrp =
49 (struct elfhdr *) vsyscall_ehdr;
50 const struct elf_phdr *const phdrp =
51 (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
52 int i;
53
54 for (i = 0; i < ehdrp->e_phnum; ++i) {
55 if (phdrp[i].p_type == PT_LOAD) {
56 void *addr = (void *) phdrp[i].p_vaddr;
57 size_t filesz = phdrp[i].p_filesz;
58
59 *size += filesz;
60 if (*size > limit
61 || !dump_write(file, addr, filesz))
62 return 0;
63 }
64 }
65 }
66 return 1;
67}
68
69size_t elf_core_extra_data_size(void)
70{
71 if ( vsyscall_ehdr ) {
72 const struct elfhdr *const ehdrp =
73 (struct elfhdr *)vsyscall_ehdr;
74 const struct elf_phdr *const phdrp =
75 (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
76 int i;
77
78 for (i = 0; i < ehdrp->e_phnum; ++i)
79 if (phdrp[i].p_type == PT_LOAD)
80 return (size_t) phdrp[i].p_filesz;
81 }
82 return 0;
83}
diff --git a/arch/um/sys-i386/fault.c b/arch/um/sys-i386/fault.c
new file mode 100644
index 00000000000..d670f68532f
--- /dev/null
+++ b/arch/um/sys-i386/fault.c
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "sysdep/ptrace.h"
7
8/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
9struct exception_table_entry
10{
11 unsigned long insn;
12 unsigned long fixup;
13};
14
15const struct exception_table_entry *search_exception_tables(unsigned long add);
16
17/* Compare this to arch/i386/mm/extable.c:fixup_exception() */
18int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
19{
20 const struct exception_table_entry *fixup;
21
22 fixup = search_exception_tables(address);
23 if (fixup != 0) {
24 UPT_IP(regs) = fixup->fixup;
25 return 1;
26 }
27 return 0;
28}
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c
new file mode 100644
index 00000000000..bfbefd30db8
--- /dev/null
+++ b/arch/um/sys-i386/ksyms.c
@@ -0,0 +1,5 @@
1#include "linux/module.h"
2#include "asm/checksum.h"
3
4/* Networking helper routines. */
5EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
new file mode 100644
index 00000000000..3f2bf208d88
--- /dev/null
+++ b/arch/um/sys-i386/ldt.c
@@ -0,0 +1,502 @@
1/*
2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <linux/mm.h>
7#include <linux/sched.h>
8#include <linux/slab.h>
9#include <asm/unistd.h>
10#include "os.h"
11#include "proc_mm.h"
12#include "skas.h"
13#include "skas_ptrace.h"
14#include "sysdep/tls.h"
15
16extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
17
18static long write_ldt_entry(struct mm_id *mm_idp, int func,
19 struct user_desc *desc, void **addr, int done)
20{
21 long res;
22
23 if (proc_mm) {
24 /*
25 * This is a special handling for the case, that the mm to
26 * modify isn't current->active_mm.
27 * If this is called directly by modify_ldt,
28 * (current->active_mm->context.skas.u == mm_idp)
29 * will be true. So no call to __switch_mm(mm_idp) is done.
30 * If this is called in case of init_new_ldt or PTRACE_LDT,
31 * mm_idp won't belong to current->active_mm, but child->mm.
32 * So we need to switch child's mm into our userspace, then
33 * later switch back.
34 *
35 * Note: I'm unsure: should interrupts be disabled here?
36 */
37 if (!current->active_mm || current->active_mm == &init_mm ||
38 mm_idp != &current->active_mm->context.id)
39 __switch_mm(mm_idp);
40 }
41
42 if (ptrace_ldt) {
43 struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
44 .func = func,
45 .ptr = desc,
46 .bytecount = sizeof(*desc)};
47 u32 cpu;
48 int pid;
49
50 if (!proc_mm)
51 pid = mm_idp->u.pid;
52 else {
53 cpu = get_cpu();
54 pid = userspace_pid[cpu];
55 }
56
57 res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
58
59 if (proc_mm)
60 put_cpu();
61 }
62 else {
63 void *stub_addr;
64 res = syscall_stub_data(mm_idp, (unsigned long *)desc,
65 (sizeof(*desc) + sizeof(long) - 1) &
66 ~(sizeof(long) - 1),
67 addr, &stub_addr);
68 if (!res) {
69 unsigned long args[] = { func,
70 (unsigned long)stub_addr,
71 sizeof(*desc),
72 0, 0, 0 };
73 res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
74 0, addr, done);
75 }
76 }
77
78 if (proc_mm) {
79 /*
80 * This is the second part of special handling, that makes
81 * PTRACE_LDT possible to implement.
82 */
83 if (current->active_mm && current->active_mm != &init_mm &&
84 mm_idp != &current->active_mm->context.id)
85 __switch_mm(&current->active_mm->context.id);
86 }
87
88 return res;
89}
90
91static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
92{
93 int res, n;
94 struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
95 .func = 0,
96 .bytecount = bytecount,
97 .ptr = kmalloc(bytecount, GFP_KERNEL)};
98 u32 cpu;
99
100 if (ptrace_ldt.ptr == NULL)
101 return -ENOMEM;
102
103 /*
104 * This is called from sys_modify_ldt only, so userspace_pid gives
105 * us the right number
106 */
107
108 cpu = get_cpu();
109 res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
110 put_cpu();
111 if (res < 0)
112 goto out;
113
114 n = copy_to_user(ptr, ptrace_ldt.ptr, res);
115 if (n != 0)
116 res = -EFAULT;
117
118 out:
119 kfree(ptrace_ldt.ptr);
120
121 return res;
122}
123
124/*
125 * In skas mode, we hold our own ldt data in UML.
126 * Thus, the code implementing sys_modify_ldt_skas
127 * is very similar to (and mostly stolen from) sys_modify_ldt
128 * for arch/i386/kernel/ldt.c
129 * The routines copied and modified in part are:
130 * - read_ldt
131 * - read_default_ldt
132 * - write_ldt
133 * - sys_modify_ldt_skas
134 */
135
136static int read_ldt(void __user * ptr, unsigned long bytecount)
137{
138 int i, err = 0;
139 unsigned long size;
140 uml_ldt_t * ldt = &current->mm->context.ldt;
141
142 if (!ldt->entry_count)
143 goto out;
144 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
145 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
146 err = bytecount;
147
148 if (ptrace_ldt)
149 return read_ldt_from_host(ptr, bytecount);
150
151 mutex_lock(&ldt->lock);
152 if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
153 size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
154 if (size > bytecount)
155 size = bytecount;
156 if (copy_to_user(ptr, ldt->u.entries, size))
157 err = -EFAULT;
158 bytecount -= size;
159 ptr += size;
160 }
161 else {
162 for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
163 i++) {
164 size = PAGE_SIZE;
165 if (size > bytecount)
166 size = bytecount;
167 if (copy_to_user(ptr, ldt->u.pages[i], size)) {
168 err = -EFAULT;
169 break;
170 }
171 bytecount -= size;
172 ptr += size;
173 }
174 }
175 mutex_unlock(&ldt->lock);
176
177 if (bytecount == 0 || err == -EFAULT)
178 goto out;
179
180 if (clear_user(ptr, bytecount))
181 err = -EFAULT;
182
183out:
184 return err;
185}
186
187static int read_default_ldt(void __user * ptr, unsigned long bytecount)
188{
189 int err;
190
191 if (bytecount > 5*LDT_ENTRY_SIZE)
192 bytecount = 5*LDT_ENTRY_SIZE;
193
194 err = bytecount;
195 /*
196 * UML doesn't support lcall7 and lcall27.
197 * So, we don't really have a default ldt, but emulate
198 * an empty ldt of common host default ldt size.
199 */
200 if (clear_user(ptr, bytecount))
201 err = -EFAULT;
202
203 return err;
204}
205
206static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
207{
208 uml_ldt_t * ldt = &current->mm->context.ldt;
209 struct mm_id * mm_idp = &current->mm->context.id;
210 int i, err;
211 struct user_desc ldt_info;
212 struct ldt_entry entry0, *ldt_p;
213 void *addr = NULL;
214
215 err = -EINVAL;
216 if (bytecount != sizeof(ldt_info))
217 goto out;
218 err = -EFAULT;
219 if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
220 goto out;
221
222 err = -EINVAL;
223 if (ldt_info.entry_number >= LDT_ENTRIES)
224 goto out;
225 if (ldt_info.contents == 3) {
226 if (func == 1)
227 goto out;
228 if (ldt_info.seg_not_present == 0)
229 goto out;
230 }
231
232 if (!ptrace_ldt)
233 mutex_lock(&ldt->lock);
234
235 err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
236 if (err)
237 goto out_unlock;
238 else if (ptrace_ldt) {
239 /* With PTRACE_LDT available, this is used as a flag only */
240 ldt->entry_count = 1;
241 goto out;
242 }
243
244 if (ldt_info.entry_number >= ldt->entry_count &&
245 ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
246 for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
247 i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
248 i++) {
249 if (i == 0)
250 memcpy(&entry0, ldt->u.entries,
251 sizeof(entry0));
252 ldt->u.pages[i] = (struct ldt_entry *)
253 __get_free_page(GFP_KERNEL|__GFP_ZERO);
254 if (!ldt->u.pages[i]) {
255 err = -ENOMEM;
256 /* Undo the change in host */
257 memset(&ldt_info, 0, sizeof(ldt_info));
258 write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
259 goto out_unlock;
260 }
261 if (i == 0) {
262 memcpy(ldt->u.pages[0], &entry0,
263 sizeof(entry0));
264 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
265 sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
266 }
267 ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
268 }
269 }
270 if (ldt->entry_count <= ldt_info.entry_number)
271 ldt->entry_count = ldt_info.entry_number + 1;
272
273 if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
274 ldt_p = ldt->u.entries + ldt_info.entry_number;
275 else
276 ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
277 ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
278
279 if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
280 (func == 1 || LDT_empty(&ldt_info))) {
281 ldt_p->a = 0;
282 ldt_p->b = 0;
283 }
284 else{
285 if (func == 1)
286 ldt_info.useable = 0;
287 ldt_p->a = LDT_entry_a(&ldt_info);
288 ldt_p->b = LDT_entry_b(&ldt_info);
289 }
290 err = 0;
291
292out_unlock:
293 mutex_unlock(&ldt->lock);
294out:
295 return err;
296}
297
298static long do_modify_ldt_skas(int func, void __user *ptr,
299 unsigned long bytecount)
300{
301 int ret = -ENOSYS;
302
303 switch (func) {
304 case 0:
305 ret = read_ldt(ptr, bytecount);
306 break;
307 case 1:
308 case 0x11:
309 ret = write_ldt(ptr, bytecount, func);
310 break;
311 case 2:
312 ret = read_default_ldt(ptr, bytecount);
313 break;
314 }
315 return ret;
316}
317
318static DEFINE_SPINLOCK(host_ldt_lock);
319static short dummy_list[9] = {0, -1};
320static short * host_ldt_entries = NULL;
321
322static void ldt_get_host_info(void)
323{
324 long ret;
325 struct ldt_entry * ldt;
326 short *tmp;
327 int i, size, k, order;
328
329 spin_lock(&host_ldt_lock);
330
331 if (host_ldt_entries != NULL) {
332 spin_unlock(&host_ldt_lock);
333 return;
334 }
335 host_ldt_entries = dummy_list+1;
336
337 spin_unlock(&host_ldt_lock);
338
339 for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
340 ;
341
342 ldt = (struct ldt_entry *)
343 __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
344 if (ldt == NULL) {
345 printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
346 "for host ldt\n");
347 return;
348 }
349
350 ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
351 if (ret < 0) {
352 printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
353 goto out_free;
354 }
355 if (ret == 0) {
356 /* default_ldt is active, simply write an empty entry 0 */
357 host_ldt_entries = dummy_list;
358 goto out_free;
359 }
360
361 for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
362 if (ldt[i].a != 0 || ldt[i].b != 0)
363 size++;
364 }
365
366 if (size < ARRAY_SIZE(dummy_list))
367 host_ldt_entries = dummy_list;
368 else {
369 size = (size + 1) * sizeof(dummy_list[0]);
370 tmp = kmalloc(size, GFP_KERNEL);
371 if (tmp == NULL) {
372 printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
373 "host ldt list\n");
374 goto out_free;
375 }
376 host_ldt_entries = tmp;
377 }
378
379 for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
380 if (ldt[i].a != 0 || ldt[i].b != 0)
381 host_ldt_entries[k++] = i;
382 }
383 host_ldt_entries[k] = -1;
384
385out_free:
386 free_pages((unsigned long)ldt, order);
387}
388
389long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
390{
391 struct user_desc desc;
392 short * num_p;
393 int i;
394 long page, err=0;
395 void *addr = NULL;
396 struct proc_mm_op copy;
397
398
399 if (!ptrace_ldt)
400 mutex_init(&new_mm->ldt.lock);
401
402 if (!from_mm) {
403 memset(&desc, 0, sizeof(desc));
404 /*
405 * We have to initialize a clean ldt.
406 */
407 if (proc_mm) {
408 /*
409 * If the new mm was created using proc_mm, host's
410 * default-ldt currently is assigned, which normally
411 * contains the call-gates for lcall7 and lcall27.
412 * To remove these gates, we simply write an empty
413 * entry as number 0 to the host.
414 */
415 err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
416 }
417 else{
418 /*
419			 * Now we try to retrieve info about the ldt we
420 * inherited from the host. All ldt-entries found
421 * will be reset in the following loop
422 */
423 ldt_get_host_info();
424 for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
425 desc.entry_number = *num_p;
426 err = write_ldt_entry(&new_mm->id, 1, &desc,
427 &addr, *(num_p + 1) == -1);
428 if (err)
429 break;
430 }
431 }
432 new_mm->ldt.entry_count = 0;
433
434 goto out;
435 }
436
437 if (proc_mm) {
438 /*
439 * We have a valid from_mm, so we now have to copy the LDT of
440		 * from_mm to new_mm, because using proc_mm a new mm with
441 * an empty/default LDT was created in new_mm()
442 */
443 copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
444 .u =
445 { .copy_segments =
446 from_mm->id.u.mm_fd } } );
447 i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
448 if (i != sizeof(copy))
449 printk(KERN_ERR "new_mm : /proc/mm copy_segments "
450 "failed, err = %d\n", -i);
451 }
452
453 if (!ptrace_ldt) {
454 /*
455 * Our local LDT is used to supply the data for
456 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
457 * i.e., we have to use the stub for modify_ldt, which
458 * can't handle the big read buffer of up to 64kB.
459 */
460 mutex_lock(&from_mm->ldt.lock);
461 if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES)
462 memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
463 sizeof(new_mm->ldt.u.entries));
464 else {
465 i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
466 while (i-->0) {
467 page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
468 if (!page) {
469 err = -ENOMEM;
470 break;
471 }
472 new_mm->ldt.u.pages[i] =
473 (struct ldt_entry *) page;
474 memcpy(new_mm->ldt.u.pages[i],
475 from_mm->ldt.u.pages[i], PAGE_SIZE);
476 }
477 }
478 new_mm->ldt.entry_count = from_mm->ldt.entry_count;
479 mutex_unlock(&from_mm->ldt.lock);
480 }
481
482 out:
483 return err;
484}
485
486
487void free_ldt(struct mm_context *mm)
488{
489 int i;
490
491 if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) {
492 i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
493 while (i-- > 0)
494 free_page((long) mm->ldt.u.pages[i]);
495 }
496 mm->ldt.entry_count = 0;
497}
498
499int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
500{
501 return do_modify_ldt_skas(func, ptr, bytecount);
502}
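
do_modify_ldt_skas() is what a guest process reaches through the modify_ldt system call: func 0 lands in read_ldt() above, funcs 1/0x11 in write_ldt(). A hedged guest-side example reading the LDT back (run inside a UML guest this exercises read_ldt(); on a plain host it reads the host LDT):

#include <asm/ldt.h>            /* LDT_ENTRY_SIZE */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[64 * LDT_ENTRY_SIZE];
        long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));  /* func 0 = read */

        if (n < 0)
                perror("modify_ldt");
        else
                printf("read %ld bytes of LDT\n", n);
        return 0;
}
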
diff --git a/arch/um/sys-i386/mem.c b/arch/um/sys-i386/mem.c
new file mode 100644
index 00000000000..639900a6fde
--- /dev/null
+++ b/arch/um/sys-i386/mem.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/mm.h>
10#include <asm/page.h>
11#include <asm/mman.h>
12
13static struct vm_area_struct gate_vma;
14
15static int __init gate_vma_init(void)
16{
17 if (!FIXADDR_USER_START)
18 return 0;
19
20 gate_vma.vm_mm = NULL;
21 gate_vma.vm_start = FIXADDR_USER_START;
22 gate_vma.vm_end = FIXADDR_USER_END;
23 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24 gate_vma.vm_page_prot = __P101;
25
26 /*
27 * Make sure the vDSO gets into every core dump.
28 * Dumping its contents makes post-mortem fully interpretable later
29 * without matching up the same kernel and hardware config to see
30 * what PC values meant.
31 */
32 gate_vma.vm_flags |= VM_ALWAYSDUMP;
33
34 return 0;
35}
36__initcall(gate_vma_init);
37
38struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
39{
40 return FIXADDR_USER_START ? &gate_vma : NULL;
41}
42
43int in_gate_area_no_mm(unsigned long addr)
44{
45 if (!FIXADDR_USER_START)
46 return 0;
47
48 if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
49 return 1;
50
51 return 0;
52}
53
54int in_gate_area(struct mm_struct *mm, unsigned long addr)
55{
56 struct vm_area_struct *vma = get_gate_vma(mm);
57
58 if (!vma)
59 return 0;
60
61 return (addr >= vma->vm_start) && (addr < vma->vm_end);
62}
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
new file mode 100644
index 00000000000..3375c271785
--- /dev/null
+++ b/arch/um/sys-i386/ptrace.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/mm.h"
7#include "linux/sched.h"
8#include "asm/uaccess.h"
9#include "skas.h"
10
11extern int arch_switch_tls(struct task_struct *to);
12
13void arch_switch_to(struct task_struct *to)
14{
15 int err = arch_switch_tls(to);
16 if (!err)
17 return;
18
19 if (err != -EINVAL)
20 printk(KERN_WARNING "arch_switch_tls failed, errno %d, "
21 "not EINVAL\n", -err);
22 else
23 printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n");
24}
25
26int is_syscall(unsigned long addr)
27{
28 unsigned short instr;
29 int n;
30
31 n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
32 if (n) {
33 /* access_process_vm() grants access to vsyscall and stub,
34 * while copy_from_user doesn't. Maybe access_process_vm is
35 * slow, but that doesn't matter, since it will be called only
36 * in case of singlestepping, if copy_from_user failed.
37 */
38 n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
39 if (n != sizeof(instr)) {
40 printk(KERN_ERR "is_syscall : failed to read "
41 "instruction from 0x%lx\n", addr);
42 return 1;
43 }
44 }
45 /* int 0x80 or sysenter */
46 return (instr == 0x80cd) || (instr == 0x340f);
47}
48
49/* determines which flags the user has access to. */
50/* 1 = access 0 = no access */
51#define FLAG_MASK 0x00044dd5
52
53int putreg(struct task_struct *child, int regno, unsigned long value)
54{
55 regno >>= 2;
56 switch (regno) {
57 case FS:
58 if (value && (value & 3) != 3)
59 return -EIO;
60 PT_REGS_FS(&child->thread.regs) = value;
61 return 0;
62 case GS:
63 if (value && (value & 3) != 3)
64 return -EIO;
65 PT_REGS_GS(&child->thread.regs) = value;
66 return 0;
67 case DS:
68 case ES:
69 if (value && (value & 3) != 3)
70 return -EIO;
71 value &= 0xffff;
72 break;
73 case SS:
74 case CS:
75 if ((value & 3) != 3)
76 return -EIO;
77 value &= 0xffff;
78 break;
79 case EFL:
80 value &= FLAG_MASK;
81 value |= PT_REGS_EFLAGS(&child->thread.regs);
82 break;
83 }
84 PT_REGS_SET(&child->thread.regs, regno, value);
85 return 0;
86}
87
88int poke_user(struct task_struct *child, long addr, long data)
89{
90 if ((addr & 3) || addr < 0)
91 return -EIO;
92
93 if (addr < MAX_REG_OFFSET)
94 return putreg(child, addr, data);
95 else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
96 (addr <= offsetof(struct user, u_debugreg[7]))) {
97 addr -= offsetof(struct user, u_debugreg[0]);
98 addr = addr >> 2;
99 if ((addr == 4) || (addr == 5))
100 return -EIO;
101 child->thread.arch.debugregs[addr] = data;
102 return 0;
103 }
104 return -EIO;
105}
106
107unsigned long getreg(struct task_struct *child, int regno)
108{
109 unsigned long retval = ~0UL;
110
111 regno >>= 2;
112 switch (regno) {
113 case FS:
114 case GS:
115 case DS:
116 case ES:
117 case SS:
118 case CS:
119 retval = 0xffff;
120 /* fall through */
121 default:
122 retval &= PT_REG(&child->thread.regs, regno);
123 }
124 return retval;
125}
126
127/* read the word at location addr in the USER area. */
128int peek_user(struct task_struct *child, long addr, long data)
129{
130 unsigned long tmp;
131
132 if ((addr & 3) || addr < 0)
133 return -EIO;
134
135 tmp = 0; /* Default return condition */
136 if (addr < MAX_REG_OFFSET) {
137 tmp = getreg(child, addr);
138 }
139 else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
140 (addr <= offsetof(struct user, u_debugreg[7]))) {
141 addr -= offsetof(struct user, u_debugreg[0]);
142 addr = addr >> 2;
143 tmp = child->thread.arch.debugregs[addr];
144 }
145 return put_user(tmp, (unsigned long __user *) data);
146}
147
148static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
149{
150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
151 struct user_i387_struct fpregs;
152
153 err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
154 if (err)
155 return err;
156
157 n = copy_to_user(buf, &fpregs, sizeof(fpregs));
158 if(n > 0)
159 return -EFAULT;
160
161 return n;
162}
163
164static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
165{
166 int n, cpu = ((struct thread_info *) child->stack)->cpu;
167 struct user_i387_struct fpregs;
168
169 n = copy_from_user(&fpregs, buf, sizeof(fpregs));
170 if (n > 0)
171 return -EFAULT;
172
173 return restore_fp_registers(userspace_pid[cpu],
174 (unsigned long *) &fpregs);
175}
176
177static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
178{
179 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
180 struct user_fxsr_struct fpregs;
181
182 err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
183 if (err)
184 return err;
185
186 n = copy_to_user(buf, &fpregs, sizeof(fpregs));
187 if(n > 0)
188 return -EFAULT;
189
190 return n;
191}
192
193static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
194{
195 int n, cpu = ((struct thread_info *) child->stack)->cpu;
196 struct user_fxsr_struct fpregs;
197
198 n = copy_from_user(&fpregs, buf, sizeof(fpregs));
199 if (n > 0)
200 return -EFAULT;
201
202 return restore_fpx_registers(userspace_pid[cpu],
203 (unsigned long *) &fpregs);
204}
205
206long subarch_ptrace(struct task_struct *child, long request,
207 unsigned long addr, unsigned long data)
208{
209 int ret = -EIO;
210 void __user *datap = (void __user *) data;
211 switch (request) {
212 case PTRACE_GETFPREGS: /* Get the child FPU state. */
213 ret = get_fpregs(datap, child);
214 break;
215 case PTRACE_SETFPREGS: /* Set the child FPU state. */
216 ret = set_fpregs(datap, child);
217 break;
218 case PTRACE_GETFPXREGS: /* Get the child FPU state. */
219 ret = get_fpxregs(datap, child);
220 break;
221 case PTRACE_SETFPXREGS: /* Set the child FPU state. */
222 ret = set_fpxregs(datap, child);
223 break;
224 default:
225 ret = -EIO;
226 }
227 return ret;
228}
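
subarch_ptrace() above serves PTRACE_GETFPREGS and friends to tracers running inside the guest. From the tracer's side this is the standard ptrace interface; a hedged host-style sketch (the child is assumed to be ptrace-stopped, and struct user_fpregs_struct comes from <sys/user.h> on i386):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Sketch: dump the FPU control word of a ptrace-stopped child. */
static void show_fpu_cwd(pid_t pid)
{
        struct user_fpregs_struct fp;

        if (ptrace(PTRACE_GETFPREGS, pid, NULL, &fp) < 0)
                perror("PTRACE_GETFPREGS");
        else
                printf("child %d fpu cwd: 0x%lx\n", (int)pid,
                       (unsigned long)fp.cwd);
}
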
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
new file mode 100644
index 00000000000..0b10c3e7402
--- /dev/null
+++ b/arch/um/sys-i386/ptrace_user.c
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <errno.h>
7#include <sys/ptrace.h>
8
9int ptrace_getregs(long pid, unsigned long *regs_out)
10{
11 if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
12 return -errno;
13 return 0;
14}
15
16int ptrace_setregs(long pid, unsigned long *regs)
17{
18 if (ptrace(PTRACE_SETREGS, pid, 0, regs) < 0)
19 return -errno;
20 return 0;
21}
diff --git a/arch/um/sys-i386/setjmp.S b/arch/um/sys-i386/setjmp.S
new file mode 100644
index 00000000000..b766792c993
--- /dev/null
+++ b/arch/um/sys-i386/setjmp.S
@@ -0,0 +1,58 @@
1#
2# arch/i386/setjmp.S
3#
4# setjmp/longjmp for the i386 architecture
5#
6
7#
8# The jmp_buf is assumed to contain the following, in order:
9# %ebx
10# %esp
11# %ebp
12# %esi
13# %edi
14# <return address>
15#
16
17 .text
18 .align 4
19 .globl setjmp
20 .type setjmp, @function
21setjmp:
22#ifdef _REGPARM
23 movl %eax,%edx
24#else
25 movl 4(%esp),%edx
26#endif
27 popl %ecx # Return address, and adjust the stack
28 xorl %eax,%eax # Return value
29 movl %ebx,(%edx)
30 movl %esp,4(%edx) # Post-return %esp!
31 pushl %ecx # Make the call/return stack happy
32 movl %ebp,8(%edx)
33 movl %esi,12(%edx)
34 movl %edi,16(%edx)
35 movl %ecx,20(%edx) # Return address
36 ret
37
38 .size setjmp,.-setjmp
39
40 .text
41 .align 4
42 .globl longjmp
43 .type longjmp, @function
44longjmp:
45#ifdef _REGPARM
46 xchgl %eax,%edx
47#else
48 movl 4(%esp),%edx # jmp_ptr address
49 movl 8(%esp),%eax # Return value
50#endif
51 movl (%edx),%ebx
52 movl 4(%edx),%esp
53 movl 8(%edx),%ebp
54 movl 12(%edx),%esi
55 movl 16(%edx),%edi
56 jmp *20(%edx)
57
58 .size longjmp,.-longjmp
diff --git a/arch/um/sys-i386/shared/sysdep/archsetjmp.h b/arch/um/sys-i386/shared/sysdep/archsetjmp.h
new file mode 100644
index 00000000000..0f312085ce1
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/archsetjmp.h
@@ -0,0 +1,22 @@
1/*
2 * arch/um/include/sysdep-i386/archsetjmp.h
3 */
4
5#ifndef _KLIBC_ARCHSETJMP_H
6#define _KLIBC_ARCHSETJMP_H
7
8struct __jmp_buf {
9 unsigned int __ebx;
10 unsigned int __esp;
11 unsigned int __ebp;
12 unsigned int __esi;
13 unsigned int __edi;
14 unsigned int __eip;
15};
16
17typedef struct __jmp_buf jmp_buf[1];
18
19#define JB_IP __eip
20#define JB_SP __esp
21
22#endif /* _KLIBC_ARCHSETJMP_H */
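The field order in struct __jmp_buf is exactly the store order used by setjmp.S above: %ebx, %esp, %ebp, %esi, %edi and the return address, at 4-byte strides. A small standalone check of that correspondence, with the struct copied here for illustration:

#include <stddef.h>
#include <stdio.h>

struct __jmp_buf {
	unsigned int __ebx, __esp, __ebp, __esi, __edi, __eip;
};

int main(void)
{
	/* The stores in setjmp.S use these same displacements off %edx. */
	printf("ebx=%zu esp=%zu ebp=%zu esi=%zu edi=%zu eip=%zu\n",
	       offsetof(struct __jmp_buf, __ebx),
	       offsetof(struct __jmp_buf, __esp),
	       offsetof(struct __jmp_buf, __ebp),
	       offsetof(struct __jmp_buf, __esi),
	       offsetof(struct __jmp_buf, __edi),
	       offsetof(struct __jmp_buf, __eip));
	return 0;	/* prints 0 4 8 12 16 20 */
}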
diff --git a/arch/um/sys-i386/shared/sysdep/barrier.h b/arch/um/sys-i386/shared/sysdep/barrier.h
new file mode 100644
index 00000000000..b58d52c5b2f
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/barrier.h
@@ -0,0 +1,9 @@
1#ifndef __SYSDEP_I386_BARRIER_H
2#define __SYSDEP_I386_BARRIER_H
3
4/* Copied from include/asm-i386 for use by userspace. i386 has the option
5 * of using mfence, but I'm just using this, which works everywhere, for now.
6 */
7#define mb() asm volatile("lock; addl $0,0(%esp)")
8
9#endif
diff --git a/arch/um/sys-i386/shared/sysdep/checksum.h b/arch/um/sys-i386/shared/sysdep/checksum.h
new file mode 100644
index 00000000000..ed47445f390
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/checksum.h
@@ -0,0 +1,201 @@
1/*
2 * Licensed under the GPL
3 */
4
5#ifndef __UM_SYSDEP_CHECKSUM_H
6#define __UM_SYSDEP_CHECKSUM_H
7
8#include "linux/in6.h"
9#include "linux/string.h"
10
11/*
12 * computes the checksum of a memory block at buff, length len,
13 * and adds in "sum" (32-bit)
14 *
15 * returns a 32-bit number suitable for feeding into itself
16 * or csum_tcpudp_magic
17 *
18 * this function must be called with even lengths, except
19 * for the last fragment, which may be odd
20 *
21 * it's best to have buff aligned on a 32-bit boundary
22 */
23__wsum csum_partial(const void *buff, int len, __wsum sum);
24
25/*
26 * Note: when you get a NULL pointer exception here this means someone
27 * passed in an incorrect kernel address to one of these functions.
28 *
29 * If you use these functions directly please don't forget the
30 * access_ok().
31 */
32
33static __inline__
34__wsum csum_partial_copy_nocheck(const void *src, void *dst,
35 int len, __wsum sum)
36{
37 memcpy(dst, src, len);
38 return csum_partial(dst, len, sum);
39}
40
41/*
42 * the same as csum_partial, but copies from src while it
43 * checksums, and handles user-space pointer exceptions correctly, when needed.
44 *
45 * here it is even more important to align src and dst on a 32-bit (or,
46 * even better, a 64-bit) boundary
47 */
48
49static __inline__
50__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
51 int len, __wsum sum, int *err_ptr)
52{
53 if (copy_from_user(dst, src, len)) {
54 *err_ptr = -EFAULT;
55 return (__force __wsum)-1;
56 }
57
58 return csum_partial(dst, len, sum);
59}
60
61/*
62 * This is a version of ip_compute_csum() optimized for IP headers,
63 * which always checksum on 4 octet boundaries.
64 *
65 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
66 * Arnt Gulbrandsen.
67 */
68static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
69{
70 unsigned int sum;
71
72 __asm__ __volatile__(
73 "movl (%1), %0 ;\n"
74 "subl $4, %2 ;\n"
75 "jbe 2f ;\n"
76 "addl 4(%1), %0 ;\n"
77 "adcl 8(%1), %0 ;\n"
78 "adcl 12(%1), %0 ;\n"
79"1: adcl 16(%1), %0 ;\n"
80 "lea 4(%1), %1 ;\n"
81 "decl %2 ;\n"
82 "jne 1b ;\n"
83 "adcl $0, %0 ;\n"
84 "movl %0, %2 ;\n"
85 "shrl $16, %0 ;\n"
86 "addw %w2, %w0 ;\n"
87 "adcl $0, %0 ;\n"
88 "notl %0 ;\n"
89"2: ;\n"
90	/* Since the input registers which are loaded with iph and ihl
91 are modified, we must also specify them as outputs, or gcc
92 will assume they contain their original values. */
93 : "=r" (sum), "=r" (iph), "=r" (ihl)
94 : "1" (iph), "2" (ihl)
95 : "memory");
96 return (__force __sum16)sum;
97}
98
99/*
100 * Fold a partial checksum
101 */
102
103static inline __sum16 csum_fold(__wsum sum)
104{
105 __asm__(
106 "addl %1, %0 ;\n"
107 "adcl $0xffff, %0 ;\n"
108 : "=r" (sum)
109 : "r" ((__force u32)sum << 16),
110 "0" ((__force u32)sum & 0xffff0000)
111 );
112 return (__force __sum16)(~(__force u32)sum >> 16);
113}
114
115static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
116 unsigned short len,
117 unsigned short proto,
118 __wsum sum)
119{
120 __asm__(
121 "addl %1, %0 ;\n"
122 "adcl %2, %0 ;\n"
123 "adcl %3, %0 ;\n"
124 "adcl $0, %0 ;\n"
125 : "=r" (sum)
126 : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
127 return sum;
128}
129
130/*
131 * computes the checksum of the TCP/UDP pseudo-header
132 * returns a 16-bit checksum, already complemented
133 */
134static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
135 unsigned short len,
136 unsigned short proto,
137 __wsum sum)
138{
139 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
140}
141
142/*
143 * this routine is used for miscellaneous IP-like checksums, mainly
144 * in icmp.c
145 */
146
147static inline __sum16 ip_compute_csum(const void *buff, int len)
148{
149 return csum_fold (csum_partial(buff, len, 0));
150}
151
152#define _HAVE_ARCH_IPV6_CSUM
153static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
154 const struct in6_addr *daddr,
155 __u32 len, unsigned short proto,
156 __wsum sum)
157{
158 __asm__(
159 "addl 0(%1), %0 ;\n"
160 "adcl 4(%1), %0 ;\n"
161 "adcl 8(%1), %0 ;\n"
162 "adcl 12(%1), %0 ;\n"
163 "adcl 0(%2), %0 ;\n"
164 "adcl 4(%2), %0 ;\n"
165 "adcl 8(%2), %0 ;\n"
166 "adcl 12(%2), %0 ;\n"
167 "adcl %3, %0 ;\n"
168 "adcl %4, %0 ;\n"
169 "adcl $0, %0 ;\n"
170 : "=&r" (sum)
171 : "r" (saddr), "r" (daddr),
172 "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
173
174 return csum_fold(sum);
175}
176
177/*
178 * Copy and checksum to user
179 */
180#define HAVE_CSUM_COPY_USER
181static __inline__ __wsum csum_and_copy_to_user(const void *src,
182 void __user *dst,
183 int len, __wsum sum, int *err_ptr)
184{
185 if (access_ok(VERIFY_WRITE, dst, len)) {
186 if (copy_to_user(dst, src, len)) {
187 *err_ptr = -EFAULT;
188 return (__force __wsum)-1;
189 }
190
191 return csum_partial(src, len, sum);
192 }
193
194 if (len)
195 *err_ptr = -EFAULT;
196
197 return (__force __wsum)-1; /* invalid checksum */
198}
199
200#endif
201
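The helpers above all build on a 32-bit ones' complement sum that csum_fold() collapses to 16 bits. A portable C sketch of the same arithmetic (not the kernel's implementation), useful for seeing what csum_fold() and ip_compute_csum() return; the sample header bytes are arbitrary:

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* at most one more carry */
	return (uint16_t) ~sum;
}

static uint16_t ip_csum(const void *buf, int len)
{
	const uint16_t *p = buf;
	uint32_t sum = 0;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len)				/* trailing odd byte */
		sum += *(const uint8_t *) p;
	return fold(sum);
}

int main(void)
{
	/* A 20-byte IPv4 header with its checksum field zeroed. */
	uint8_t hdr[20] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
			    0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
			    0xac, 0x10, 0x0a, 0x0c };

	printf("checksum = %#06x\n", ip_csum(hdr, sizeof(hdr)));
	return 0;
}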
diff --git a/arch/um/sys-i386/shared/sysdep/faultinfo.h b/arch/um/sys-i386/shared/sysdep/faultinfo.h
new file mode 100644
index 00000000000..db437cc373b
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/faultinfo.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
3 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
4 * Licensed under the GPL
5 */
6
7#ifndef __FAULTINFO_I386_H
8#define __FAULTINFO_I386_H
9
10/* this structure contains the full arch-specific faultinfo
11 * from the traps.
12 * On i386, ptrace_faultinfo unfortunately doesn't provide
13 * all the info, since trap_no is missing.
14 * All common elements are defined at the same position in
15 * both structures, thus making it easy to copy the
16 * contents without knowledge about the structure elements.
17 */
18struct faultinfo {
19 int error_code; /* in ptrace_faultinfo misleadingly called is_write */
20 unsigned long cr2; /* in ptrace_faultinfo called addr */
21 int trap_no; /* missing in ptrace_faultinfo */
22};
23
24#define FAULT_WRITE(fi) ((fi).error_code & 2)
25#define FAULT_ADDRESS(fi) ((fi).cr2)
26
27#define PTRACE_FULL_FAULTINFO 0
28
29#endif
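FAULT_WRITE() tests bit 1 of the hardware page-fault error code saved in error_code. A small sketch decoding the three low bits, which are defined by the CPU rather than by UML:

#include <stdio.h>

/* x86 page fault error code bits:
 *   bit 0 - protection violation (set) vs. not-present page (clear)
 *   bit 1 - write access (what FAULT_WRITE() tests)
 *   bit 2 - fault happened in user mode
 */
static void decode_pf_error(int error_code)
{
	printf("%s, %s, from %s mode\n",
	       (error_code & 1) ? "protection fault" : "page not present",
	       (error_code & 2) ? "write" : "read",
	       (error_code & 4) ? "user" : "kernel");
}

int main(void)
{
	decode_pf_error(6);	/* not-present page, write access, user mode */
	return 0;
}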
diff --git a/arch/um/sys-i386/shared/sysdep/host_ldt.h b/arch/um/sys-i386/shared/sysdep/host_ldt.h
new file mode 100644
index 00000000000..0953cc4df65
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/host_ldt.h
@@ -0,0 +1,34 @@
1#ifndef __ASM_HOST_LDT_I386_H
2#define __ASM_HOST_LDT_I386_H
3
4#include <asm/ldt.h>
5
6/*
7 * macros stolen from include/asm-i386/desc.h
8 */
9#define LDT_entry_a(info) \
10 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
11
12#define LDT_entry_b(info) \
13 (((info)->base_addr & 0xff000000) | \
14 (((info)->base_addr & 0x00ff0000) >> 16) | \
15 ((info)->limit & 0xf0000) | \
16 (((info)->read_exec_only ^ 1) << 9) | \
17 ((info)->contents << 10) | \
18 (((info)->seg_not_present ^ 1) << 15) | \
19 ((info)->seg_32bit << 22) | \
20 ((info)->limit_in_pages << 23) | \
21 ((info)->useable << 20) | \
22 0x7000)
23
24#define LDT_empty(info) (\
25 (info)->base_addr == 0 && \
26 (info)->limit == 0 && \
27 (info)->contents == 0 && \
28 (info)->read_exec_only == 1 && \
29 (info)->seg_32bit == 0 && \
30 (info)->limit_in_pages == 0 && \
31 (info)->seg_not_present == 1 && \
32 (info)->useable == 0 )
33
34#endif
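LDT_entry_a()/LDT_entry_b() pack a user_desc-style description into the two 32-bit halves of an x86 segment descriptor. A standalone sketch of that packing for a flat, writable 32-bit data segment; the struct below only mirrors the fields the macros touch, and the macros are copied verbatim:

#include <stdio.h>

struct desc_info {
	unsigned int entry_number;
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};

#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

#define LDT_entry_b(info) \
	(((info)->base_addr & 0xff000000) | \
	 (((info)->base_addr & 0x00ff0000) >> 16) | \
	 ((info)->limit & 0xf0000) | \
	 (((info)->read_exec_only ^ 1) << 9) | \
	 ((info)->contents << 10) | \
	 (((info)->seg_not_present ^ 1) << 15) | \
	 ((info)->seg_32bit << 22) | \
	 ((info)->limit_in_pages << 23) | \
	 ((info)->useable << 20) | \
	 0x7000)

int main(void)
{
	struct desc_info d = {
		.base_addr      = 0,
		.limit          = 0xfffff,	/* 4 GB with limit_in_pages */
		.seg_32bit      = 1,
		.contents       = 0,		/* data, expand-up */
		.read_exec_only = 0,		/* writable */
		.limit_in_pages = 1,
	};

	/* High dword (LDT_entry_b) then low dword (LDT_entry_a):
	 * prints 0x00cff200 0x0000ffff, a flat DPL-3 writable data segment. */
	printf("%#010x %#010x\n", LDT_entry_b(&d), LDT_entry_a(&d));
	return 0;
}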
diff --git a/arch/um/sys-i386/shared/sysdep/kernel-offsets.h b/arch/um/sys-i386/shared/sysdep/kernel-offsets.h
new file mode 100644
index 00000000000..5868526b5ee
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/kernel-offsets.h
@@ -0,0 +1,21 @@
1#include <linux/stddef.h>
2#include <linux/sched.h>
3#include <linux/elf.h>
4#include <linux/crypto.h>
5#include <asm/mman.h>
6
7#define DEFINE(sym, val) \
8 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
9
10#define STR(x) #x
11#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
12
13#define BLANK() asm volatile("\n->" : : )
14
15#define OFFSET(sym, str, mem) \
16 DEFINE(sym, offsetof(struct str, mem));
17
18void foo(void)
19{
20#include <common-offsets.h>
21}
diff --git a/arch/um/sys-i386/shared/sysdep/ptrace.h b/arch/um/sys-i386/shared/sysdep/ptrace.h
new file mode 100644
index 00000000000..c398a507611
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/ptrace.h
@@ -0,0 +1,171 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_I386_PTRACE_H
7#define __SYSDEP_I386_PTRACE_H
8
9#include "user_constants.h"
10#include "sysdep/faultinfo.h"
11
12#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
13#define MAX_REG_OFFSET (UM_FRAME_SIZE)
14
15static inline void update_debugregs(int seq) {}
16
17/* syscall emulation path in ptrace */
18
19#ifndef PTRACE_SYSEMU
20#define PTRACE_SYSEMU 31
21#endif
22
23void set_using_sysemu(int value);
24int get_using_sysemu(void);
25extern int sysemu_supported;
26
27#include "skas_ptregs.h"
28
29#define REGS_IP(r) ((r)[HOST_IP])
30#define REGS_SP(r) ((r)[HOST_SP])
31#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
32#define REGS_EAX(r) ((r)[HOST_EAX])
33#define REGS_EBX(r) ((r)[HOST_EBX])
34#define REGS_ECX(r) ((r)[HOST_ECX])
35#define REGS_EDX(r) ((r)[HOST_EDX])
36#define REGS_ESI(r) ((r)[HOST_ESI])
37#define REGS_EDI(r) ((r)[HOST_EDI])
38#define REGS_EBP(r) ((r)[HOST_EBP])
39#define REGS_CS(r) ((r)[HOST_CS])
40#define REGS_SS(r) ((r)[HOST_SS])
41#define REGS_DS(r) ((r)[HOST_DS])
42#define REGS_ES(r) ((r)[HOST_ES])
43#define REGS_FS(r) ((r)[HOST_FS])
44#define REGS_GS(r) ((r)[HOST_GS])
45
46#define REGS_SET_SYSCALL_RETURN(r, res) REGS_EAX(r) = (res)
47
48#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))
49
50#ifndef PTRACE_SYSEMU_SINGLESTEP
51#define PTRACE_SYSEMU_SINGLESTEP 32
52#endif
53
54struct uml_pt_regs {
55 unsigned long gp[MAX_REG_NR];
56 unsigned long fp[HOST_FPX_SIZE];
57 struct faultinfo faultinfo;
58 long syscall;
59 int is_user;
60};
61
62#define EMPTY_UML_PT_REGS { }
63
64#define UPT_IP(r) REGS_IP((r)->gp)
65#define UPT_SP(r) REGS_SP((r)->gp)
66#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
67#define UPT_EAX(r) REGS_EAX((r)->gp)
68#define UPT_EBX(r) REGS_EBX((r)->gp)
69#define UPT_ECX(r) REGS_ECX((r)->gp)
70#define UPT_EDX(r) REGS_EDX((r)->gp)
71#define UPT_ESI(r) REGS_ESI((r)->gp)
72#define UPT_EDI(r) REGS_EDI((r)->gp)
73#define UPT_EBP(r) REGS_EBP((r)->gp)
74#define UPT_ORIG_EAX(r) ((r)->syscall)
75#define UPT_CS(r) REGS_CS((r)->gp)
76#define UPT_SS(r) REGS_SS((r)->gp)
77#define UPT_DS(r) REGS_DS((r)->gp)
78#define UPT_ES(r) REGS_ES((r)->gp)
79#define UPT_FS(r) REGS_FS((r)->gp)
80#define UPT_GS(r) REGS_GS((r)->gp)
81
82#define UPT_SYSCALL_ARG1(r) UPT_EBX(r)
83#define UPT_SYSCALL_ARG2(r) UPT_ECX(r)
84#define UPT_SYSCALL_ARG3(r) UPT_EDX(r)
85#define UPT_SYSCALL_ARG4(r) UPT_ESI(r)
86#define UPT_SYSCALL_ARG5(r) UPT_EDI(r)
87#define UPT_SYSCALL_ARG6(r) UPT_EBP(r)
88
89extern int user_context(unsigned long sp);
90
91#define UPT_IS_USER(r) ((r)->is_user)
92
93struct syscall_args {
94 unsigned long args[6];
95};
96
97#define SYSCALL_ARGS(r) ((struct syscall_args) \
98 { .args = { UPT_SYSCALL_ARG1(r), \
99 UPT_SYSCALL_ARG2(r), \
100 UPT_SYSCALL_ARG3(r), \
101 UPT_SYSCALL_ARG4(r), \
102 UPT_SYSCALL_ARG5(r), \
103 UPT_SYSCALL_ARG6(r) } } )
104
105#define UPT_REG(regs, reg) \
106 ({ unsigned long val; \
107 switch(reg){ \
108 case EIP: val = UPT_IP(regs); break; \
109 case UESP: val = UPT_SP(regs); break; \
110 case EAX: val = UPT_EAX(regs); break; \
111 case EBX: val = UPT_EBX(regs); break; \
112 case ECX: val = UPT_ECX(regs); break; \
113 case EDX: val = UPT_EDX(regs); break; \
114 case ESI: val = UPT_ESI(regs); break; \
115 case EDI: val = UPT_EDI(regs); break; \
116 case EBP: val = UPT_EBP(regs); break; \
117 case ORIG_EAX: val = UPT_ORIG_EAX(regs); break; \
118 case CS: val = UPT_CS(regs); break; \
119 case SS: val = UPT_SS(regs); break; \
120 case DS: val = UPT_DS(regs); break; \
121 case ES: val = UPT_ES(regs); break; \
122 case FS: val = UPT_FS(regs); break; \
123 case GS: val = UPT_GS(regs); break; \
124 case EFL: val = UPT_EFLAGS(regs); break; \
125 default : \
126 panic("Bad register in UPT_REG : %d\n", reg); \
127 val = -1; \
128 } \
129 val; \
130 })
131
132#define UPT_SET(regs, reg, val) \
133 do { \
134 switch(reg){ \
135 case EIP: UPT_IP(regs) = val; break; \
136 case UESP: UPT_SP(regs) = val; break; \
137 case EAX: UPT_EAX(regs) = val; break; \
138 case EBX: UPT_EBX(regs) = val; break; \
139 case ECX: UPT_ECX(regs) = val; break; \
140 case EDX: UPT_EDX(regs) = val; break; \
141 case ESI: UPT_ESI(regs) = val; break; \
142 case EDI: UPT_EDI(regs) = val; break; \
143 case EBP: UPT_EBP(regs) = val; break; \
144 case ORIG_EAX: UPT_ORIG_EAX(regs) = val; break; \
145 case CS: UPT_CS(regs) = val; break; \
146 case SS: UPT_SS(regs) = val; break; \
147 case DS: UPT_DS(regs) = val; break; \
148 case ES: UPT_ES(regs) = val; break; \
149 case FS: UPT_FS(regs) = val; break; \
150 case GS: UPT_GS(regs) = val; break; \
151 case EFL: UPT_EFLAGS(regs) = val; break; \
152 default : \
153 panic("Bad register in UPT_SET : %d\n", reg); \
154 break; \
155 } \
156 } while (0)
157
158#define UPT_SET_SYSCALL_RETURN(r, res) \
159 REGS_SET_SYSCALL_RETURN((r)->regs, (res))
160
161#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
162
163#define UPT_ORIG_SYSCALL(r) UPT_EAX(r)
164#define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r)
165#define UPT_SYSCALL_RET(r) UPT_EAX(r)
166
167#define UPT_FAULTINFO(r) (&(r)->faultinfo)
168
169extern void arch_init_registers(int pid);
170
171#endif
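UPT_SYSCALL_ARG1..6 encode the i386 syscall convention: the number travels in orig_eax and the arguments in %ebx, %ecx, %edx, %esi, %edi, %ebp. An illustrative host-side view of the same mapping using glibc's user_regs_struct; this only builds on an i386 host and assumes a child already stopped at a syscall under PTRACE_SYSCALL or PTRACE_SYSEMU:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Sketch only: print the syscall a stopped child is about to make. */
void print_syscall(pid_t pid)
{
	struct user_regs_struct regs;

	if (ptrace(PTRACE_GETREGS, pid, 0, &regs) < 0)
		return;
	printf("syscall %ld(%#lx, %#lx, %#lx, %#lx, %#lx, %#lx)\n",
	       (long) regs.orig_eax,
	       (long) regs.ebx, (long) regs.ecx, (long) regs.edx,
	       (long) regs.esi, (long) regs.edi, (long) regs.ebp);
}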
diff --git a/arch/um/sys-i386/shared/sysdep/ptrace_user.h b/arch/um/sys-i386/shared/sysdep/ptrace_user.h
new file mode 100644
index 00000000000..ef56247e414
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/ptrace_user.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_I386_PTRACE_USER_H__
7#define __SYSDEP_I386_PTRACE_USER_H__
8
9#include <sys/ptrace.h>
10#include <linux/ptrace.h>
11#include <asm/ptrace.h>
12#include "user_constants.h"
13
14#define PT_OFFSET(r) ((r) * sizeof(long))
15
16#define PT_SYSCALL_NR(regs) ((regs)[ORIG_EAX])
17#define PT_SYSCALL_NR_OFFSET PT_OFFSET(ORIG_EAX)
18
19#define PT_SYSCALL_ARG1_OFFSET PT_OFFSET(EBX)
20#define PT_SYSCALL_ARG2_OFFSET PT_OFFSET(ECX)
21#define PT_SYSCALL_ARG3_OFFSET PT_OFFSET(EDX)
22#define PT_SYSCALL_ARG4_OFFSET PT_OFFSET(ESI)
23#define PT_SYSCALL_ARG5_OFFSET PT_OFFSET(EDI)
24#define PT_SYSCALL_ARG6_OFFSET PT_OFFSET(EBP)
25
26#define PT_SYSCALL_RET_OFFSET PT_OFFSET(EAX)
27
28#define REGS_SYSCALL_NR EAX /* This is used before a system call */
29#define REGS_SYSCALL_ARG1 EBX
30#define REGS_SYSCALL_ARG2 ECX
31#define REGS_SYSCALL_ARG3 EDX
32#define REGS_SYSCALL_ARG4 ESI
33#define REGS_SYSCALL_ARG5 EDI
34#define REGS_SYSCALL_ARG6 EBP
35
36#define REGS_IP_INDEX EIP
37#define REGS_SP_INDEX UESP
38
39#define PT_IP_OFFSET PT_OFFSET(EIP)
40#define PT_IP(regs) ((regs)[EIP])
41#define PT_SP_OFFSET PT_OFFSET(UESP)
42#define PT_SP(regs) ((regs)[UESP])
43
44#define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE)
45
46#ifndef FRAME_SIZE
47#define FRAME_SIZE (17)
48#endif
49
50#endif
diff --git a/arch/um/sys-i386/shared/sysdep/sc.h b/arch/um/sys-i386/shared/sysdep/sc.h
new file mode 100644
index 00000000000..c57d1780ad3
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/sc.h
@@ -0,0 +1,44 @@
1#ifndef __SYSDEP_I386_SC_H
2#define __SYSDEP_I386_SC_H
3
4#include <user_constants.h>
5
6#define SC_OFFSET(sc, field) \
7 *((unsigned long *) &(((char *) (sc))[HOST_##field]))
8#define SC_FP_OFFSET(sc, field) \
9 *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
10#define SC_FP_OFFSET_PTR(sc, field, type) \
11 ((type *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
12
13#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
14#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
15#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
16#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
17#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
18#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
19#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
20#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
21#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
22#define SC_EAX(sc) SC_OFFSET(sc, SC_EAX)
23#define SC_EBX(sc) SC_OFFSET(sc, SC_EBX)
24#define SC_ECX(sc) SC_OFFSET(sc, SC_ECX)
25#define SC_EDX(sc) SC_OFFSET(sc, SC_EDX)
26#define SC_EDI(sc) SC_OFFSET(sc, SC_EDI)
27#define SC_ESI(sc) SC_OFFSET(sc, SC_ESI)
28#define SC_EBP(sc) SC_OFFSET(sc, SC_EBP)
29#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
30#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
31#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
32#define SC_FPSTATE(sc) SC_OFFSET(sc, SC_FPSTATE)
33#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
34#define SC_FP_CW(sc) SC_FP_OFFSET(sc, SC_FP_CW)
35#define SC_FP_SW(sc) SC_FP_OFFSET(sc, SC_FP_SW)
36#define SC_FP_TAG(sc) SC_FP_OFFSET(sc, SC_FP_TAG)
37#define SC_FP_IPOFF(sc) SC_FP_OFFSET(sc, SC_FP_IPOFF)
38#define SC_FP_CSSEL(sc) SC_FP_OFFSET(sc, SC_FP_CSSEL)
39#define SC_FP_DATAOFF(sc) SC_FP_OFFSET(sc, SC_FP_DATAOFF)
40#define SC_FP_DATASEL(sc) SC_FP_OFFSET(sc, SC_FP_DATASEL)
41#define SC_FP_ST(sc) SC_FP_OFFSET_PTR(sc, SC_FP_ST, struct _fpstate)
42#define SC_FXSR_ENV(sc) SC_FP_OFFSET_PTR(sc, SC_FXSR_ENV, void)
43
44#endif
diff --git a/arch/um/sys-i386/shared/sysdep/sigcontext.h b/arch/um/sys-i386/shared/sysdep/sigcontext.h
new file mode 100644
index 00000000000..f583c87111a
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/sigcontext.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYS_SIGCONTEXT_I386_H
7#define __SYS_SIGCONTEXT_I386_H
8
9#include "sysdep/sc.h"
10
11#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
12
13#define GET_FAULTINFO_FROM_SC(fi, sc) \
14 { \
15 (fi).cr2 = SC_CR2(sc); \
16 (fi).error_code = SC_ERR(sc); \
17 (fi).trap_no = SC_TRAPNO(sc); \
18 }
19
20/* Trap number 14 is a page fault */
21#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
22
23/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
24#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo)
25
26#endif
diff --git a/arch/um/sys-i386/shared/sysdep/skas_ptrace.h b/arch/um/sys-i386/shared/sysdep/skas_ptrace.h
new file mode 100644
index 00000000000..e27b8a79177
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/skas_ptrace.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_I386_SKAS_PTRACE_H
7#define __SYSDEP_I386_SKAS_PTRACE_H
8
9struct ptrace_faultinfo {
10 int is_write;
11 unsigned long addr;
12};
13
14struct ptrace_ldt {
15 int func;
16 void *ptr;
17 unsigned long bytecount;
18};
19
20#define PTRACE_LDT 54
21
22#endif
diff --git a/arch/um/sys-i386/shared/sysdep/stub.h b/arch/um/sys-i386/shared/sysdep/stub.h
new file mode 100644
index 00000000000..977dedd9221
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/stub.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_STUB_H
7#define __SYSDEP_STUB_H
8
9#include <sys/mman.h>
10#include <asm/ptrace.h>
11#include <asm/unistd.h>
12#include "as-layout.h"
13#include "stub-data.h"
14#include "kern_constants.h"
15
16extern void stub_segv_handler(int sig);
17extern void stub_clone_handler(void);
18
19#define STUB_SYSCALL_RET EAX
20#define STUB_MMAP_NR __NR_mmap2
21#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
22
23static inline long stub_syscall0(long syscall)
24{
25 long ret;
26
27 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall));
28
29 return ret;
30}
31
32static inline long stub_syscall1(long syscall, long arg1)
33{
34 long ret;
35
36 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1));
37
38 return ret;
39}
40
41static inline long stub_syscall2(long syscall, long arg1, long arg2)
42{
43 long ret;
44
45 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
46 "c" (arg2));
47
48 return ret;
49}
50
51static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
52{
53 long ret;
54
55 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
56 "c" (arg2), "d" (arg3));
57
58 return ret;
59}
60
61static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
62 long arg4)
63{
64 long ret;
65
66 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
67 "c" (arg2), "d" (arg3), "S" (arg4));
68
69 return ret;
70}
71
72static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
73 long arg4, long arg5)
74{
75 long ret;
76
77 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
78 "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5));
79
80 return ret;
81}
82
83static inline void trap_myself(void)
84{
85 __asm("int3");
86}
87
88static inline void remap_stack(int fd, unsigned long offset)
89{
90 __asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
91 "movl %7, %%ebx ; movl %%eax, (%%ebx)"
92 : : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
93 "c" (UM_KERN_PAGE_SIZE),
94 "d" (PROT_READ | PROT_WRITE),
95 "S" (MAP_FIXED | MAP_SHARED), "D" (fd),
96 "a" (offset),
97 "i" (&((struct stub_data *) STUB_DATA)->err)
98 : "memory");
99}
100
101#endif
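The stub_syscallN() helpers issue raw int $0x80 system calls with that same register convention. A standalone sketch of the idea for an ordinary i386 process (build with -m32); the real helpers run inside the UML stub page, so this is purely illustrative:

/* Same int $0x80 convention as stub_syscall3(): eax = number,
 * ebx/ecx/edx = arguments. Writes a string to stdout on an i386 host. */
static inline long raw_syscall3(long nr, long a1, long a2, long a3)
{
	long ret;

	__asm__ volatile ("int $0x80"
			  : "=a" (ret)
			  : "0" (nr), "b" (a1), "c" (a2), "d" (a3)
			  : "memory");
	return ret;
}

int main(void)
{
	static const char msg[] = "hello from int $0x80\n";

	raw_syscall3(4 /* __NR_write on i386 */, 1, (long) msg, sizeof(msg) - 1);
	return 0;
}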
diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h
new file mode 100644
index 00000000000..05cb796aecb
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/syscalls.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "asm/unistd.h"
7#include "sysdep/ptrace.h"
8
9typedef long syscall_handler_t(struct pt_regs);
10
11/* Not declared on x86, incompatible declarations on x86_64, so these have
12 * to go here rather than in sys_call_table.c
13 */
14extern syscall_handler_t sys_rt_sigaction;
15
16extern syscall_handler_t *sys_call_table[];
17
18#define EXECUTE_SYSCALL(syscall, regs) \
19 ((long (*)(struct syscall_args)) \
20 (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
new file mode 100644
index 00000000000..d1b93c43620
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/system.h
@@ -0,0 +1,132 @@
1#ifndef _ASM_X86_SYSTEM_H_
2#define _ASM_X86_SYSTEM_H_
3
4#include <asm/asm.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8#include <asm/nops.h>
9
10#include <linux/kernel.h>
11#include <linux/irqflags.h>
12
13/* entries in ARCH_DLINFO: */
14#ifdef CONFIG_IA32_EMULATION
15# define AT_VECTOR_SIZE_ARCH 2
16#else
17# define AT_VECTOR_SIZE_ARCH 1
18#endif
19
20extern unsigned long arch_align_stack(unsigned long sp);
21
22void default_idle(void);
23
24/*
25 * Force strict CPU ordering.
26 * And yes, this is required on UP too when we're talking
27 * to devices.
28 */
29#ifdef CONFIG_X86_32
30/*
31 * Some non-Intel clones support out of order store. wmb() ceases to be a
32 * nop for these.
33 */
34#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
35#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
36#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
37#else
38#define mb() asm volatile("mfence":::"memory")
39#define rmb() asm volatile("lfence":::"memory")
40#define wmb() asm volatile("sfence" ::: "memory")
41#endif
42
43/**
44 * read_barrier_depends - Flush all pending reads that subsequent reads
45 * depend on.
46 *
47 * No data-dependent reads from memory-like regions are ever reordered
48 * over this barrier. All reads preceding this primitive are guaranteed
49 * to access memory (but not necessarily other CPUs' caches) before any
50 * reads following this primitive that depend on the data returned by
51 * any of the preceding reads. This primitive is much lighter weight than
52 * rmb() on most CPUs, and is never heavier weight than is
53 * rmb().
54 *
55 * These ordering constraints are respected by both the local CPU
56 * and the compiler.
57 *
58 * Ordering is not guaranteed by anything other than these primitives,
59 * not even by data dependencies. See the documentation for
60 * memory_barrier() for examples and URLs to more information.
61 *
62 * For example, the following code would force ordering (the initial
63 * value of "a" is zero, "b" is one, and "p" is "&a"):
64 *
65 * <programlisting>
66 * CPU 0 CPU 1
67 *
68 * b = 2;
69 * memory_barrier();
70 * p = &b; q = p;
71 * read_barrier_depends();
72 * d = *q;
73 * </programlisting>
74 *
75 * because the read of "*q" depends on the read of "p" and these
76 * two reads are separated by a read_barrier_depends(). However,
77 * the following code, with the same initial values for "a" and "b":
78 *
79 * <programlisting>
80 * CPU 0 CPU 1
81 *
82 * a = 2;
83 * memory_barrier();
84 * b = 3; y = b;
85 * read_barrier_depends();
86 * x = a;
87 * </programlisting>
88 *
89 * does not enforce ordering, since there is no data dependency between
90 * the read of "a" and the read of "b". Therefore, on some CPUs, such
91 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
92 * in cases like this where there are no data dependencies.
93 **/
94
95#define read_barrier_depends() do { } while (0)
96
97#ifdef CONFIG_SMP
98#define smp_mb() mb()
99#ifdef CONFIG_X86_PPRO_FENCE
100# define smp_rmb() rmb()
101#else
102# define smp_rmb() barrier()
103#endif
104#ifdef CONFIG_X86_OOSTORE
105# define smp_wmb() wmb()
106#else
107# define smp_wmb() barrier()
108#endif
109#define smp_read_barrier_depends() read_barrier_depends()
110#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
111#else
112#define smp_mb() barrier()
113#define smp_rmb() barrier()
114#define smp_wmb() barrier()
115#define smp_read_barrier_depends() do { } while (0)
116#define set_mb(var, value) do { var = value; barrier(); } while (0)
117#endif
118
119/*
120 * Stop RDTSC speculation. This is needed when you need to use RDTSC
121 * (or get_cycles or vread that possibly accesses the TSC) in a defined
122 * code region.
123 *
124 * (Could use an alternative three way for this if there was one.)
125 */
126static inline void rdtsc_barrier(void)
127{
128 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
129 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
130}
131
132#endif
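The publication pattern the read_barrier_depends() comment describes looks like the sketch below. The barrier macros are replaced with compiler-builtin stand-ins so it compiles as an ordinary program; in kernel code the smp_wmb()/smp_read_barrier_depends() defined above would be used, and a real version would also read the pointer with ACCESS_ONCE(). The shared_data/shared_ptr names are hypothetical:

#include <stdio.h>

#define smp_wmb()			__sync_synchronize()	/* stand-in */
#define smp_read_barrier_depends()	do { } while (0)	/* as on x86 */

static int shared_data;
static int *volatile shared_ptr;

static void publisher(void)			/* runs on CPU 0 */
{
	shared_data = 42;
	smp_wmb();		/* order the data store before publishing the pointer */
	shared_ptr = &shared_data;
}

static int consumer(void)			/* runs on CPU 1 */
{
	int *p = shared_ptr;

	if (!p)
		return -1;
	smp_read_barrier_depends();	/* pairs with the smp_wmb() above */
	return *p;			/* sees 42 once the pointer is visible */
}

int main(void)
{
	publisher();
	printf("%d\n", consumer());
	return 0;
}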
diff --git a/arch/um/sys-i386/shared/sysdep/tls.h b/arch/um/sys-i386/shared/sysdep/tls.h
new file mode 100644
index 00000000000..34550755b2a
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/tls.h
@@ -0,0 +1,32 @@
1#ifndef _SYSDEP_TLS_H
2#define _SYSDEP_TLS_H
3
4# ifndef __KERNEL__
5
6/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
7 * may be named user_desc (in 2.4, and in the header matching its API, it was
8 * named modify_ldt_ldt_s). */
9
10typedef struct um_dup_user_desc {
11 unsigned int entry_number;
12 unsigned int base_addr;
13 unsigned int limit;
14 unsigned int seg_32bit:1;
15 unsigned int contents:2;
16 unsigned int read_exec_only:1;
17 unsigned int limit_in_pages:1;
18 unsigned int seg_not_present:1;
19 unsigned int useable:1;
20} user_desc_t;
21
22# else /* __KERNEL__ */
23
24# include <ldt.h>
25typedef struct user_desc user_desc_t;
26
27# endif /* __KERNEL__ */
28
29#define GDT_ENTRY_TLS_MIN_I386 6
30#define GDT_ENTRY_TLS_MIN_X86_64 12
31
32#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/sys-i386/shared/sysdep/vm-flags.h b/arch/um/sys-i386/shared/sysdep/vm-flags.h
new file mode 100644
index 00000000000..e0d24c568db
--- /dev/null
+++ b/arch/um/sys-i386/shared/sysdep/vm-flags.h
@@ -0,0 +1,14 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __VM_FLAGS_I386_H
7#define __VM_FLAGS_I386_H
8
9#define VM_DATA_DEFAULT_FLAGS \
10 (VM_READ | VM_WRITE | \
11 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
12 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
13
14#endif
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
new file mode 100644
index 00000000000..89a46626bfd
--- /dev/null
+++ b/arch/um/sys-i386/signal.c
@@ -0,0 +1,508 @@
1/*
2 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <linux/ptrace.h>
7#include <asm/unistd.h>
8#include <asm/uaccess.h>
9#include <asm/ucontext.h>
10#include "frame_kern.h"
11#include "skas.h"
12
13void copy_sc(struct uml_pt_regs *regs, void *from)
14{
15 struct sigcontext *sc = from;
16
17 REGS_GS(regs->gp) = sc->gs;
18 REGS_FS(regs->gp) = sc->fs;
19 REGS_ES(regs->gp) = sc->es;
20 REGS_DS(regs->gp) = sc->ds;
21 REGS_EDI(regs->gp) = sc->di;
22 REGS_ESI(regs->gp) = sc->si;
23 REGS_EBP(regs->gp) = sc->bp;
24 REGS_SP(regs->gp) = sc->sp;
25 REGS_EBX(regs->gp) = sc->bx;
26 REGS_EDX(regs->gp) = sc->dx;
27 REGS_ECX(regs->gp) = sc->cx;
28 REGS_EAX(regs->gp) = sc->ax;
29 REGS_IP(regs->gp) = sc->ip;
30 REGS_CS(regs->gp) = sc->cs;
31 REGS_EFLAGS(regs->gp) = sc->flags;
32 REGS_SS(regs->gp) = sc->ss;
33}
34
35/*
36 * FPU tag word conversions.
37 */
38
39static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
40{
41 unsigned int tmp; /* to avoid 16 bit prefixes in the code */
42
43 /* Transform each pair of bits into 01 (valid) or 00 (empty) */
44 tmp = ~twd;
45 tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
46 /* and move the valid bits to the lower byte. */
47 tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
48 tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
49 tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
50 return tmp;
51}
52
53static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave)
54{
55 struct _fpxreg *st = NULL;
56 unsigned long twd = (unsigned long) fxsave->twd;
57 unsigned long tag;
58 unsigned long ret = 0xffff0000;
59 int i;
60
61#define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16)
62
63 for (i = 0; i < 8; i++) {
64 if (twd & 0x1) {
65 st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);
66
67 switch (st->exponent & 0x7fff) {
68 case 0x7fff:
69 tag = 2; /* Special */
70 break;
71 case 0x0000:
72 if ( !st->significand[0] &&
73 !st->significand[1] &&
74 !st->significand[2] &&
75 !st->significand[3] ) {
76 tag = 1; /* Zero */
77 } else {
78 tag = 2; /* Special */
79 }
80 break;
81 default:
82 if (st->significand[3] & 0x8000) {
83 tag = 0; /* Valid */
84 } else {
85 tag = 2; /* Special */
86 }
87 break;
88 }
89 } else {
90 tag = 3; /* Empty */
91 }
92 ret |= (tag << (2 * i));
93 twd = twd >> 1;
94 }
95 return ret;
96}
97
98static int convert_fxsr_to_user(struct _fpstate __user *buf,
99 struct user_fxsr_struct *fxsave)
100{
101 unsigned long env[7];
102 struct _fpreg __user *to;
103 struct _fpxreg *from;
104 int i;
105
106 env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
107 env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
108 env[2] = twd_fxsr_to_i387(fxsave);
109 env[3] = fxsave->fip;
110 env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
111 env[5] = fxsave->foo;
112 env[6] = fxsave->fos;
113
114 if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
115 return 1;
116
117 to = &buf->_st[0];
118 from = (struct _fpxreg *) &fxsave->st_space[0];
119 for (i = 0; i < 8; i++, to++, from++) {
120 unsigned long __user *t = (unsigned long __user *)to;
121 unsigned long *f = (unsigned long *)from;
122
123 if (__put_user(*f, t) ||
124 __put_user(*(f + 1), t + 1) ||
125 __put_user(from->exponent, &to->exponent))
126 return 1;
127 }
128 return 0;
129}
130
131static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave,
132 struct _fpstate __user *buf)
133{
134 unsigned long env[7];
135 struct _fpxreg *to;
136 struct _fpreg __user *from;
137 int i;
138
139 if (copy_from_user( env, buf, 7 * sizeof(long)))
140 return 1;
141
142 fxsave->cwd = (unsigned short)(env[0] & 0xffff);
143 fxsave->swd = (unsigned short)(env[1] & 0xffff);
144 fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
145 fxsave->fip = env[3];
146 fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
147 fxsave->fcs = (env[4] & 0xffff);
148 fxsave->foo = env[5];
149 fxsave->fos = env[6];
150
151 to = (struct _fpxreg *) &fxsave->st_space[0];
152 from = &buf->_st[0];
153 for (i = 0; i < 8; i++, to++, from++) {
154 unsigned long *t = (unsigned long *)to;
155 unsigned long __user *f = (unsigned long __user *)from;
156
157 if (__get_user(*t, f) ||
158 __get_user(*(t + 1), f + 1) ||
159 __get_user(to->exponent, &from->exponent))
160 return 1;
161 }
162 return 0;
163}
164
165extern int have_fpx_regs;
166
167static int copy_sc_from_user(struct pt_regs *regs,
168 struct sigcontext __user *from)
169{
170 struct sigcontext sc;
171 int err, pid;
172
173 err = copy_from_user(&sc, from, sizeof(sc));
174 if (err)
175 return err;
176
177 pid = userspace_pid[current_thread_info()->cpu];
178 copy_sc(&regs->regs, &sc);
179 if (have_fpx_regs) {
180 struct user_fxsr_struct fpx;
181
182 err = copy_from_user(&fpx,
183 &((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
184 sizeof(struct user_fxsr_struct));
185 if (err)
186 return 1;
187
188 err = convert_fxsr_from_user(&fpx, sc.fpstate);
189 if (err)
190 return 1;
191
192 err = restore_fpx_registers(pid, (unsigned long *) &fpx);
193 if (err < 0) {
194 printk(KERN_ERR "copy_sc_from_user - "
195 "restore_fpx_registers failed, errno = %d\n",
196 -err);
197 return 1;
198 }
199 }
200 else {
201 struct user_i387_struct fp;
202
203 err = copy_from_user(&fp, sc.fpstate,
204 sizeof(struct user_i387_struct));
205 if (err)
206 return 1;
207
208 err = restore_fp_registers(pid, (unsigned long *) &fp);
209 if (err < 0) {
210 printk(KERN_ERR "copy_sc_from_user - "
211 "restore_fp_registers failed, errno = %d\n",
212 -err);
213 return 1;
214 }
215 }
216
217 return 0;
218}
219
220static int copy_sc_to_user(struct sigcontext __user *to,
221 struct _fpstate __user *to_fp, struct pt_regs *regs,
222 unsigned long sp)
223{
224 struct sigcontext sc;
225 struct faultinfo * fi = &current->thread.arch.faultinfo;
226 int err, pid;
227
228 sc.gs = REGS_GS(regs->regs.gp);
229 sc.fs = REGS_FS(regs->regs.gp);
230 sc.es = REGS_ES(regs->regs.gp);
231 sc.ds = REGS_DS(regs->regs.gp);
232 sc.di = REGS_EDI(regs->regs.gp);
233 sc.si = REGS_ESI(regs->regs.gp);
234 sc.bp = REGS_EBP(regs->regs.gp);
235 sc.sp = sp;
236 sc.bx = REGS_EBX(regs->regs.gp);
237 sc.dx = REGS_EDX(regs->regs.gp);
238 sc.cx = REGS_ECX(regs->regs.gp);
239 sc.ax = REGS_EAX(regs->regs.gp);
240 sc.ip = REGS_IP(regs->regs.gp);
241 sc.cs = REGS_CS(regs->regs.gp);
242 sc.flags = REGS_EFLAGS(regs->regs.gp);
243 sc.sp_at_signal = regs->regs.gp[UESP];
244 sc.ss = regs->regs.gp[SS];
245 sc.cr2 = fi->cr2;
246 sc.err = fi->error_code;
247 sc.trapno = fi->trap_no;
248
249 to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
250 sc.fpstate = to_fp;
251
252 pid = userspace_pid[current_thread_info()->cpu];
253 if (have_fpx_regs) {
254 struct user_fxsr_struct fpx;
255
256 err = save_fpx_registers(pid, (unsigned long *) &fpx);
257 if (err < 0){
258 printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
259 "failed, errno = %d\n", err);
260 return 1;
261 }
262
263 err = convert_fxsr_to_user(to_fp, &fpx);
264 if (err)
265 return 1;
266
267 err |= __put_user(fpx.swd, &to_fp->status);
268 err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
269 if (err)
270 return 1;
271
272 if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
273 sizeof(struct user_fxsr_struct)))
274 return 1;
275 }
276 else {
277 struct user_i387_struct fp;
278
279 err = save_fp_registers(pid, (unsigned long *) &fp);
280 if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
281 return 1;
282 }
283
284 return copy_to_user(to, &sc, sizeof(sc));
285}
286
287static int copy_ucontext_to_user(struct ucontext __user *uc,
288 struct _fpstate __user *fp, sigset_t *set,
289 unsigned long sp)
290{
291 int err = 0;
292
293 err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp);
294 err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags);
295 err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
296 err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, sp);
297 err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
298 return err;
299}
300
301struct sigframe
302{
303 char __user *pretcode;
304 int sig;
305 struct sigcontext sc;
306 struct _fpstate fpstate;
307 unsigned long extramask[_NSIG_WORDS-1];
308 char retcode[8];
309};
310
311struct rt_sigframe
312{
313 char __user *pretcode;
314 int sig;
315 struct siginfo __user *pinfo;
316 void __user *puc;
317 struct siginfo info;
318 struct ucontext uc;
319 struct _fpstate fpstate;
320 char retcode[8];
321};
322
323int setup_signal_stack_sc(unsigned long stack_top, int sig,
324 struct k_sigaction *ka, struct pt_regs *regs,
325 sigset_t *mask)
326{
327 struct sigframe __user *frame;
328 void __user *restorer;
329 unsigned long save_sp = PT_REGS_SP(regs);
330 int err = 0;
331
332 /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */
333 stack_top = ((stack_top + 4) & -16UL) - 4;
334 frame = (struct sigframe __user *) stack_top - 1;
335 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
336 return 1;
337
338 restorer = frame->retcode;
339 if (ka->sa.sa_flags & SA_RESTORER)
340 restorer = ka->sa.sa_restorer;
341
342 /* Update SP now because the page fault handler refuses to extend
343 * the stack if the faulting address is too far below the current
344 * SP, which frame now certainly is. If there's an error, the original
345 * value is restored on the way out.
346 * When writing the sigcontext to the stack, we have to write the
347 * original value, so that's passed to copy_sc_to_user, which does
348 * the right thing with it.
349 */
350 PT_REGS_SP(regs) = (unsigned long) frame;
351
352 err |= __put_user(restorer, &frame->pretcode);
353 err |= __put_user(sig, &frame->sig);
354 err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp);
355 err |= __put_user(mask->sig[0], &frame->sc.oldmask);
356 if (_NSIG_WORDS > 1)
357 err |= __copy_to_user(&frame->extramask, &mask->sig[1],
358 sizeof(frame->extramask));
359
360 /*
361 * This is popl %eax ; movl $,%eax ; int $0x80
362 *
363 * WE DO NOT USE IT ANY MORE! It's only left here for historical
364 * reasons and because gdb uses it as a signature to notice
365 * signal handler stack frames.
366 */
367 err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
368 err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
369 err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
370
371 if (err)
372 goto err;
373
374 PT_REGS_SP(regs) = (unsigned long) frame;
375 PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
376 PT_REGS_EAX(regs) = (unsigned long) sig;
377 PT_REGS_EDX(regs) = (unsigned long) 0;
378 PT_REGS_ECX(regs) = (unsigned long) 0;
379
380 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
381 ptrace_notify(SIGTRAP);
382 return 0;
383
384err:
385 PT_REGS_SP(regs) = save_sp;
386 return err;
387}
388
389int setup_signal_stack_si(unsigned long stack_top, int sig,
390 struct k_sigaction *ka, struct pt_regs *regs,
391 siginfo_t *info, sigset_t *mask)
392{
393 struct rt_sigframe __user *frame;
394 void __user *restorer;
395 unsigned long save_sp = PT_REGS_SP(regs);
396 int err = 0;
397
398 stack_top &= -8UL;
399 frame = (struct rt_sigframe __user *) stack_top - 1;
400 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
401 return 1;
402
403 restorer = frame->retcode;
404 if (ka->sa.sa_flags & SA_RESTORER)
405 restorer = ka->sa.sa_restorer;
406
407 /* See comment above about why this is here */
408 PT_REGS_SP(regs) = (unsigned long) frame;
409
410 err |= __put_user(restorer, &frame->pretcode);
411 err |= __put_user(sig, &frame->sig);
412 err |= __put_user(&frame->info, &frame->pinfo);
413 err |= __put_user(&frame->uc, &frame->puc);
414 err |= copy_siginfo_to_user(&frame->info, info);
415 err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
416 save_sp);
417
418 /*
419 * This is movl $,%eax ; int $0x80
420 *
421 * WE DO NOT USE IT ANY MORE! It's only left here for historical
422 * reasons and because gdb uses it as a signature to notice
423 * signal handler stack frames.
424 */
425 err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
426 err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
427 err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
428
429 if (err)
430 goto err;
431
432 PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
433 PT_REGS_EAX(regs) = (unsigned long) sig;
434 PT_REGS_EDX(regs) = (unsigned long) &frame->info;
435 PT_REGS_ECX(regs) = (unsigned long) &frame->uc;
436
437 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
438 ptrace_notify(SIGTRAP);
439 return 0;
440
441err:
442 PT_REGS_SP(regs) = save_sp;
443 return err;
444}
445
446long sys_sigreturn(struct pt_regs regs)
447{
448 unsigned long sp = PT_REGS_SP(&current->thread.regs);
449 struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
450 sigset_t set;
451 struct sigcontext __user *sc = &frame->sc;
452 unsigned long __user *oldmask = &sc->oldmask;
453 unsigned long __user *extramask = frame->extramask;
454 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
455
456 if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
457 copy_from_user(&set.sig[1], extramask, sig_size))
458 goto segfault;
459
460 sigdelsetmask(&set, ~_BLOCKABLE);
461
462 spin_lock_irq(&current->sighand->siglock);
463 current->blocked = set;
464 recalc_sigpending();
465 spin_unlock_irq(&current->sighand->siglock);
466
467 if (copy_sc_from_user(&current->thread.regs, sc))
468 goto segfault;
469
470 /* Avoid ERESTART handling */
471 PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
472 return PT_REGS_SYSCALL_RET(&current->thread.regs);
473
474 segfault:
475 force_sig(SIGSEGV, current);
476 return 0;
477}
478
479long sys_rt_sigreturn(struct pt_regs regs)
480{
481 unsigned long sp = PT_REGS_SP(&current->thread.regs);
482 struct rt_sigframe __user *frame =
483 (struct rt_sigframe __user *) (sp - 4);
484 sigset_t set;
485 struct ucontext __user *uc = &frame->uc;
486 int sig_size = _NSIG_WORDS * sizeof(unsigned long);
487
488 if (copy_from_user(&set, &uc->uc_sigmask, sig_size))
489 goto segfault;
490
491 sigdelsetmask(&set, ~_BLOCKABLE);
492
493 spin_lock_irq(&current->sighand->siglock);
494 current->blocked = set;
495 recalc_sigpending();
496 spin_unlock_irq(&current->sighand->siglock);
497
498 if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
499 goto segfault;
500
501 /* Avoid ERESTART handling */
502 PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
503 return PT_REGS_SYSCALL_RET(&current->thread.regs);
504
505 segfault:
506 force_sig(SIGSEGV, current);
507 return 0;
508}
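twd_i387_to_fxsr() compresses the i387 tag word (two bits per FP register, 11 meaning empty) into the fxsr form (one valid bit per register). A standalone check of that bit manipulation, with the function copied verbatim from above:

#include <stdio.h>

static unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp;	/* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555;	/* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333;	/* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f;	/* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff;	/* 00000000VVVVVVVV */
	return tmp;
}

int main(void)
{
	/* st0 and st1 in use (tags 00), the other six empty (tags 11). */
	unsigned short i387_twd = 0xfff0;

	printf("fxsr tag byte = %#x\n", twd_i387_to_fxsr(i387_twd));	/* 0x3 */
	return 0;
}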
diff --git a/arch/um/sys-i386/stub.S b/arch/um/sys-i386/stub.S
new file mode 100644
index 00000000000..54a36ec20cb
--- /dev/null
+++ b/arch/um/sys-i386/stub.S
@@ -0,0 +1,51 @@
1#include "as-layout.h"
2
3 .globl syscall_stub
4.section .__syscall_stub, "ax"
5
6 .globl batch_syscall_stub
7batch_syscall_stub:
8 /* load pointer to first operation */
9 mov $(STUB_DATA+8), %esp
10
11again:
12 /* load length of additional data */
13 mov 0x0(%esp), %eax
14
15 /* if(length == 0) : end of list */
16 /* write possible 0 to header */
17 mov %eax, STUB_DATA+4
18 cmpl $0, %eax
19 jz done
20
21 /* save current pointer */
22 mov %esp, STUB_DATA+4
23
24 /* skip additional data */
25 add %eax, %esp
26
27 /* load syscall-# */
28 pop %eax
29
30 /* load syscall params */
31 pop %ebx
32 pop %ecx
33 pop %edx
34 pop %esi
35 pop %edi
36 pop %ebp
37
38 /* execute syscall */
39 int $0x80
40
41 /* check return value */
42 pop %ebx
43 cmp %ebx, %eax
44 je again
45
46done:
47 /* save return value */
48 mov %eax, STUB_DATA
49
50 /* stop */
51 int3
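The batch entry layout can be read directly off the loop above; a sketch of what the stub consumes, derived from the assembly rather than from the kernel-side producer:

/* One batch entry, offsets relative to the entry start, as batch_syscall_stub
 * reads it:
 *
 *   +0              length covering this word plus any inline data;
 *                   a length of 0 terminates the list
 *   +4 .. +len-1    inline data (e.g. buffers the syscall needs), skipped over
 *   +len            syscall number                  (pop %eax)
 *   +len+4..+len+24 six arguments, %ebx..%ebp order (six pops)
 *   +len+28         expected return value           (pop %ebx, cmp with %eax)
 *
 * The next entry follows immediately. On exit, STUB_DATA holds 0 if every
 * syscall returned its expected value, otherwise the offending syscall's
 * return value; STUB_DATA+4 holds 0 on success or a pointer to the entry
 * that failed.
 */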
diff --git a/arch/um/sys-i386/stub_segv.c b/arch/um/sys-i386/stub_segv.c
new file mode 100644
index 00000000000..28ccf737a79
--- /dev/null
+++ b/arch/um/sys-i386/stub_segv.c
@@ -0,0 +1,17 @@
1/*
2 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "sysdep/stub.h"
7#include "sysdep/sigcontext.h"
8
9void __attribute__ ((__section__ (".__syscall_stub")))
10stub_segv_handler(int sig)
11{
12 struct sigcontext *sc = (struct sigcontext *) (&sig + 1);
13
14 GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA), sc);
15
16 trap_myself();
17}
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S
new file mode 100644
index 00000000000..de274071455
--- /dev/null
+++ b/arch/um/sys-i386/sys_call_table.S
@@ -0,0 +1,28 @@
1#include <linux/linkage.h>
2/* Steal i386 syscall table for our purposes, but with some slight changes.*/
3
4#define sys_iopl sys_ni_syscall
5#define sys_ioperm sys_ni_syscall
6
7#define sys_vm86old sys_ni_syscall
8#define sys_vm86 sys_ni_syscall
9
10#define old_mmap sys_old_mmap
11
12#define ptregs_fork sys_fork
13#define ptregs_execve sys_execve
14#define ptregs_iopl sys_iopl
15#define ptregs_vm86old sys_vm86old
16#define ptregs_sigreturn sys_sigreturn
17#define ptregs_clone sys_clone
18#define ptregs_vm86 sys_vm86
19#define ptregs_rt_sigreturn sys_rt_sigreturn
20#define ptregs_sigaltstack sys_sigaltstack
21#define ptregs_vfork sys_vfork
22
23.section .rodata,"a"
24
25#include "../../x86/kernel/syscall_table_32.S"
26
27ENTRY(syscall_table_size)
28.long .-sys_call_table
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
new file mode 100644
index 00000000000..70ca357393b
--- /dev/null
+++ b/arch/um/sys-i386/syscalls.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/sched.h"
7#include "linux/shm.h"
8#include "linux/ipc.h"
9#include "linux/syscalls.h"
10#include "asm/mman.h"
11#include "asm/uaccess.h"
12#include "asm/unistd.h"
13
14/*
15 * The prototype on i386 is:
16 *
17 * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
18 *
19 * and the "newtls" arg. on i386 is read by copy_thread directly from the
20 * register saved on the stack.
21 */
22long sys_clone(unsigned long clone_flags, unsigned long newsp,
23 int __user *parent_tid, void *newtls, int __user *child_tid)
24{
25 long ret;
26
27 if (!newsp)
28 newsp = UPT_SP(&current->thread.regs.regs);
29
30 current->thread.forking = 1;
31 ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
32 child_tid);
33 current->thread.forking = 0;
34 return ret;
35}
36
37long sys_sigaction(int sig, const struct old_sigaction __user *act,
38 struct old_sigaction __user *oact)
39{
40 struct k_sigaction new_ka, old_ka;
41 int ret;
42
43 if (act) {
44 old_sigset_t mask;
45 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
46 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
47 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
48 return -EFAULT;
49 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
50 __get_user(mask, &act->sa_mask);
51 siginitset(&new_ka.sa.sa_mask, mask);
52 }
53
54 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
55
56 if (!ret && oact) {
57 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
58 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
59 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
60 return -EFAULT;
61 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
62 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
63 }
64
65 return ret;
66}
diff --git a/arch/um/sys-i386/sysrq.c b/arch/um/sys-i386/sysrq.c
new file mode 100644
index 00000000000..171b3e9dc86
--- /dev/null
+++ b/arch/um/sys-i386/sysrq.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2001 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/kernel.h"
7#include "linux/smp.h"
8#include "linux/sched.h"
9#include "linux/kallsyms.h"
10#include "asm/ptrace.h"
11#include "sysrq.h"
12
13/* This is declared by <linux/sched.h> */
14void show_regs(struct pt_regs *regs)
15{
16 printk("\n");
17 printk("EIP: %04lx:[<%08lx>] CPU: %d %s",
18 0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs),
19 smp_processor_id(), print_tainted());
20 if (PT_REGS_CS(regs) & 3)
21 printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs),
22 PT_REGS_SP(regs));
23 printk(" EFLAGS: %08lx\n %s\n", PT_REGS_EFLAGS(regs),
24 print_tainted());
25 printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26 PT_REGS_EAX(regs), PT_REGS_EBX(regs),
27 PT_REGS_ECX(regs),
28 PT_REGS_EDX(regs));
29 printk("ESI: %08lx EDI: %08lx EBP: %08lx",
30 PT_REGS_ESI(regs), PT_REGS_EDI(regs),
31 PT_REGS_EBP(regs));
32 printk(" DS: %04lx ES: %04lx\n",
33 0xffff & PT_REGS_DS(regs),
34 0xffff & PT_REGS_ES(regs));
35
36 show_trace(NULL, (unsigned long *) &regs);
37}
38
39/* Copied from i386. */
40static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
41{
42 return p > (void *)tinfo &&
43 p < (void *)tinfo + THREAD_SIZE - 3;
44}
45
46/* Adapted from i386 (we also print the address we read from). */
47static inline unsigned long print_context_stack(struct thread_info *tinfo,
48 unsigned long *stack, unsigned long ebp)
49{
50 unsigned long addr;
51
52#ifdef CONFIG_FRAME_POINTER
53 while (valid_stack_ptr(tinfo, (void *)ebp)) {
54 addr = *(unsigned long *)(ebp + 4);
55 printk("%08lx: [<%08lx>]", ebp + 4, addr);
56 print_symbol(" %s", addr);
57 printk("\n");
58 ebp = *(unsigned long *)ebp;
59 }
60#else
61 while (valid_stack_ptr(tinfo, stack)) {
62 addr = *stack;
63 if (__kernel_text_address(addr)) {
64 printk("%08lx: [<%08lx>]", (unsigned long) stack, addr);
65 print_symbol(" %s", addr);
66 printk("\n");
67 }
68 stack++;
69 }
70#endif
71 return ebp;
72}
73
74void show_trace(struct task_struct* task, unsigned long * stack)
75{
76 unsigned long ebp;
77 struct thread_info *context;
78
79 /* Turn this into BUG_ON if possible. */
80 if (!stack) {
81 stack = (unsigned long*) &stack;
82		printk("show_trace: got NULL stack, implicit assumption task == current\n");
83 WARN_ON(1);
84 }
85
86 if (!task)
87 task = current;
88
89 if (task != current) {
90 ebp = (unsigned long) KSTK_EBP(task);
91 } else {
92 asm ("movl %%ebp, %0" : "=r" (ebp) : );
93 }
94
95 context = (struct thread_info *)
96 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
97 print_context_stack(context, stack, ebp);
98
99 printk("\n");
100}
101
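print_context_stack()'s CONFIG_FRAME_POINTER branch walks saved (%ebp, return address) pairs. A minimal userspace sketch of the same walk, assuming frame pointers are kept (-fno-omit-frame-pointer) and capping the depth instead of bounds-checking against a thread_info as the kernel code does:

#include <stdio.h>

static void __attribute__((noinline)) backtrace_fp(void)
{
	unsigned long *fp = __builtin_frame_address(0);
	int depth = 0;

	/* Each frame: fp[0] = caller's frame pointer, fp[1] = return address. */
	while (fp && fp[0] && depth++ < 8) {
		printf("frame %p: return address %#lx\n", (void *) fp, fp[1]);
		fp = (unsigned long *) fp[0];
	}
}

static void __attribute__((noinline)) level2(void) { backtrace_fp(); }
static void __attribute__((noinline)) level1(void) { level2(); }

int main(void)
{
	level1();	/* prints frames for backtrace_fp, level2, level1, main */
	return 0;
}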
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
new file mode 100644
index 00000000000..c6c7131e563
--- /dev/null
+++ b/arch/um/sys-i386/tls.c
@@ -0,0 +1,396 @@
1/*
2 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
3 * Licensed under the GPL
4 */
5
6#include "linux/percpu.h"
7#include "linux/sched.h"
8#include "asm/uaccess.h"
9#include "os.h"
10#include "skas.h"
11#include "sysdep/tls.h"
12
13/*
14 * If needed we can detect when it's uninitialized.
15 *
16 * These are initialized in an initcall and unchanged thereafter.
17 */
18static int host_supports_tls = -1;
19int host_gdt_entry_tls_min;
20
21int do_set_thread_area(struct user_desc *info)
22{
23 int ret;
24 u32 cpu;
25
26 cpu = get_cpu();
27 ret = os_set_thread_area(info, userspace_pid[cpu]);
28 put_cpu();
29
30 if (ret)
31 printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
32 "index = %d\n", ret, info->entry_number);
33
34 return ret;
35}
36
37int do_get_thread_area(struct user_desc *info)
38{
39 int ret;
40 u32 cpu;
41
42 cpu = get_cpu();
43 ret = os_get_thread_area(info, userspace_pid[cpu]);
44 put_cpu();
45
46 if (ret)
47 printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
48 "index = %d\n", ret, info->entry_number);
49
50 return ret;
51}
52
53/*
54 * sys_get_thread_area: get a yet unused TLS descriptor index.
55 * XXX: Consider leaving one free slot for glibc usage in the first place. This must
56 * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
57 *
58 * Also, this must be tested when compiling in SKAS mode with dynamic linking
59 * and running against NPTL.
60 */
61static int get_free_idx(struct task_struct* task)
62{
63 struct thread_struct *t = &task->thread;
64 int idx;
65
66 if (!t->arch.tls_array)
67 return GDT_ENTRY_TLS_MIN;
68
69 for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
70 if (!t->arch.tls_array[idx].present)
71 return idx + GDT_ENTRY_TLS_MIN;
72 return -ESRCH;
73}
74
75static inline void clear_user_desc(struct user_desc* info)
76{
77 /* Postcondition: LDT_empty(info) returns true. */
78 memset(info, 0, sizeof(*info));
79
80 /*
81 * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
82 * indeed an empty user_desc.
83 */
84 info->read_exec_only = 1;
85 info->seg_not_present = 1;
86}
87
88#define O_FORCE 1
89
90static int load_TLS(int flags, struct task_struct *to)
91{
92 int ret = 0;
93 int idx;
94
95 for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
96 struct uml_tls_struct* curr =
97 &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
98
99 /*
100		 * If the entry is not present but was never flushed, clear it
101		 * here and flush it to the host, which will clear it there too.
102 */
103 if (!curr->present) {
104 if (!curr->flushed) {
105 clear_user_desc(&curr->tls);
106 curr->tls.entry_number = idx;
107 } else {
108 WARN_ON(!LDT_empty(&curr->tls));
109 continue;
110 }
111 }
112
113 if (!(flags & O_FORCE) && curr->flushed)
114 continue;
115
116 ret = do_set_thread_area(&curr->tls);
117 if (ret)
118 goto out;
119
120 curr->flushed = 1;
121 }
122out:
123 return ret;
124}
125
126/*
127 * Verify if we need to do a flush for the new process, i.e. if there are any
128 * present desc's, only if they haven't been flushed.
129 */
130static inline int needs_TLS_update(struct task_struct *task)
131{
132 int i;
133 int ret = 0;
134
135 for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
136 struct uml_tls_struct* curr =
137 &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
138
139 /*
140 * Can't test curr->present, we may need to clear a descriptor
141 * which had a value.
142 */
143 if (curr->flushed)
144 continue;
145 ret = 1;
146 break;
147 }
148 return ret;
149}
150
151/*
152 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
153 * we mark them as such and the first switch_to will do the job.
154 */
155void clear_flushed_tls(struct task_struct *task)
156{
157 int i;
158
159 for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
160 struct uml_tls_struct* curr =
161 &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
162
163 /*
164 * Still correct to do this, if it wasn't present on the host it
165 * will remain as flushed as it was.
166 */
167 if (!curr->present)
168 continue;
169
170 curr->flushed = 0;
171 }
172}
173
174/*
175 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
176 * common host process. So this is needed in SKAS0 too.
177 *
178 * However, if each thread had a different host process (and this was discussed
179 * for SMP support), this would not be needed.
180 *
181 * Nor will it be needed when (and if) we add support for the host SKAS
182 * patch.
183 */
184
185int arch_switch_tls(struct task_struct *to)
186{
187 if (!host_supports_tls)
188 return 0;
189
190 /*
191 * We have no need whatsoever to switch TLS for kernel threads; beyond
192 * that, doing so would also result in calling os_set_thread_area() with
193 * userspace_pid[cpu] == 0, which gives an error.
194 */
195 if (likely(to->mm))
196 return load_TLS(O_FORCE, to);
197
198 return 0;
199}
200
201static int set_tls_entry(struct task_struct* task, struct user_desc *info,
202 int idx, int flushed)
203{
204 struct thread_struct *t = &task->thread;
205
206 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
207 return -EINVAL;
208
209 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
210 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
211 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
212
213 return 0;
214}
215
216int arch_copy_tls(struct task_struct *new)
217{
218 struct user_desc info;
219 int idx, ret = -EFAULT;
220
221 if (copy_from_user(&info,
222 (void __user *) UPT_ESI(&new->thread.regs.regs),
223 sizeof(info)))
224 goto out;
225
226 ret = -EINVAL;
227 if (LDT_empty(&info))
228 goto out;
229
230 idx = info.entry_number;
231
232 ret = set_tls_entry(new, &info, idx, 0);
233out:
234 return ret;
235}
236
237/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
238static int get_tls_entry(struct task_struct *task, struct user_desc *info,
239 int idx)
240{
241 struct thread_struct *t = &task->thread;
242
243 if (!t->arch.tls_array)
244 goto clear;
245
246 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
247 return -EINVAL;
248
249 if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
250 goto clear;
251
252 *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
253
254out:
255 /*
256 * Temporary debugging check, to make sure that things have been
257 * flushed. This could be triggered if load_TLS() failed.
258 */
259 if (unlikely(task == current &&
260 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
261 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
262 "without flushed TLS.\n", current->pid);
263 }
264
265 return 0;
266clear:
267 /*
268 * When the TLS entry has not been set, the values read back by the user from
269 * the tls_array are 0 (because it's cleared at boot, see
270 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
271 */
272 clear_user_desc(info);
273 info->entry_number = idx;
274 goto out;
275}
276
277int sys_set_thread_area(struct user_desc __user *user_desc)
278{
279 struct user_desc info;
280 int idx, ret;
281
282 if (!host_supports_tls)
283 return -ENOSYS;
284
285 if (copy_from_user(&info, user_desc, sizeof(info)))
286 return -EFAULT;
287
288 idx = info.entry_number;
289
290 if (idx == -1) {
291 idx = get_free_idx(current);
292 if (idx < 0)
293 return idx;
294 info.entry_number = idx;
295 /* Tell the user which slot we chose. */
296 if (put_user(idx, &user_desc->entry_number))
297 return -EFAULT;
298 }
299
300 ret = do_set_thread_area(&info);
301 if (ret)
302 return ret;
303 return set_tls_entry(current, &info, idx, 1);
304}
305
306/*
307 * Perform set_thread_area on behalf of the traced child.
308 * Note: error handling is not done on the deferred load, and this differs from
309 * i386. However, the only possible errors are caused by bugs.
310 */
311int ptrace_set_thread_area(struct task_struct *child, int idx,
312 struct user_desc __user *user_desc)
313{
314 struct user_desc info;
315
316 if (!host_supports_tls)
317 return -EIO;
318
319 if (copy_from_user(&info, user_desc, sizeof(info)))
320 return -EFAULT;
321
322 return set_tls_entry(child, &info, idx, 0);
323}
324
325int sys_get_thread_area(struct user_desc __user *user_desc)
326{
327 struct user_desc info;
328 int idx, ret;
329
330 if (!host_supports_tls)
331 return -ENOSYS;
332
333 if (get_user(idx, &user_desc->entry_number))
334 return -EFAULT;
335
336 ret = get_tls_entry(current, &info, idx);
337 if (ret < 0)
338 goto out;
339
340 if (copy_to_user(user_desc, &info, sizeof(info)))
341 ret = -EFAULT;
342
343out:
344 return ret;
345}
346
347/*
348 * Perform get_thread_area on behalf of the traced child.
349 */
350int ptrace_get_thread_area(struct task_struct *child, int idx,
351 struct user_desc __user *user_desc)
352{
353 struct user_desc info;
354 int ret;
355
356 if (!host_supports_tls)
357 return -EIO;
358
359 ret = get_tls_entry(child, &info, idx);
360 if (ret < 0)
361 goto out;
362
363 if (copy_to_user(user_desc, &info, sizeof(info)))
364 ret = -EFAULT;
365out:
366 return ret;
367}
368
369/*
370 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
371 * if a 32-bit UML is running on a 64-bit host.
372 */
373static int __init __setup_host_supports_tls(void)
374{
375 check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
376 if (host_supports_tls) {
377 printk(KERN_INFO "Host TLS support detected\n");
378 printk(KERN_INFO "Detected host type: ");
379 switch (host_gdt_entry_tls_min) {
380 case GDT_ENTRY_TLS_MIN_I386:
381 printk(KERN_CONT "i386");
382 break;
383 case GDT_ENTRY_TLS_MIN_X86_64:
384 printk(KERN_CONT "x86_64");
385 break;
386 }
387 printk(KERN_CONT " (GDT indexes %d to %d)\n",
388 host_gdt_entry_tls_min,
389 host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
390 } else
391 printk(KERN_ERR " Host TLS support NOT detected! "
392 "TLS support inside UML will not work\n");
393 return 0;
394}
395
396__initcall(__setup_host_supports_tls);
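
The entry_number == -1 convention handled by sys_set_thread_area() above mirrors the i386 ABI: userspace passes -1 to ask the kernel for a free GDT TLS slot and reads the chosen index back from the same struct. Below is a minimal, hedged userspace sketch of that protocol, assuming an i386 (or 32-bit UML) environment where <asm/ldt.h> and SYS_set_thread_area are available; the base address and limit are placeholder values.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc */

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;		/* ask the kernel to pick a free TLS slot */
	desc.base_addr = 0;		/* placeholder TLS block base */
	desc.limit = 0xfffff;
	desc.seg_32bit = 1;
	desc.limit_in_pages = 1;
	desc.useable = 1;

	if (syscall(SYS_set_thread_area, &desc) != 0) {
		perror("set_thread_area");
		return 1;
	}

	/* The kernel wrote the chosen slot back, as in sys_set_thread_area(). */
	printf("got TLS slot %u\n", desc.entry_number);
	return 0;
}
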
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
new file mode 100644
index 00000000000..5f883bfe773
--- /dev/null
+++ b/arch/um/sys-i386/user-offsets.c
@@ -0,0 +1,53 @@
1#include <stdio.h>
2#include <stddef.h>
3#include <signal.h>
4#include <sys/poll.h>
5#include <sys/user.h>
6#include <sys/mman.h>
7#include <asm/ptrace.h>
8
9#define DEFINE(sym, val) \
10 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
11
12#define DEFINE_LONGS(sym, val) \
13 asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
14
15#define OFFSET(sym, str, mem) \
16 DEFINE(sym, offsetof(struct str, mem));
17
18void foo(void)
19{
20 OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
21 OFFSET(HOST_SC_ERR, sigcontext, err);
22 OFFSET(HOST_SC_CR2, sigcontext, cr2);
23
24 DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
25 DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct));
26
27 DEFINE(HOST_IP, EIP);
28 DEFINE(HOST_SP, UESP);
29 DEFINE(HOST_EFLAGS, EFL);
30 DEFINE(HOST_EAX, EAX);
31 DEFINE(HOST_EBX, EBX);
32 DEFINE(HOST_ECX, ECX);
33 DEFINE(HOST_EDX, EDX);
34 DEFINE(HOST_ESI, ESI);
35 DEFINE(HOST_EDI, EDI);
36 DEFINE(HOST_EBP, EBP);
37 DEFINE(HOST_CS, CS);
38 DEFINE(HOST_SS, SS);
39 DEFINE(HOST_DS, DS);
40 DEFINE(HOST_FS, FS);
41 DEFINE(HOST_ES, ES);
42 DEFINE(HOST_GS, GS);
43 DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
44
45 /* XXX Duplicated between i386 and x86_64 */
46 DEFINE(UM_POLLIN, POLLIN);
47 DEFINE(UM_POLLPRI, POLLPRI);
48 DEFINE(UM_POLLOUT, POLLOUT);
49
50 DEFINE(UM_PROT_READ, PROT_READ);
51 DEFINE(UM_PROT_WRITE, PROT_WRITE);
52 DEFINE(UM_PROT_EXEC, PROT_EXEC);
53}
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
new file mode 100644
index 00000000000..bd4d1d3ba91
--- /dev/null
+++ b/arch/um/sys-x86_64/Makefile
@@ -0,0 +1,27 @@
1#
2# Copyright 2003 PathScale, Inc.
3#
4# Licensed under the GPL
5#
6
7obj-y = bug.o bugs.o delay.o fault.o ldt.o ptrace.o ptrace_user.o mem.o \
8 setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
9 sysrq.o ksyms.o tls.o
10
11obj-y += vdso/
12
13subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
14 lib/rwsem.o
15subarch-obj-$(CONFIG_MODULES) += kernel/module.o
16
17ldt-y = ../sys-i386/ldt.o
18
19USER_OBJS := ptrace_user.o
20
21USER_OBJS += user-offsets.s
22extra-y += user-offsets.s
23
24UNPROFILE_OBJS := stub_segv.o
25CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
26
27include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-x86_64/asm/archparam.h b/arch/um/sys-x86_64/asm/archparam.h
new file mode 100644
index 00000000000..6c083663b8d
--- /dev/null
+++ b/arch/um/sys-x86_64/asm/archparam.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_ARCHPARAM_X86_64_H
8#define __UM_ARCHPARAM_X86_64_H
9
10
11/* No user-accessible fixmap addresses, i.e. vsyscall */
12#define FIXADDR_USER_START 0
13#define FIXADDR_USER_END 0
14
15#endif
16
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
new file mode 100644
index 00000000000..11a2bfb3885
--- /dev/null
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -0,0 +1,132 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 *
5 * Licensed under the GPL
6 */
7#ifndef __UM_ELF_X86_64_H
8#define __UM_ELF_X86_64_H
9
10#include <asm/user.h>
11#include "skas.h"
12
13/* x86-64 relocation types, taken from asm-x86_64/elf.h */
14#define R_X86_64_NONE 0 /* No reloc */
15#define R_X86_64_64 1 /* Direct 64 bit */
16#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
17#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
18#define R_X86_64_PLT32 4 /* 32 bit PLT address */
19#define R_X86_64_COPY 5 /* Copy symbol at runtime */
20#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
21#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
22#define R_X86_64_RELATIVE 8 /* Adjust by program base */
23#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
24 offset to GOT */
25#define R_X86_64_32 10 /* Direct 32 bit zero extended */
26#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
27#define R_X86_64_16 12 /* Direct 16 bit zero extended */
28#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
29#define R_X86_64_8 14 /* Direct 8 bit sign extended */
30#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
31
32#define R_X86_64_NUM 16
33
34typedef unsigned long elf_greg_t;
35
36#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
37typedef elf_greg_t elf_gregset_t[ELF_NGREG];
38
39typedef struct user_i387_struct elf_fpregset_t;
40
41/*
42 * This is used to ensure we don't load something for the wrong architecture.
43 */
44#define elf_check_arch(x) \
45 ((x)->e_machine == EM_X86_64)
46
47#define ELF_CLASS ELFCLASS64
48#define ELF_DATA ELFDATA2LSB
49#define ELF_ARCH EM_X86_64
50
51#define ELF_PLAT_INIT(regs, load_addr) do { \
52 PT_REGS_RBX(regs) = 0; \
53 PT_REGS_RCX(regs) = 0; \
54 PT_REGS_RDX(regs) = 0; \
55 PT_REGS_RSI(regs) = 0; \
56 PT_REGS_RDI(regs) = 0; \
57 PT_REGS_RBP(regs) = 0; \
58 PT_REGS_RAX(regs) = 0; \
59 PT_REGS_R8(regs) = 0; \
60 PT_REGS_R9(regs) = 0; \
61 PT_REGS_R10(regs) = 0; \
62 PT_REGS_R11(regs) = 0; \
63 PT_REGS_R12(regs) = 0; \
64 PT_REGS_R13(regs) = 0; \
65 PT_REGS_R14(regs) = 0; \
66 PT_REGS_R15(regs) = 0; \
67} while (0)
68
69#define ELF_CORE_COPY_REGS(pr_reg, _regs) \
70 (pr_reg)[0] = (_regs)->regs.gp[0]; \
71 (pr_reg)[1] = (_regs)->regs.gp[1]; \
72 (pr_reg)[2] = (_regs)->regs.gp[2]; \
73 (pr_reg)[3] = (_regs)->regs.gp[3]; \
74 (pr_reg)[4] = (_regs)->regs.gp[4]; \
75 (pr_reg)[5] = (_regs)->regs.gp[5]; \
76 (pr_reg)[6] = (_regs)->regs.gp[6]; \
77 (pr_reg)[7] = (_regs)->regs.gp[7]; \
78 (pr_reg)[8] = (_regs)->regs.gp[8]; \
79 (pr_reg)[9] = (_regs)->regs.gp[9]; \
80 (pr_reg)[10] = (_regs)->regs.gp[10]; \
81 (pr_reg)[11] = (_regs)->regs.gp[11]; \
82 (pr_reg)[12] = (_regs)->regs.gp[12]; \
83 (pr_reg)[13] = (_regs)->regs.gp[13]; \
84 (pr_reg)[14] = (_regs)->regs.gp[14]; \
85 (pr_reg)[15] = (_regs)->regs.gp[15]; \
86 (pr_reg)[16] = (_regs)->regs.gp[16]; \
87 (pr_reg)[17] = (_regs)->regs.gp[17]; \
88 (pr_reg)[18] = (_regs)->regs.gp[18]; \
89 (pr_reg)[19] = (_regs)->regs.gp[19]; \
90 (pr_reg)[20] = (_regs)->regs.gp[20]; \
91 (pr_reg)[21] = current->thread.arch.fs; \
92 (pr_reg)[22] = 0; \
93 (pr_reg)[23] = 0; \
94 (pr_reg)[24] = 0; \
95 (pr_reg)[25] = 0; \
96 (pr_reg)[26] = 0;
97
98#define task_pt_regs(t) (&(t)->thread.regs)
99
100struct task_struct;
101
102extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
103
104#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
105
106#ifdef TIF_IA32 /* XXX */
107#error XXX, indeed
108 clear_thread_flag(TIF_IA32);
109#endif
110
111#define ELF_EXEC_PAGESIZE 4096
112
113#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
114
115extern long elf_aux_hwcap;
116#define ELF_HWCAP (elf_aux_hwcap)
117
118#define ELF_PLATFORM "x86_64"
119
120#define SET_PERSONALITY(ex) do ; while(0)
121
122#define __HAVE_ARCH_GATE_AREA 1
123#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
124struct linux_binprm;
125extern int arch_setup_additional_pages(struct linux_binprm *bprm,
126 int uses_interp);
127
128extern unsigned long um_vdso_addr;
129#define AT_SYSINFO_EHDR 33
130#define ARCH_DLINFO NEW_AUX_ENT(AT_SYSINFO_EHDR, um_vdso_addr)
131
132#endif
diff --git a/arch/um/sys-x86_64/asm/module.h b/arch/um/sys-x86_64/asm/module.h
new file mode 100644
index 00000000000..8eb79c2d07d
--- /dev/null
+++ b/arch/um/sys-x86_64/asm/module.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_MODULE_X86_64_H
8#define __UM_MODULE_X86_64_H
9
10/* UML is simple */
11struct mod_arch_specific
12{
13};
14
15#define Elf_Shdr Elf64_Shdr
16#define Elf_Sym Elf64_Sym
17#define Elf_Ehdr Elf64_Ehdr
18
19#endif
20
diff --git a/arch/um/sys-x86_64/asm/processor.h b/arch/um/sys-x86_64/asm/processor.h
new file mode 100644
index 00000000000..875a26a6261
--- /dev/null
+++ b/arch/um/sys-x86_64/asm/processor.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_PROCESSOR_X86_64_H
8#define __UM_PROCESSOR_X86_64_H
9
10/* include faultinfo structure */
11#include "sysdep/faultinfo.h"
12
13struct arch_thread {
14 unsigned long debugregs[8];
15 int debugregs_seq;
16 unsigned long fs;
17 struct faultinfo faultinfo;
18};
19
20/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
21static inline void rep_nop(void)
22{
23 __asm__ __volatile__("rep;nop": : :"memory");
24}
25
26#define cpu_relax() rep_nop()
27
28#define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \
29 .debugregs_seq = 0, \
30 .fs = 0, \
31 .faultinfo = { 0, 0, 0 } }
32
33static inline void arch_flush_thread(struct arch_thread *thread)
34{
35}
36
37static inline void arch_copy_thread(struct arch_thread *from,
38 struct arch_thread *to)
39{
40 to->fs = from->fs;
41}
42
43#include <asm/user.h>
44
45#define current_text_addr() \
46 ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
47
48#define ARCH_IS_STACKGROW(address) \
49 (address + 128 >= UPT_SP(&current->thread.regs.regs))
50
51#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP)
52#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP)
53
54#include "asm/processor-generic.h"
55
56#endif
diff --git a/arch/um/sys-x86_64/asm/ptrace.h b/arch/um/sys-x86_64/asm/ptrace.h
new file mode 100644
index 00000000000..83d8c473b90
--- /dev/null
+++ b/arch/um/sys-x86_64/asm/ptrace.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __UM_PTRACE_X86_64_H
8#define __UM_PTRACE_X86_64_H
9
10#include "linux/compiler.h"
11#include "asm/errno.h"
12
13#define __FRAME_OFFSETS /* Needed to get the R* macros */
14#include "asm/ptrace-generic.h"
15
16#define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64
17
18#define PT_REGS_RBX(r) UPT_RBX(&(r)->regs)
19#define PT_REGS_RCX(r) UPT_RCX(&(r)->regs)
20#define PT_REGS_RDX(r) UPT_RDX(&(r)->regs)
21#define PT_REGS_RSI(r) UPT_RSI(&(r)->regs)
22#define PT_REGS_RDI(r) UPT_RDI(&(r)->regs)
23#define PT_REGS_RBP(r) UPT_RBP(&(r)->regs)
24#define PT_REGS_RAX(r) UPT_RAX(&(r)->regs)
25#define PT_REGS_R8(r) UPT_R8(&(r)->regs)
26#define PT_REGS_R9(r) UPT_R9(&(r)->regs)
27#define PT_REGS_R10(r) UPT_R10(&(r)->regs)
28#define PT_REGS_R11(r) UPT_R11(&(r)->regs)
29#define PT_REGS_R12(r) UPT_R12(&(r)->regs)
30#define PT_REGS_R13(r) UPT_R13(&(r)->regs)
31#define PT_REGS_R14(r) UPT_R14(&(r)->regs)
32#define PT_REGS_R15(r) UPT_R15(&(r)->regs)
33
34#define PT_REGS_FS(r) UPT_FS(&(r)->regs)
35#define PT_REGS_GS(r) UPT_GS(&(r)->regs)
36#define PT_REGS_DS(r) UPT_DS(&(r)->regs)
37#define PT_REGS_ES(r) UPT_ES(&(r)->regs)
38#define PT_REGS_SS(r) UPT_SS(&(r)->regs)
39#define PT_REGS_CS(r) UPT_CS(&(r)->regs)
40
41#define PT_REGS_ORIG_RAX(r) UPT_ORIG_RAX(&(r)->regs)
42#define PT_REGS_RIP(r) UPT_IP(&(r)->regs)
43#define PT_REGS_RSP(r) UPT_SP(&(r)->regs)
44
45#define PT_REGS_EFLAGS(r) UPT_EFLAGS(&(r)->regs)
46
47/* XXX */
48#define user_mode(r) UPT_IS_USER(&(r)->regs)
49#define PT_REGS_ORIG_SYSCALL(r) PT_REGS_RAX(r)
50#define PT_REGS_SYSCALL_RET(r) PT_REGS_RAX(r)
51
52#define PT_FIX_EXEC_STACK(sp) do ; while(0)
53
54#define profile_pc(regs) PT_REGS_IP(regs)
55
56struct user_desc;
57
58static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
59 struct user_desc __user *user_desc)
60{
61 return -ENOSYS;
62}
63
64static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
65 struct user_desc __user *user_desc)
66{
67 return -ENOSYS;
68}
69
70extern long arch_prctl(struct task_struct *task, int code,
71 unsigned long __user *addr);
72#endif
diff --git a/arch/um/sys-x86_64/bug.c b/arch/um/sys-x86_64/bug.c
new file mode 100644
index 00000000000..e8034e363d8
--- /dev/null
+++ b/arch/um/sys-x86_64/bug.c
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL V2
4 */
5
6#include <linux/uaccess.h>
7
8/*
9 * Mostly copied from i386/x86_64 - eliminated the eip < PAGE_OFFSET check
10 * because that's not relevant in skas mode.
11 */
12
13int is_valid_bugaddr(unsigned long eip)
14{
15 unsigned short ud2;
16
17 if (probe_kernel_address((unsigned short __user *)eip, ud2))
18 return 0;
19
20 return ud2 == 0x0b0f;
21}
diff --git a/arch/um/sys-x86_64/bugs.c b/arch/um/sys-x86_64/bugs.c
new file mode 100644
index 00000000000..44e02ba2a26
--- /dev/null
+++ b/arch/um/sys-x86_64/bugs.c
@@ -0,0 +1,15 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#include "sysdep/ptrace.h"
8
9void arch_check_bugs(void)
10{
11}
12
13void arch_examine_signal(int sig, struct uml_pt_regs *regs)
14{
15}
diff --git a/arch/um/sys-x86_64/delay.c b/arch/um/sys-x86_64/delay.c
new file mode 100644
index 00000000000..f3fe1a688f7
--- /dev/null
+++ b/arch/um/sys-x86_64/delay.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 * Mostly copied from arch/x86/lib/delay.c
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/delay.h>
13#include <asm/param.h>
14
15void __delay(unsigned long loops)
16{
17 asm volatile(
18 "test %0,%0\n"
19 "jz 3f\n"
20 "jmp 1f\n"
21
22 ".align 16\n"
23 "1: jmp 2f\n"
24
25 ".align 16\n"
26 "2: dec %0\n"
27 " jnz 2b\n"
28 "3: dec %0\n"
29
30 : /* we don't need output */
31 : "a" (loops)
32 );
33}
34EXPORT_SYMBOL(__delay);
35
36inline void __const_udelay(unsigned long xloops)
37{
38 int d0;
39
40 xloops *= 4;
41 asm("mull %%edx"
42 : "=d" (xloops), "=&a" (d0)
43 : "1" (xloops), "0"
44 (loops_per_jiffy * (HZ/4)));
45
46 __delay(++xloops);
47}
48EXPORT_SYMBOL(__const_udelay);
49
50void __udelay(unsigned long usecs)
51{
52 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
53}
54EXPORT_SYMBOL(__udelay);
55
56void __ndelay(unsigned long nsecs)
57{
58 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
59}
60EXPORT_SYMBOL(__ndelay);
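
For reference, the fixed-point arithmetic in __udelay()/__const_udelay() above can be written out with ordinary 64-bit math. This is a hedged sketch, not part of the file, and udelay_loops() is a hypothetical helper name: 0x10c7 is 2^32/10^6 rounded up, and the mull keeps only the upper 32 bits of the product, so the result is roughly usecs * loops_per_jiffy * HZ / 10^6 iterations of __delay().

/* Hedged sketch: how many __delay() iterations __udelay(usecs) asks for. */
static unsigned long udelay_loops(unsigned long usecs, unsigned long lpj,
				  unsigned int hz)
{
	unsigned long long xloops = (unsigned long long)usecs * 0x10c7;

	xloops *= 4;				/* as in __const_udelay() */
	/* 32x32 -> 64 multiply, keep the high word, as the mull does */
	return (unsigned long)((xloops * (lpj * (hz / 4))) >> 32) + 1;
}
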
diff --git a/arch/um/sys-x86_64/fault.c b/arch/um/sys-x86_64/fault.c
new file mode 100644
index 00000000000..ce85117fc64
--- /dev/null
+++ b/arch/um/sys-x86_64/fault.c
@@ -0,0 +1,28 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#include "sysdep/ptrace.h"
8
9/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
10struct exception_table_entry
11{
12 unsigned long insn;
13 unsigned long fixup;
14};
15
16const struct exception_table_entry *search_exception_tables(unsigned long add);
17
18int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
19{
20 const struct exception_table_entry *fixup;
21
22 fixup = search_exception_tables(address);
23 if (fixup != 0) {
24 UPT_IP(regs) = fixup->fixup;
25 return 1;
26 }
27 return 0;
28}
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c
new file mode 100644
index 00000000000..1db2fce0094
--- /dev/null
+++ b/arch/um/sys-x86_64/ksyms.c
@@ -0,0 +1,11 @@
1#include <linux/module.h>
2#include <asm/string.h>
3#include <asm/checksum.h>
4
5/*XXX: we need them because they would be exported by x86_64 */
6#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
7EXPORT_SYMBOL(memcpy);
8#else
9EXPORT_SYMBOL(__memcpy);
10#endif
11EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-x86_64/mem.c b/arch/um/sys-x86_64/mem.c
new file mode 100644
index 00000000000..546518727a7
--- /dev/null
+++ b/arch/um/sys-x86_64/mem.c
@@ -0,0 +1,26 @@
1#include "linux/mm.h"
2#include "asm/page.h"
3#include "asm/mman.h"
4
5const char *arch_vma_name(struct vm_area_struct *vma)
6{
7 if (vma->vm_mm && vma->vm_start == um_vdso_addr)
8 return "[vdso]";
9
10 return NULL;
11}
12
13struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
14{
15 return NULL;
16}
17
18int in_gate_area(struct mm_struct *mm, unsigned long addr)
19{
20 return 0;
21}
22
23int in_gate_area_no_mm(unsigned long addr)
24{
25 return 0;
26}
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
new file mode 100644
index 00000000000..4005506834f
--- /dev/null
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 *
5 * Licensed under the GPL
6 */
7
8#include <linux/mm.h>
9#include <linux/sched.h>
10#include <linux/errno.h>
11#define __FRAME_OFFSETS
12#include <asm/ptrace.h>
13#include <asm/uaccess.h>
14
15/*
16 * Determines which flags the user has access to:
17 * 1 = access, 0 = no access.
18 */
19#define FLAG_MASK 0x44dd5UL
20
21int putreg(struct task_struct *child, int regno, unsigned long value)
22{
23 unsigned long tmp;
24
25#ifdef TIF_IA32
26 /*
27 * Some code in the 64bit emulation may not be 64bit clean.
28 * Don't take any chances.
29 */
30 if (test_tsk_thread_flag(child, TIF_IA32))
31 value &= 0xffffffff;
32#endif
33 switch (regno) {
34 case FS:
35 case GS:
36 case DS:
37 case ES:
38 case SS:
39 case CS:
40 if (value && (value & 3) != 3)
41 return -EIO;
42 value &= 0xffff;
43 break;
44
45 case FS_BASE:
46 case GS_BASE:
47 if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
48 return -EIO;
49 break;
50
51 case EFLAGS:
52 value &= FLAG_MASK;
53 tmp = PT_REGS_EFLAGS(&child->thread.regs) & ~FLAG_MASK;
54 value |= tmp;
55 break;
56 }
57
58 PT_REGS_SET(&child->thread.regs, regno, value);
59 return 0;
60}
61
62int poke_user(struct task_struct *child, long addr, long data)
63{
64 if ((addr & 3) || addr < 0)
65 return -EIO;
66
67 if (addr < MAX_REG_OFFSET)
68 return putreg(child, addr, data);
69 else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
70 (addr <= offsetof(struct user, u_debugreg[7]))) {
71 addr -= offsetof(struct user, u_debugreg[0]);
72 addr = addr >> 2;
73 if ((addr == 4) || (addr == 5))
74 return -EIO;
75 child->thread.arch.debugregs[addr] = data;
76 return 0;
77 }
78 return -EIO;
79}
80
81unsigned long getreg(struct task_struct *child, int regno)
82{
83 unsigned long retval = ~0UL;
84 switch (regno) {
85 case FS:
86 case GS:
87 case DS:
88 case ES:
89 case SS:
90 case CS:
91 retval = 0xffff;
92 /* fall through */
93 default:
94 retval &= PT_REG(&child->thread.regs, regno);
95#ifdef TIF_IA32
96 if (test_tsk_thread_flag(child, TIF_IA32))
97 retval &= 0xffffffff;
98#endif
99 }
100 return retval;
101}
102
103int peek_user(struct task_struct *child, long addr, long data)
104{
105 /* read the word at location addr in the USER area. */
106 unsigned long tmp;
107
108 if ((addr & 3) || addr < 0)
109 return -EIO;
110
111 tmp = 0; /* Default return condition */
112 if (addr < MAX_REG_OFFSET)
113 tmp = getreg(child, addr);
114 else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
115 (addr <= offsetof(struct user, u_debugreg[7]))) {
116 addr -= offsetof(struct user, u_debugreg[0]);
117 addr = addr >> 2;
118 tmp = child->thread.arch.debugregs[addr];
119 }
120 return put_user(tmp, (unsigned long *) data);
121}
122
123/* XXX Mostly copied from sys-i386 */
124int is_syscall(unsigned long addr)
125{
126 unsigned short instr;
127 int n;
128
129 n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
130 if (n) {
131 /*
132 * access_process_vm() grants access to vsyscall and stub,
133 * while copy_from_user doesn't. Maybe access_process_vm is
134 * slow, but that doesn't matter, since it will be called only
135 * in case of singlestepping, if copy_from_user failed.
136 */
137 n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
138 if (n != sizeof(instr)) {
139 printk("is_syscall : failed to read instruction from "
140 "0x%lx\n", addr);
141 return 1;
142 }
143 }
144 /* syscall (0x0f 0x05) */
145 return instr == 0x050f;
146}
147
148static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
149{
150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
151 long fpregs[HOST_FP_SIZE];
152
153 BUG_ON(sizeof(*buf) != sizeof(fpregs));
154 err = save_fp_registers(userspace_pid[cpu], fpregs);
155 if (err)
156 return err;
157
158 n = copy_to_user(buf, fpregs, sizeof(fpregs));
159 if (n > 0)
160 return -EFAULT;
161
162 return n;
163}
164
165static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
166{
167 int n, cpu = ((struct thread_info *) child->stack)->cpu;
168 long fpregs[HOST_FP_SIZE];
169
170 BUG_ON(sizeof(*buf) != sizeof(fpregs));
171 n = copy_from_user(fpregs, buf, sizeof(fpregs));
172 if (n > 0)
173 return -EFAULT;
174
175 return restore_fp_registers(userspace_pid[cpu], fpregs);
176}
177
178long subarch_ptrace(struct task_struct *child, long request,
179 unsigned long addr, unsigned long data)
180{
181 int ret = -EIO;
182 void __user *datap = (void __user *) data;
183
184 switch (request) {
185 case PTRACE_GETFPREGS: /* Get the child FPU state. */
186 ret = get_fpregs(datap, child);
187 break;
188 case PTRACE_SETFPREGS: /* Set the child FPU state. */
189 ret = set_fpregs(datap, child);
190 break;
191 case PTRACE_ARCH_PRCTL:
192 /* XXX Calls ptrace on the host - needs some SMP thinking */
193 ret = arch_prctl(child, data, (void __user *) addr);
194 break;
195 }
196
197 return ret;
198}
diff --git a/arch/um/sys-x86_64/ptrace_user.c b/arch/um/sys-x86_64/ptrace_user.c
new file mode 100644
index 00000000000..c57a496d3f5
--- /dev/null
+++ b/arch/um/sys-x86_64/ptrace_user.c
@@ -0,0 +1,22 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#include <errno.h>
8#include "ptrace_user.h"
9
10int ptrace_getregs(long pid, unsigned long *regs_out)
11{
12 if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
13 return -errno;
14 return 0;
15}
16
17int ptrace_setregs(long pid, unsigned long *regs_out)
18{
19 if (ptrace(PTRACE_SETREGS, pid, 0, regs_out) < 0)
20 return -errno;
21 return 0;
22}
diff --git a/arch/um/sys-x86_64/setjmp.S b/arch/um/sys-x86_64/setjmp.S
new file mode 100644
index 00000000000..45f547b4043
--- /dev/null
+++ b/arch/um/sys-x86_64/setjmp.S
@@ -0,0 +1,54 @@
1#
2# arch/x86_64/setjmp.S
3#
4# setjmp/longjmp for the x86-64 architecture
5#
6
7#
8# The jmp_buf is assumed to contain the following, in order:
9# %rbx
10# %rsp (post-return)
11# %rbp
12# %r12
13# %r13
14# %r14
15# %r15
16# <return address>
17#
18
19 .text
20 .align 4
21 .globl setjmp
22 .type setjmp, @function
23setjmp:
24 pop %rsi # Return address, and adjust the stack
25 xorl %eax,%eax # Return value
26 movq %rbx,(%rdi)
27 movq %rsp,8(%rdi) # Post-return %rsp!
28 push %rsi # Make the call/return stack happy
29 movq %rbp,16(%rdi)
30 movq %r12,24(%rdi)
31 movq %r13,32(%rdi)
32 movq %r14,40(%rdi)
33 movq %r15,48(%rdi)
34 movq %rsi,56(%rdi) # Return address
35 ret
36
37 .size setjmp,.-setjmp
38
39 .text
40 .align 4
41 .globl longjmp
42 .type longjmp, @function
43longjmp:
44 movl %esi,%eax # Return value (int)
45 movq (%rdi),%rbx
46 movq 8(%rdi),%rsp
47 movq 16(%rdi),%rbp
48 movq 24(%rdi),%r12
49 movq 32(%rdi),%r13
50 movq 40(%rdi),%r14
51 movq 48(%rdi),%r15
52 jmp *56(%rdi)
53
54 .size longjmp,.-longjmp
diff --git a/arch/um/sys-x86_64/shared/sysdep/archsetjmp.h b/arch/um/sys-x86_64/shared/sysdep/archsetjmp.h
new file mode 100644
index 00000000000..2af8f12ca16
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/archsetjmp.h
@@ -0,0 +1,24 @@
1/*
2 * arch/um/include/sysdep-x86_64/archsetjmp.h
3 */
4
5#ifndef _KLIBC_ARCHSETJMP_H
6#define _KLIBC_ARCHSETJMP_H
7
8struct __jmp_buf {
9 unsigned long __rbx;
10 unsigned long __rsp;
11 unsigned long __rbp;
12 unsigned long __r12;
13 unsigned long __r13;
14 unsigned long __r14;
15 unsigned long __r15;
16 unsigned long __rip;
17};
18
19typedef struct __jmp_buf jmp_buf[1];
20
21#define JB_IP __rip
22#define JB_SP __rsp
23
24#endif /* _SETJMP_H */
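
A small usage sketch (not part of the header), showing how the saved context can be inspected through the JB_* member aliases above; it assumes the setjmp() from setjmp.S is linked in, and saved_sp_example() is just an illustrative name.

extern int setjmp(jmp_buf);

static unsigned long saved_sp_example(void)
{
	jmp_buf buf;

	if (setjmp(buf) == 0)
		/* direct return: read the %rsp recorded in the buffer */
		return buf[0].JB_SP;	/* expands to buf[0].__rsp */

	return 0;	/* only reached if someone longjmp()s back here */
}
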
diff --git a/arch/um/sys-x86_64/shared/sysdep/barrier.h b/arch/um/sys-x86_64/shared/sysdep/barrier.h
new file mode 100644
index 00000000000..7b610befdc8
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/barrier.h
@@ -0,0 +1,7 @@
1#ifndef __SYSDEP_X86_64_BARRIER_H
2#define __SYSDEP_X86_64_BARRIER_H
3
4/* Copied from include/asm-x86_64 for use by userspace. */
5#define mb() asm volatile("mfence":::"memory")
6
7#endif
diff --git a/arch/um/sys-x86_64/shared/sysdep/checksum.h b/arch/um/sys-x86_64/shared/sysdep/checksum.h
new file mode 100644
index 00000000000..a5be9031ea8
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/checksum.h
@@ -0,0 +1,144 @@
1/*
2 * Licensed under the GPL
3 */
4
5#ifndef __UM_SYSDEP_CHECKSUM_H
6#define __UM_SYSDEP_CHECKSUM_H
7
8#include "linux/string.h"
9#include "linux/in6.h"
10#include "asm/uaccess.h"
11
12extern __wsum csum_partial(const void *buff, int len, __wsum sum);
13
14/*
15 * Note: when you get a NULL pointer exception here this means someone
16 * passed in an incorrect kernel address to one of these functions.
17 *
18 * If you use these functions directly please don't forget the
19 * access_ok().
20 */
21
22static __inline__
23__wsum csum_partial_copy_nocheck(const void *src, void *dst,
24 int len, __wsum sum)
25{
26 memcpy(dst, src, len);
27 return(csum_partial(dst, len, sum));
28}
29
30static __inline__
31__wsum csum_partial_copy_from_user(const void __user *src,
32 void *dst, int len, __wsum sum,
33 int *err_ptr)
34{
35 if (copy_from_user(dst, src, len)) {
36 *err_ptr = -EFAULT;
37 return (__force __wsum)-1;
38 }
39 return csum_partial(dst, len, sum);
40}
41
42/**
43 * csum_fold - Fold and invert a 32bit checksum.
44 * sum: 32bit unfolded sum
45 *
46 * Fold a 32bit running checksum to 16bit and invert it. This is usually
47 * the last step before putting a checksum into a packet.
48 * Make sure not to mix with 64bit checksums.
49 */
50static inline __sum16 csum_fold(__wsum sum)
51{
52 __asm__(
53 " addl %1,%0\n"
54 " adcl $0xffff,%0"
55 : "=r" (sum)
56 : "r" ((__force u32)sum << 16),
57 "0" ((__force u32)sum & 0xffff0000)
58 );
59 return (__force __sum16)(~(__force u32)sum >> 16);
60}
61
62/**
63 * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
64 * @saddr: source address
65 * @daddr: destination address
66 * @len: length of packet
67 * @proto: ip protocol of packet
68 * @sum: initial sum to be added in (32bit unfolded)
69 *
70 * Returns the pseudo header checksum for the input data. Result is
71 * 32bit unfolded.
72 */
73static inline __wsum
74csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
75 unsigned short proto, __wsum sum)
76{
77 asm(" addl %1, %0\n"
78 " adcl %2, %0\n"
79 " adcl %3, %0\n"
80 " adcl $0, %0\n"
81 : "=r" (sum)
82 : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
83 return sum;
84}
85
86/*
87 * computes the checksum of the TCP/UDP pseudo-header
88 * returns a 16-bit checksum, already complemented
89 */
90static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
91 unsigned short len,
92 unsigned short proto,
93 __wsum sum)
94{
95 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
96}
97
98/**
99 * ip_fast_csum - Compute the IPv4 header checksum efficiently.
100 * iph: ipv4 header
101 * ihl: length of header / 4
102 */
103static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
104{
105 unsigned int sum;
106
107 asm( " movl (%1), %0\n"
108 " subl $4, %2\n"
109 " jbe 2f\n"
110 " addl 4(%1), %0\n"
111 " adcl 8(%1), %0\n"
112 " adcl 12(%1), %0\n"
113 "1: adcl 16(%1), %0\n"
114 " lea 4(%1), %1\n"
115 " decl %2\n"
116 " jne 1b\n"
117 " adcl $0, %0\n"
118 " movl %0, %2\n"
119 " shrl $16, %0\n"
120 " addw %w2, %w0\n"
121 " adcl $0, %0\n"
122 " notl %0\n"
123 "2:"
124 /* Since the input registers which are loaded with iph and ihl
125 are modified, we must also specify them as outputs, or gcc
126 will assume they contain their original values. */
127 : "=r" (sum), "=r" (iph), "=r" (ihl)
128 : "1" (iph), "2" (ihl)
129 : "memory");
130 return (__force __sum16)sum;
131}
132
133static inline unsigned add32_with_carry(unsigned a, unsigned b)
134{
135 asm("addl %2,%0\n\t"
136 "adcl $0,%0"
137 : "=r" (a)
138 : "0" (a), "r" (b));
139 return a;
140}
141
142extern __sum16 ip_compute_csum(const void *buff, int len);
143
144#endif
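
For clarity, here is what csum_fold() above computes, written as a hedged, portable C sketch rather than inline assembly (csum_fold_portable() is just an illustrative name): fold the 32-bit partial sum into 16 bits with end-around carry, then complement it.

static inline unsigned short csum_fold_portable(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half into the low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry the fold may produce */
	return (unsigned short)~sum;		/* ones' complement of the folded sum */
}
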
diff --git a/arch/um/sys-x86_64/shared/sysdep/faultinfo.h b/arch/um/sys-x86_64/shared/sysdep/faultinfo.h
new file mode 100644
index 00000000000..cb917b0d566
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/faultinfo.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
3 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
4 * Licensed under the GPL
5 */
6
7#ifndef __FAULTINFO_X86_64_H
8#define __FAULTINFO_X86_64_H
9
10/* this structure contains the full arch-specific faultinfo
11 * from the traps.
12 * On i386, ptrace_faultinfo unfortunately doesn't provide
13 * all the info, since trap_no is missing.
14 * All common elements are defined at the same position in
15 * both structures, thus making it easy to copy the
16 * contents without knowledge about the structure elements.
17 */
18struct faultinfo {
19 int error_code; /* in ptrace_faultinfo misleadingly called is_write */
20 unsigned long cr2; /* in ptrace_faultinfo called addr */
21 int trap_no; /* missing in ptrace_faultinfo */
22};
23
24#define FAULT_WRITE(fi) ((fi).error_code & 2)
25#define FAULT_ADDRESS(fi) ((fi).cr2)
26
27#define PTRACE_FULL_FAULTINFO 1
28
29#endif
diff --git a/arch/um/sys-x86_64/shared/sysdep/host_ldt.h b/arch/um/sys-x86_64/shared/sysdep/host_ldt.h
new file mode 100644
index 00000000000..e8b1be1e154
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/host_ldt.h
@@ -0,0 +1,38 @@
1#ifndef __ASM_HOST_LDT_X86_64_H
2#define __ASM_HOST_LDT_X86_64_H
3
4#include <asm/ldt.h>
5
6/*
7 * macros stolen from include/asm-x86_64/desc.h
8 */
9#define LDT_entry_a(info) \
10 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
11
12/* Don't allow setting of the lm bit. It is useless anyway because
13 * 64bit system calls require __USER_CS. */
14#define LDT_entry_b(info) \
15 (((info)->base_addr & 0xff000000) | \
16 (((info)->base_addr & 0x00ff0000) >> 16) | \
17 ((info)->limit & 0xf0000) | \
18 (((info)->read_exec_only ^ 1) << 9) | \
19 ((info)->contents << 10) | \
20 (((info)->seg_not_present ^ 1) << 15) | \
21 ((info)->seg_32bit << 22) | \
22 ((info)->limit_in_pages << 23) | \
23 ((info)->useable << 20) | \
24 /* ((info)->lm << 21) | */ \
25 0x7000)
26
27#define LDT_empty(info) (\
28 (info)->base_addr == 0 && \
29 (info)->limit == 0 && \
30 (info)->contents == 0 && \
31 (info)->read_exec_only == 1 && \
32 (info)->seg_32bit == 0 && \
33 (info)->limit_in_pages == 0 && \
34 (info)->seg_not_present == 1 && \
35 (info)->useable == 0 && \
36 (info)->lm == 0)
37
38#endif
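
As a hedged illustration of the macros above (not part of the header), this packs a flat 32-bit data segment into the two descriptor words a GDT/LDT slot actually holds; the base and limit are placeholder values and pack_example() is just an illustrative name.

static inline void pack_example(unsigned int *lo, unsigned int *hi)
{
	struct user_desc info = {
		.base_addr	= 0x10000000,	/* placeholder segment base */
		.limit		= 0xfffff,	/* 4GB together with limit_in_pages */
		.seg_32bit	= 1,
		.limit_in_pages	= 1,
		.useable	= 1,
	};

	*lo = LDT_entry_a(&info);	/* base[15:0] in the top half, limit[15:0] below */
	*hi = LDT_entry_b(&info);	/* base[31:16], flag bits and limit[19:16] */
}
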
diff --git a/arch/um/sys-x86_64/shared/sysdep/kernel-offsets.h b/arch/um/sys-x86_64/shared/sysdep/kernel-offsets.h
new file mode 100644
index 00000000000..a307237b796
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/kernel-offsets.h
@@ -0,0 +1,23 @@
1#include <linux/stddef.h>
2#include <linux/sched.h>
3#include <linux/time.h>
4#include <linux/elf.h>
5#include <linux/crypto.h>
6#include <asm/page.h>
7#include <asm/mman.h>
8
9#define DEFINE(sym, val) \
10 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
11
12#define DEFINE_STR1(x) #x
13#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " DEFINE_STR1(val) " " #val: : )
14
15#define BLANK() asm volatile("\n->" : : )
16
17#define OFFSET(sym, str, mem) \
18 DEFINE(sym, offsetof(struct str, mem));
19
20void foo(void)
21{
22#include <common-offsets.h>
23}
diff --git a/arch/um/sys-x86_64/shared/sysdep/ptrace.h b/arch/um/sys-x86_64/shared/sysdep/ptrace.h
new file mode 100644
index 00000000000..8ee8f8e12af
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/ptrace.h
@@ -0,0 +1,240 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 *
5 * Licensed under the GPL
6 */
7
8#ifndef __SYSDEP_X86_64_PTRACE_H
9#define __SYSDEP_X86_64_PTRACE_H
10
11#include "user_constants.h"
12#include "sysdep/faultinfo.h"
13
14#define MAX_REG_OFFSET (UM_FRAME_SIZE)
15#define MAX_REG_NR ((MAX_REG_OFFSET) / sizeof(unsigned long))
16
17#include "skas_ptregs.h"
18
19#define REGS_IP(r) ((r)[HOST_IP])
20#define REGS_SP(r) ((r)[HOST_SP])
21
22#define REGS_RBX(r) ((r)[HOST_RBX])
23#define REGS_RCX(r) ((r)[HOST_RCX])
24#define REGS_RDX(r) ((r)[HOST_RDX])
25#define REGS_RSI(r) ((r)[HOST_RSI])
26#define REGS_RDI(r) ((r)[HOST_RDI])
27#define REGS_RBP(r) ((r)[HOST_RBP])
28#define REGS_RAX(r) ((r)[HOST_RAX])
29#define REGS_R8(r) ((r)[HOST_R8])
30#define REGS_R9(r) ((r)[HOST_R9])
31#define REGS_R10(r) ((r)[HOST_R10])
32#define REGS_R11(r) ((r)[HOST_R11])
33#define REGS_R12(r) ((r)[HOST_R12])
34#define REGS_R13(r) ((r)[HOST_R13])
35#define REGS_R14(r) ((r)[HOST_R14])
36#define REGS_R15(r) ((r)[HOST_R15])
37#define REGS_CS(r) ((r)[HOST_CS])
38#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
39#define REGS_SS(r) ((r)[HOST_SS])
40
41#define HOST_FS_BASE 21
42#define HOST_GS_BASE 22
43#define HOST_DS 23
44#define HOST_ES 24
45#define HOST_FS 25
46#define HOST_GS 26
47
48/* Also defined in asm/ptrace-x86_64.h, but not in libc headers. So, these
49 * are already defined for kernel code, but not for userspace code.
50 */
51#ifndef FS_BASE
52/* These aren't defined in ptrace.h, but exist in struct user_regs_struct,
53 * which is what x86_64 ptrace actually uses.
54 */
55#define FS_BASE (HOST_FS_BASE * sizeof(long))
56#define GS_BASE (HOST_GS_BASE * sizeof(long))
57#define DS (HOST_DS * sizeof(long))
58#define ES (HOST_ES * sizeof(long))
59#define FS (HOST_FS * sizeof(long))
60#define GS (HOST_GS * sizeof(long))
61#endif
62
63#define REGS_FS_BASE(r) ((r)[HOST_FS_BASE])
64#define REGS_GS_BASE(r) ((r)[HOST_GS_BASE])
65#define REGS_DS(r) ((r)[HOST_DS])
66#define REGS_ES(r) ((r)[HOST_ES])
67#define REGS_FS(r) ((r)[HOST_FS])
68#define REGS_GS(r) ((r)[HOST_GS])
69
70#define REGS_ORIG_RAX(r) ((r)[HOST_ORIG_RAX])
71
72#define REGS_SET_SYSCALL_RETURN(r, res) REGS_RAX(r) = (res)
73
74#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))
75
76#define REGS_SEGV_IS_FIXABLE(r) SEGV_IS_FIXABLE((r)->trap_type)
77
78#define REGS_FAULT_ADDR(r) ((r)->fault_addr)
79
80#define REGS_FAULT_WRITE(r) FAULT_WRITE((r)->fault_type)
81
82#define REGS_TRAP(r) ((r)->trap_type)
83
84#define REGS_ERR(r) ((r)->fault_type)
85
86struct uml_pt_regs {
87 unsigned long gp[MAX_REG_NR];
88 unsigned long fp[HOST_FP_SIZE];
89 struct faultinfo faultinfo;
90 long syscall;
91 int is_user;
92};
93
94#define EMPTY_UML_PT_REGS { }
95
96#define UPT_RBX(r) REGS_RBX((r)->gp)
97#define UPT_RCX(r) REGS_RCX((r)->gp)
98#define UPT_RDX(r) REGS_RDX((r)->gp)
99#define UPT_RSI(r) REGS_RSI((r)->gp)
100#define UPT_RDI(r) REGS_RDI((r)->gp)
101#define UPT_RBP(r) REGS_RBP((r)->gp)
102#define UPT_RAX(r) REGS_RAX((r)->gp)
103#define UPT_R8(r) REGS_R8((r)->gp)
104#define UPT_R9(r) REGS_R9((r)->gp)
105#define UPT_R10(r) REGS_R10((r)->gp)
106#define UPT_R11(r) REGS_R11((r)->gp)
107#define UPT_R12(r) REGS_R12((r)->gp)
108#define UPT_R13(r) REGS_R13((r)->gp)
109#define UPT_R14(r) REGS_R14((r)->gp)
110#define UPT_R15(r) REGS_R15((r)->gp)
111#define UPT_CS(r) REGS_CS((r)->gp)
112#define UPT_FS_BASE(r) REGS_FS_BASE((r)->gp)
113#define UPT_FS(r) REGS_FS((r)->gp)
114#define UPT_GS_BASE(r) REGS_GS_BASE((r)->gp)
115#define UPT_GS(r) REGS_GS((r)->gp)
116#define UPT_DS(r) REGS_DS((r)->gp)
117#define UPT_ES(r) REGS_ES((r)->gp)
118#define UPT_CS(r) REGS_CS((r)->gp)
119#define UPT_SS(r) REGS_SS((r)->gp)
120#define UPT_ORIG_RAX(r) REGS_ORIG_RAX((r)->gp)
121
122#define UPT_IP(r) REGS_IP((r)->gp)
123#define UPT_SP(r) REGS_SP((r)->gp)
124
125#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
126#define UPT_SYSCALL_NR(r) ((r)->syscall)
127#define UPT_SYSCALL_RET(r) UPT_RAX(r)
128
129extern int user_context(unsigned long sp);
130
131#define UPT_IS_USER(r) ((r)->is_user)
132
133#define UPT_SYSCALL_ARG1(r) UPT_RDI(r)
134#define UPT_SYSCALL_ARG2(r) UPT_RSI(r)
135#define UPT_SYSCALL_ARG3(r) UPT_RDX(r)
136#define UPT_SYSCALL_ARG4(r) UPT_R10(r)
137#define UPT_SYSCALL_ARG5(r) UPT_R8(r)
138#define UPT_SYSCALL_ARG6(r) UPT_R9(r)
139
140struct syscall_args {
141 unsigned long args[6];
142};
143
144#define SYSCALL_ARGS(r) ((struct syscall_args) \
145 { .args = { UPT_SYSCALL_ARG1(r), \
146 UPT_SYSCALL_ARG2(r), \
147 UPT_SYSCALL_ARG3(r), \
148 UPT_SYSCALL_ARG4(r), \
149 UPT_SYSCALL_ARG5(r), \
150 UPT_SYSCALL_ARG6(r) } } )
151
152#define UPT_REG(regs, reg) \
153 ({ unsigned long val; \
154 switch(reg){ \
155 case R8: val = UPT_R8(regs); break; \
156 case R9: val = UPT_R9(regs); break; \
157 case R10: val = UPT_R10(regs); break; \
158 case R11: val = UPT_R11(regs); break; \
159 case R12: val = UPT_R12(regs); break; \
160 case R13: val = UPT_R13(regs); break; \
161 case R14: val = UPT_R14(regs); break; \
162 case R15: val = UPT_R15(regs); break; \
163 case RIP: val = UPT_IP(regs); break; \
164 case RSP: val = UPT_SP(regs); break; \
165 case RAX: val = UPT_RAX(regs); break; \
166 case RBX: val = UPT_RBX(regs); break; \
167 case RCX: val = UPT_RCX(regs); break; \
168 case RDX: val = UPT_RDX(regs); break; \
169 case RSI: val = UPT_RSI(regs); break; \
170 case RDI: val = UPT_RDI(regs); break; \
171 case RBP: val = UPT_RBP(regs); break; \
172 case ORIG_RAX: val = UPT_ORIG_RAX(regs); break; \
173 case CS: val = UPT_CS(regs); break; \
174 case SS: val = UPT_SS(regs); break; \
175 case FS_BASE: val = UPT_FS_BASE(regs); break; \
176 case GS_BASE: val = UPT_GS_BASE(regs); break; \
177 case DS: val = UPT_DS(regs); break; \
178 case ES: val = UPT_ES(regs); break; \
179 case FS : val = UPT_FS (regs); break; \
180 case GS: val = UPT_GS(regs); break; \
181 case EFLAGS: val = UPT_EFLAGS(regs); break; \
182 default : \
183 panic("Bad register in UPT_REG : %d\n", reg); \
184 val = -1; \
185 } \
186 val; \
187 })
188
189
190#define UPT_SET(regs, reg, val) \
191 ({ unsigned long __upt_val = val; \
192 switch(reg){ \
193 case R8: UPT_R8(regs) = __upt_val; break; \
194 case R9: UPT_R9(regs) = __upt_val; break; \
195 case R10: UPT_R10(regs) = __upt_val; break; \
196 case R11: UPT_R11(regs) = __upt_val; break; \
197 case R12: UPT_R12(regs) = __upt_val; break; \
198 case R13: UPT_R13(regs) = __upt_val; break; \
199 case R14: UPT_R14(regs) = __upt_val; break; \
200 case R15: UPT_R15(regs) = __upt_val; break; \
201 case RIP: UPT_IP(regs) = __upt_val; break; \
202 case RSP: UPT_SP(regs) = __upt_val; break; \
203 case RAX: UPT_RAX(regs) = __upt_val; break; \
204 case RBX: UPT_RBX(regs) = __upt_val; break; \
205 case RCX: UPT_RCX(regs) = __upt_val; break; \
206 case RDX: UPT_RDX(regs) = __upt_val; break; \
207 case RSI: UPT_RSI(regs) = __upt_val; break; \
208 case RDI: UPT_RDI(regs) = __upt_val; break; \
209 case RBP: UPT_RBP(regs) = __upt_val; break; \
210 case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \
211 case CS: UPT_CS(regs) = __upt_val; break; \
212 case SS: UPT_SS(regs) = __upt_val; break; \
213 case FS_BASE: UPT_FS_BASE(regs) = __upt_val; break; \
214 case GS_BASE: UPT_GS_BASE(regs) = __upt_val; break; \
215 case DS: UPT_DS(regs) = __upt_val; break; \
216 case ES: UPT_ES(regs) = __upt_val; break; \
217 case FS: UPT_FS(regs) = __upt_val; break; \
218 case GS: UPT_GS(regs) = __upt_val; break; \
219 case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \
220 default : \
221 panic("Bad register in UPT_SET : %d\n", reg); \
222 break; \
223 } \
224 __upt_val; \
225 })
226
227#define UPT_SET_SYSCALL_RETURN(r, res) \
228 REGS_SET_SYSCALL_RETURN((r)->regs, (res))
229
230#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
231
232#define UPT_SEGV_IS_FIXABLE(r) REGS_SEGV_IS_FIXABLE(&r->skas)
233
234#define UPT_FAULTINFO(r) (&(r)->faultinfo)
235
236static inline void arch_init_registers(int pid)
237{
238}
239
240#endif
diff --git a/arch/um/sys-x86_64/shared/sysdep/ptrace_user.h b/arch/um/sys-x86_64/shared/sysdep/ptrace_user.h
new file mode 100644
index 00000000000..4dbccdb58f4
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/ptrace_user.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __SYSDEP_X86_64_PTRACE_USER_H__
8#define __SYSDEP_X86_64_PTRACE_USER_H__
9
10#define __FRAME_OFFSETS
11#include <sys/ptrace.h>
12#include <linux/ptrace.h>
13#include <asm/ptrace.h>
14#undef __FRAME_OFFSETS
15#include "user_constants.h"
16
17#define PT_INDEX(off) ((off) / sizeof(unsigned long))
18
19#define PT_SYSCALL_NR(regs) ((regs)[PT_INDEX(ORIG_RAX)])
20#define PT_SYSCALL_NR_OFFSET (ORIG_RAX)
21
22#define PT_SYSCALL_ARG1(regs) (((unsigned long *) (regs))[PT_INDEX(RDI)])
23#define PT_SYSCALL_ARG1_OFFSET (RDI)
24
25#define PT_SYSCALL_ARG2(regs) (((unsigned long *) (regs))[PT_INDEX(RSI)])
26#define PT_SYSCALL_ARG2_OFFSET (RSI)
27
28#define PT_SYSCALL_ARG3(regs) (((unsigned long *) (regs))[PT_INDEX(RDX)])
29#define PT_SYSCALL_ARG3_OFFSET (RDX)
30
31#define PT_SYSCALL_ARG4(regs) (((unsigned long *) (regs))[PT_INDEX(RCX)])
32#define PT_SYSCALL_ARG4_OFFSET (RCX)
33
34#define PT_SYSCALL_ARG5(regs) (((unsigned long *) (regs))[PT_INDEX(R8)])
35#define PT_SYSCALL_ARG5_OFFSET (R8)
36
37#define PT_SYSCALL_ARG6(regs) (((unsigned long *) (regs))[PT_INDEX(R9)])
38#define PT_SYSCALL_ARG6_OFFSET (R9)
39
40#define PT_SYSCALL_RET_OFFSET (RAX)
41
42#define PT_IP_OFFSET (RIP)
43#define PT_IP(regs) ((regs)[PT_INDEX(RIP)])
44
45#define PT_SP_OFFSET (RSP)
46#define PT_SP(regs) ((regs)[PT_INDEX(RSP)])
47
48#define PT_ORIG_RAX_OFFSET (ORIG_RAX)
49#define PT_ORIG_RAX(regs) ((regs)[PT_INDEX(ORIG_RAX)])
50
51/*
52 * x86_64 FC3 doesn't define this in /usr/include/linux/ptrace.h even though
53 * it's defined in the kernel's include/linux/ptrace.h. Additionally, use the
54 * 2.4 name and value for 2.4 host compatibility.
55 */
56#ifndef PTRACE_OLDSETOPTIONS
57#define PTRACE_OLDSETOPTIONS 21
58#endif
59
60/*
61 * These are before the system call, so the system call number is RAX
62 * rather than ORIG_RAX, and arg4 is R10 rather than RCX
63 */
64#define REGS_SYSCALL_NR PT_INDEX(RAX)
65#define REGS_SYSCALL_ARG1 PT_INDEX(RDI)
66#define REGS_SYSCALL_ARG2 PT_INDEX(RSI)
67#define REGS_SYSCALL_ARG3 PT_INDEX(RDX)
68#define REGS_SYSCALL_ARG4 PT_INDEX(R10)
69#define REGS_SYSCALL_ARG5 PT_INDEX(R8)
70#define REGS_SYSCALL_ARG6 PT_INDEX(R9)
71
72#define REGS_IP_INDEX PT_INDEX(RIP)
73#define REGS_SP_INDEX PT_INDEX(RSP)
74
75#define FP_SIZE (HOST_FP_SIZE)
76
77#endif
diff --git a/arch/um/sys-x86_64/shared/sysdep/sc.h b/arch/um/sys-x86_64/shared/sysdep/sc.h
new file mode 100644
index 00000000000..8aee45b0743
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/sc.h
@@ -0,0 +1,45 @@
1#ifndef __SYSDEP_X86_64_SC_H
2#define __SYSDEP_X86_64_SC_H
3
4/* Copyright (C) 2003 - 2004 PathScale, Inc
5 * Released under the GPL
6 */
7
8#include <user_constants.h>
9
10#define SC_OFFSET(sc, field) \
11 *((unsigned long *) &(((char *) (sc))[HOST_##field]))
12
13#define SC_RBX(sc) SC_OFFSET(sc, SC_RBX)
14#define SC_RCX(sc) SC_OFFSET(sc, SC_RCX)
15#define SC_RDX(sc) SC_OFFSET(sc, SC_RDX)
16#define SC_RSI(sc) SC_OFFSET(sc, SC_RSI)
17#define SC_RDI(sc) SC_OFFSET(sc, SC_RDI)
18#define SC_RBP(sc) SC_OFFSET(sc, SC_RBP)
19#define SC_RAX(sc) SC_OFFSET(sc, SC_RAX)
20#define SC_R8(sc) SC_OFFSET(sc, SC_R8)
21#define SC_R9(sc) SC_OFFSET(sc, SC_R9)
22#define SC_R10(sc) SC_OFFSET(sc, SC_R10)
23#define SC_R11(sc) SC_OFFSET(sc, SC_R11)
24#define SC_R12(sc) SC_OFFSET(sc, SC_R12)
25#define SC_R13(sc) SC_OFFSET(sc, SC_R13)
26#define SC_R14(sc) SC_OFFSET(sc, SC_R14)
27#define SC_R15(sc) SC_OFFSET(sc, SC_R15)
28#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
29#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
30#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
31#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
32#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
33#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
34#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
35#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
36#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
37#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
38#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
39#if 0
40#define SC_ORIG_RAX(sc) SC_OFFSET(sc, SC_ORIG_RAX)
41#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
42#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
43#endif
44
45#endif
diff --git a/arch/um/sys-x86_64/shared/sysdep/sigcontext.h b/arch/um/sys-x86_64/shared/sysdep/sigcontext.h
new file mode 100644
index 00000000000..0155133b145
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/sigcontext.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __SYSDEP_X86_64_SIGCONTEXT_H
8#define __SYSDEP_X86_64_SIGCONTEXT_H
9
10#include <sysdep/sc.h>
11
12#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
13
14#define GET_FAULTINFO_FROM_SC(fi, sc) \
15 { \
16 (fi).cr2 = SC_CR2(sc); \
17 (fi).error_code = SC_ERR(sc); \
18 (fi).trap_no = SC_TRAPNO(sc); \
19 }
20
21/* This is Page Fault */
22#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
23
24/* No broken SKAS API, which doesn't pass trap_no, here. */
25#define SEGV_MAYBE_FIXABLE(fi) 0
26
27#endif
diff --git a/arch/um/sys-x86_64/shared/sysdep/skas_ptrace.h b/arch/um/sys-x86_64/shared/sysdep/skas_ptrace.h
new file mode 100644
index 00000000000..95db4be786e
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/skas_ptrace.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_X86_64_SKAS_PTRACE_H
7#define __SYSDEP_X86_64_SKAS_PTRACE_H
8
9struct ptrace_faultinfo {
10 int is_write;
11 unsigned long addr;
12};
13
14struct ptrace_ldt {
15 int func;
16 void *ptr;
17 unsigned long bytecount;
18};
19
20#define PTRACE_LDT 54
21
22#endif
diff --git a/arch/um/sys-x86_64/shared/sysdep/stub.h b/arch/um/sys-x86_64/shared/sysdep/stub.h
new file mode 100644
index 00000000000..3432aa24997
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/stub.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_STUB_H
7#define __SYSDEP_STUB_H
8
9#include <sys/mman.h>
10#include <asm/unistd.h>
11#include <sysdep/ptrace_user.h>
12#include "as-layout.h"
13#include "stub-data.h"
14#include "kern_constants.h"
15
16extern void stub_segv_handler(int sig);
17extern void stub_clone_handler(void);
18
19#define STUB_SYSCALL_RET PT_INDEX(RAX)
20#define STUB_MMAP_NR __NR_mmap
21#define MMAP_OFFSET(o) (o)
22
23#define __syscall_clobber "r11","rcx","memory"
24#define __syscall "syscall"
25
26static inline long stub_syscall0(long syscall)
27{
28 long ret;
29
30 __asm__ volatile (__syscall
31 : "=a" (ret)
32 : "0" (syscall) : __syscall_clobber );
33
34 return ret;
35}
36
37static inline long stub_syscall2(long syscall, long arg1, long arg2)
38{
39 long ret;
40
41 __asm__ volatile (__syscall
42 : "=a" (ret)
43 : "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );
44
45 return ret;
46}
47
48static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
49{
50 long ret;
51
52 __asm__ volatile (__syscall
53 : "=a" (ret)
54 : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
55 : __syscall_clobber );
56
57 return ret;
58}
59
60static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
61 long arg4)
62{
63 long ret;
64
65 __asm__ volatile ("movq %5,%%r10 ; " __syscall
66 : "=a" (ret)
67 : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
68 "g" (arg4)
69 : __syscall_clobber, "r10" );
70
71 return ret;
72}
73
74static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
75 long arg4, long arg5)
76{
77 long ret;
78
79 __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
80 : "=a" (ret)
81 : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
82 "g" (arg4), "g" (arg5)
83 : __syscall_clobber, "r10", "r8" );
84
85 return ret;
86}
87
88static inline void trap_myself(void)
89{
90 __asm("int3");
91}
92
93static inline void remap_stack(long fd, unsigned long offset)
94{
95 __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
96 "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
97 "movq %%rax, (%%rbx)":
98 : "a" (STUB_MMAP_NR), "D" (STUB_DATA),
99 "S" (UM_KERN_PAGE_SIZE),
100 "d" (PROT_READ | PROT_WRITE),
101 "g" (MAP_FIXED | MAP_SHARED), "g" (fd),
102 "g" (offset),
103 "i" (&((struct stub_data *) STUB_DATA)->err)
104 : __syscall_clobber, "r10", "r8", "r9" );
105}
106
107#endif
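
A hedged usage sketch (not part of the header) of the helpers above: issuing a host system call directly with the syscall instruction, here mprotect() via stub_syscall3(). stub_mprotect_example() is just an illustrative name; __NR_mprotect and PROT_READ come from the headers already included.

static inline long stub_mprotect_example(unsigned long addr, unsigned long len)
{
	/* mprotect(addr, len, PROT_READ), bypassing libc entirely */
	long err = stub_syscall3(__NR_mprotect, addr, len, PROT_READ);

	if (err)
		trap_myself();	/* let the tracing UML kernel see the failure */

	return err;
}
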
diff --git a/arch/um/sys-x86_64/shared/sysdep/syscalls.h b/arch/um/sys-x86_64/shared/sysdep/syscalls.h
new file mode 100644
index 00000000000..7cfb0b08565
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/syscalls.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#ifndef __SYSDEP_X86_64_SYSCALLS_H__
8#define __SYSDEP_X86_64_SYSCALLS_H__
9
10#include <linux/msg.h>
11#include <linux/shm.h>
12#include <kern_constants.h>
13
14typedef long syscall_handler_t(void);
15
16extern syscall_handler_t *sys_call_table[];
17
18#define EXECUTE_SYSCALL(syscall, regs) \
19 (((long (*)(long, long, long, long, long, long)) \
20 (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
21 UPT_SYSCALL_ARG2(&regs->regs), \
22 UPT_SYSCALL_ARG3(&regs->regs), \
23 UPT_SYSCALL_ARG4(&regs->regs), \
24 UPT_SYSCALL_ARG5(&regs->regs), \
25 UPT_SYSCALL_ARG6(&regs->regs)))
26
27extern long old_mmap(unsigned long addr, unsigned long len,
28 unsigned long prot, unsigned long flags,
29 unsigned long fd, unsigned long pgoff);
30extern syscall_handler_t sys_modify_ldt;
31extern syscall_handler_t sys_arch_prctl;
32
33#endif
diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
new file mode 100644
index 00000000000..d1b93c43620
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
@@ -0,0 +1,132 @@
1#ifndef _ASM_X86_SYSTEM_H_
2#define _ASM_X86_SYSTEM_H_
3
4#include <asm/asm.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8#include <asm/nops.h>
9
10#include <linux/kernel.h>
11#include <linux/irqflags.h>
12
13/* entries in ARCH_DLINFO: */
14#ifdef CONFIG_IA32_EMULATION
15# define AT_VECTOR_SIZE_ARCH 2
16#else
17# define AT_VECTOR_SIZE_ARCH 1
18#endif
19
20extern unsigned long arch_align_stack(unsigned long sp);
21
22void default_idle(void);
23
24/*
25 * Force strict CPU ordering.
26 * And yes, this is required on UP too when we're talking
27 * to devices.
28 */
29#ifdef CONFIG_X86_32
30/*
31 * Some non-Intel clones support out of order store. wmb() ceases to be a
32 * nop for these.
33 */
34#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
35#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
36#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
37#else
38#define mb() asm volatile("mfence":::"memory")
39#define rmb() asm volatile("lfence":::"memory")
40#define wmb() asm volatile("sfence" ::: "memory")
41#endif
42
43/**
44 * read_barrier_depends - Flush all pending reads that subsequent reads
45 * depend on.
46 *
47 * No data-dependent reads from memory-like regions are ever reordered
48 * over this barrier. All reads preceding this primitive are guaranteed
49 * to access memory (but not necessarily other CPUs' caches) before any
 50 * reads following this primitive that depend on the data returned by
51 * any of the preceding reads. This primitive is much lighter weight than
 52 * rmb() on most CPUs, and is never heavier weight than
53 * rmb().
54 *
55 * These ordering constraints are respected by both the local CPU
56 * and the compiler.
57 *
58 * Ordering is not guaranteed by anything other than these primitives,
59 * not even by data dependencies. See the documentation for
60 * memory_barrier() for examples and URLs to more information.
61 *
62 * For example, the following code would force ordering (the initial
63 * value of "a" is zero, "b" is one, and "p" is "&a"):
64 *
65 * <programlisting>
66 * CPU 0 CPU 1
67 *
68 * b = 2;
69 * memory_barrier();
70 * p = &b; q = p;
71 * read_barrier_depends();
72 * d = *q;
73 * </programlisting>
74 *
75 * because the read of "*q" depends on the read of "p" and these
76 * two reads are separated by a read_barrier_depends(). However,
77 * the following code, with the same initial values for "a" and "b":
78 *
79 * <programlisting>
80 * CPU 0 CPU 1
81 *
82 * a = 2;
83 * memory_barrier();
84 * b = 3; y = b;
85 * read_barrier_depends();
86 * x = a;
87 * </programlisting>
88 *
89 * does not enforce ordering, since there is no data dependency between
90 * the read of "a" and the read of "b". Therefore, on some CPUs, such
91 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
92 * in cases like this where there are no data dependencies.
93 **/
94
95#define read_barrier_depends() do { } while (0)
96
97#ifdef CONFIG_SMP
98#define smp_mb() mb()
99#ifdef CONFIG_X86_PPRO_FENCE
100# define smp_rmb() rmb()
101#else
102# define smp_rmb() barrier()
103#endif
104#ifdef CONFIG_X86_OOSTORE
105# define smp_wmb() wmb()
106#else
107# define smp_wmb() barrier()
108#endif
109#define smp_read_barrier_depends() read_barrier_depends()
110#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
111#else
112#define smp_mb() barrier()
113#define smp_rmb() barrier()
114#define smp_wmb() barrier()
115#define smp_read_barrier_depends() do { } while (0)
116#define set_mb(var, value) do { var = value; barrier(); } while (0)
117#endif
118
119/*
120 * Stop RDTSC speculation. This is needed when you need to use RDTSC
121 * (or get_cycles or vread that possibly accesses the TSC) in a defined
122 * code region.
123 *
124 * (Could use an alternative three way for this if there was one.)
125 */
126static inline void rdtsc_barrier(void)
127{
128 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
129 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
130}
131
132#endif
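
Editor's note: the long comment above is easier to digest with the primitives in use. The sketch below is kernel-style illustration only (it relies on the smp_wmb()/smp_read_barrier_depends() definitions in this header; the struct and function names are invented) and shows the publish/consume pattern the CPU 0 / CPU 1 example describes: the writer orders its initialisation before publishing the pointer, so the reader's data-dependent loads cannot observe stale contents, even on Alpha.

    /* Illustration only -- not part of the patch. */
    struct demo {
            int a;
            int b;
    };

    static struct demo *demo_ptr;   /* NULL until published */

    static void demo_publish(struct demo *d)        /* "CPU 0" */
    {
            d->a = 1;
            d->b = 2;
            smp_wmb();              /* initialisation before publication */
            demo_ptr = d;
    }

    static int demo_consume(void)                   /* "CPU 1" */
    {
            struct demo *d = demo_ptr;

            if (!d)
                    return -1;
            smp_read_barrier_depends();     /* pairs with the smp_wmb() above */
            return d->a + d->b;             /* data-dependent reads: sees 1 and 2 */
    }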
diff --git a/arch/um/sys-x86_64/shared/sysdep/tls.h b/arch/um/sys-x86_64/shared/sysdep/tls.h
new file mode 100644
index 00000000000..18c000d0357
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/tls.h
@@ -0,0 +1,29 @@
1#ifndef _SYSDEP_TLS_H
2#define _SYSDEP_TLS_H
3
4# ifndef __KERNEL__
5
6/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
 7 * may be named user_desc (but in 2.4, and in headers matching its API, it was
 8 * named modify_ldt_ldt_s). */
9
10typedef struct um_dup_user_desc {
11 unsigned int entry_number;
12 unsigned int base_addr;
13 unsigned int limit;
14 unsigned int seg_32bit:1;
15 unsigned int contents:2;
16 unsigned int read_exec_only:1;
17 unsigned int limit_in_pages:1;
18 unsigned int seg_not_present:1;
19 unsigned int useable:1;
20 unsigned int lm:1;
21} user_desc_t;
22
23# else /* __KERNEL__ */
24
25# include <ldt.h>
26typedef struct user_desc user_desc_t;
27
28# endif /* __KERNEL__ */
29#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/sys-x86_64/shared/sysdep/vm-flags.h b/arch/um/sys-x86_64/shared/sysdep/vm-flags.h
new file mode 100644
index 00000000000..3978e55132d
--- /dev/null
+++ b/arch/um/sys-x86_64/shared/sysdep/vm-flags.h
@@ -0,0 +1,15 @@
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
5 */
6
7#ifndef __VM_FLAGS_X86_64_H
8#define __VM_FLAGS_X86_64_H
9
10#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
11 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
12#define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \
13 VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
14
15#endif
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
new file mode 100644
index 00000000000..b6b65c7c7a7
--- /dev/null
+++ b/arch/um/sys-x86_64/signal.c
@@ -0,0 +1,290 @@
1/*
2 * Copyright (C) 2003 PathScale, Inc.
3 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Licensed under the GPL
5 */
6
7#include <linux/personality.h>
8#include <linux/ptrace.h>
9#include <linux/kernel.h>
10#include <asm/unistd.h>
11#include <asm/uaccess.h>
12#include <asm/ucontext.h>
13#include "frame_kern.h"
14#include "skas.h"
15
16void copy_sc(struct uml_pt_regs *regs, void *from)
17{
18 struct sigcontext *sc = from;
19
20#define GETREG(regs, regno, sc, regname) \
21 (regs)->gp[(regno) / sizeof(unsigned long)] = (sc)->regname
22
23 GETREG(regs, R8, sc, r8);
24 GETREG(regs, R9, sc, r9);
25 GETREG(regs, R10, sc, r10);
26 GETREG(regs, R11, sc, r11);
27 GETREG(regs, R12, sc, r12);
28 GETREG(regs, R13, sc, r13);
29 GETREG(regs, R14, sc, r14);
30 GETREG(regs, R15, sc, r15);
31 GETREG(regs, RDI, sc, di);
32 GETREG(regs, RSI, sc, si);
33 GETREG(regs, RBP, sc, bp);
34 GETREG(regs, RBX, sc, bx);
35 GETREG(regs, RDX, sc, dx);
36 GETREG(regs, RAX, sc, ax);
37 GETREG(regs, RCX, sc, cx);
38 GETREG(regs, RSP, sc, sp);
39 GETREG(regs, RIP, sc, ip);
40 GETREG(regs, EFLAGS, sc, flags);
41 GETREG(regs, CS, sc, cs);
42
43#undef GETREG
44}
45
46static int copy_sc_from_user(struct pt_regs *regs,
47 struct sigcontext __user *from,
48 struct _fpstate __user *fpp)
49{
50 struct user_i387_struct fp;
51 int err = 0;
52
53#define GETREG(regs, regno, sc, regname) \
54 __get_user((regs)->regs.gp[(regno) / sizeof(unsigned long)], \
55 &(sc)->regname)
56
57 err |= GETREG(regs, R8, from, r8);
58 err |= GETREG(regs, R9, from, r9);
59 err |= GETREG(regs, R10, from, r10);
60 err |= GETREG(regs, R11, from, r11);
61 err |= GETREG(regs, R12, from, r12);
62 err |= GETREG(regs, R13, from, r13);
63 err |= GETREG(regs, R14, from, r14);
64 err |= GETREG(regs, R15, from, r15);
65 err |= GETREG(regs, RDI, from, di);
66 err |= GETREG(regs, RSI, from, si);
67 err |= GETREG(regs, RBP, from, bp);
68 err |= GETREG(regs, RBX, from, bx);
69 err |= GETREG(regs, RDX, from, dx);
70 err |= GETREG(regs, RAX, from, ax);
71 err |= GETREG(regs, RCX, from, cx);
72 err |= GETREG(regs, RSP, from, sp);
73 err |= GETREG(regs, RIP, from, ip);
74 err |= GETREG(regs, EFLAGS, from, flags);
75 err |= GETREG(regs, CS, from, cs);
76 if (err)
77 return 1;
78
79#undef GETREG
80
81 err = copy_from_user(&fp, fpp, sizeof(struct user_i387_struct));
82 if (err)
83 return 1;
84
85 err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
86 (unsigned long *) &fp);
87 if (err < 0) {
88 printk(KERN_ERR "copy_sc_from_user - "
89 "restore_fp_registers failed, errno = %d\n",
90 -err);
91 return 1;
92 }
93
94 return 0;
95}
96
97static int copy_sc_to_user(struct sigcontext __user *to,
98 struct _fpstate __user *to_fp, struct pt_regs *regs,
99 unsigned long mask, unsigned long sp)
100{
101 struct faultinfo * fi = &current->thread.arch.faultinfo;
102 struct user_i387_struct fp;
103 int err = 0;
104
105 err |= __put_user(0, &to->gs);
106 err |= __put_user(0, &to->fs);
107
108#define PUTREG(regs, regno, sc, regname) \
109 __put_user((regs)->regs.gp[(regno) / sizeof(unsigned long)], \
110 &(sc)->regname)
111
112 err |= PUTREG(regs, RDI, to, di);
113 err |= PUTREG(regs, RSI, to, si);
114 err |= PUTREG(regs, RBP, to, bp);
115 /*
116 * Must use original RSP, which is passed in, rather than what's in
117 * the pt_regs, because that's already been updated to point at the
118 * signal frame.
119 */
120 err |= __put_user(sp, &to->sp);
121 err |= PUTREG(regs, RBX, to, bx);
122 err |= PUTREG(regs, RDX, to, dx);
123 err |= PUTREG(regs, RCX, to, cx);
124 err |= PUTREG(regs, RAX, to, ax);
125 err |= PUTREG(regs, R8, to, r8);
126 err |= PUTREG(regs, R9, to, r9);
127 err |= PUTREG(regs, R10, to, r10);
128 err |= PUTREG(regs, R11, to, r11);
129 err |= PUTREG(regs, R12, to, r12);
130 err |= PUTREG(regs, R13, to, r13);
131 err |= PUTREG(regs, R14, to, r14);
132 err |= PUTREG(regs, R15, to, r15);
133 err |= PUTREG(regs, CS, to, cs); /* XXX x86_64 doesn't do this */
134
135 err |= __put_user(fi->cr2, &to->cr2);
136 err |= __put_user(fi->error_code, &to->err);
137 err |= __put_user(fi->trap_no, &to->trapno);
138
139 err |= PUTREG(regs, RIP, to, ip);
140 err |= PUTREG(regs, EFLAGS, to, flags);
141#undef PUTREG
142
143 err |= __put_user(mask, &to->oldmask);
144 if (err)
145 return 1;
146
147 err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
148 (unsigned long *) &fp);
149 if (err < 0) {
 150		printk(KERN_ERR "copy_sc_to_user - save_fp_registers "
151 "failed, errno = %d\n", -err);
152 return 1;
153 }
154
155 if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
156 return 1;
157
158 return err;
159}
160
161struct rt_sigframe
162{
163 char __user *pretcode;
164 struct ucontext uc;
165 struct siginfo info;
166 struct _fpstate fpstate;
167};
168
169int setup_signal_stack_si(unsigned long stack_top, int sig,
170 struct k_sigaction *ka, struct pt_regs * regs,
171 siginfo_t *info, sigset_t *set)
172{
173 struct rt_sigframe __user *frame;
174 unsigned long save_sp = PT_REGS_RSP(regs);
175 int err = 0;
176 struct task_struct *me = current;
177
178 frame = (struct rt_sigframe __user *)
179 round_down(stack_top - sizeof(struct rt_sigframe), 16);
180 /* Subtract 128 for a red zone and 8 for proper alignment */
181 frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
182
183 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
184 goto out;
185
186 if (ka->sa.sa_flags & SA_SIGINFO) {
187 err |= copy_siginfo_to_user(&frame->info, info);
188 if (err)
189 goto out;
190 }
191
192 /*
193 * Update SP now because the page fault handler refuses to extend
194 * the stack if the faulting address is too far below the current
 195	 * SP, which the frame now certainly is. If there's an error, the original
196 * value is restored on the way out.
197 * When writing the sigcontext to the stack, we have to write the
198 * original value, so that's passed to copy_sc_to_user, which does
199 * the right thing with it.
200 */
201 PT_REGS_RSP(regs) = (unsigned long) frame;
202
203 /* Create the ucontext. */
204 err |= __put_user(0, &frame->uc.uc_flags);
205 err |= __put_user(0, &frame->uc.uc_link);
206 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
207 err |= __put_user(sas_ss_flags(save_sp),
208 &frame->uc.uc_stack.ss_flags);
209 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
210 err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
211 set->sig[0], save_sp);
212 err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
213 if (sizeof(*set) == 16) {
214 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
215 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
216 }
217 else
218 err |= __copy_to_user(&frame->uc.uc_sigmask, set,
219 sizeof(*set));
220
221 /*
222 * Set up to return from userspace. If provided, use a stub
223 * already in userspace.
224 */
225 /* x86-64 should always use SA_RESTORER. */
226 if (ka->sa.sa_flags & SA_RESTORER)
227 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
228 else
229 /* could use a vstub here */
230 goto restore_sp;
231
232 if (err)
233 goto restore_sp;
234
235 /* Set up registers for signal handler */
236 {
237 struct exec_domain *ed = current_thread_info()->exec_domain;
238 if (unlikely(ed && ed->signal_invmap && sig < 32))
239 sig = ed->signal_invmap[sig];
240 }
241
242 PT_REGS_RDI(regs) = sig;
243 /* In case the signal handler was declared without prototypes */
244 PT_REGS_RAX(regs) = 0;
245
246 /*
 247	 * This also works for non-SA_SIGINFO handlers because they expect the
248 * next argument after the signal number on the stack.
249 */
250 PT_REGS_RSI(regs) = (unsigned long) &frame->info;
251 PT_REGS_RDX(regs) = (unsigned long) &frame->uc;
252 PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler;
253 out:
254 return err;
255
256restore_sp:
257 PT_REGS_RSP(regs) = save_sp;
258 return err;
259}
260
261long sys_rt_sigreturn(struct pt_regs *regs)
262{
263 unsigned long sp = PT_REGS_SP(&current->thread.regs);
264 struct rt_sigframe __user *frame =
265 (struct rt_sigframe __user *)(sp - 8);
266 struct ucontext __user *uc = &frame->uc;
267 sigset_t set;
268
269 if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
270 goto segfault;
271
272 sigdelsetmask(&set, ~_BLOCKABLE);
273
274 spin_lock_irq(&current->sighand->siglock);
275 current->blocked = set;
276 recalc_sigpending();
277 spin_unlock_irq(&current->sighand->siglock);
278
279 if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext,
280 &frame->fpstate))
281 goto segfault;
282
283 /* Avoid ERESTART handling */
284 PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
285 return PT_REGS_SYSCALL_RET(&current->thread.regs);
286
287 segfault:
288 force_sig(SIGSEGV, current);
289 return 0;
290}
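
Editor's note: the only non-obvious arithmetic in setup_signal_stack_si() above is the frame placement: round down to 16 bytes, step over the 128-byte x86-64 red zone, then subtract 8 more so the handler starts with the stack alignment it would have right after a call. Below is a stand-alone sketch of that computation; all values are invented for illustration.

    #include <stdio.h>

    #define DEMO_ROUND_DOWN(x, a)   ((x) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long stack_top  = 0x7ffff7ff0000UL;  /* hypothetical */
            unsigned long frame_size = 1088;              /* hypothetical sizeof(struct rt_sigframe) */
            unsigned long frame;

            frame  = DEMO_ROUND_DOWN(stack_top - frame_size, 16);
            frame -= 128;   /* skip the ABI red zone below the interrupted RSP */
            frame -= 8;     /* handler entry then sees RSP % 16 == 8, as after a call */

            printf("frame placed at %#lx\n", frame);
            return 0;
    }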
diff --git a/arch/um/sys-x86_64/stub.S b/arch/um/sys-x86_64/stub.S
new file mode 100644
index 00000000000..20e4a96a6dc
--- /dev/null
+++ b/arch/um/sys-x86_64/stub.S
@@ -0,0 +1,66 @@
1#include "as-layout.h"
2
3 .globl syscall_stub
4.section .__syscall_stub, "ax"
5syscall_stub:
6 syscall
7 /* We don't have 64-bit constants, so this constructs the address
8 * we need.
9 */
10 movq $(STUB_DATA >> 32), %rbx
11 salq $32, %rbx
12 movq $(STUB_DATA & 0xffffffff), %rcx
13 or %rcx, %rbx
14 movq %rax, (%rbx)
15 int3
16
17 .globl batch_syscall_stub
18batch_syscall_stub:
19 mov $(STUB_DATA >> 32), %rbx
20 sal $32, %rbx
21 mov $(STUB_DATA & 0xffffffff), %rax
22 or %rax, %rbx
23 /* load pointer to first operation */
24 mov %rbx, %rsp
25 add $0x10, %rsp
26again:
27 /* load length of additional data */
28 mov 0x0(%rsp), %rax
29
30 /* if(length == 0) : end of list */
31 /* write possible 0 to header */
32 mov %rax, 8(%rbx)
33 cmp $0, %rax
34 jz done
35
36 /* save current pointer */
37 mov %rsp, 8(%rbx)
38
39 /* skip additional data */
40 add %rax, %rsp
41
42 /* load syscall-# */
43 pop %rax
44
45 /* load syscall params */
46 pop %rdi
47 pop %rsi
48 pop %rdx
49 pop %r10
50 pop %r8
51 pop %r9
52
53 /* execute syscall */
54 syscall
55
56 /* check return value */
57 pop %rcx
58 cmp %rcx, %rax
59 je again
60
61done:
62 /* save return value */
63 mov %rax, (%rbx)
64
65 /* stop */
66 int3
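
Editor's note: batch_syscall_stub walks a buffer that another part of UML (not in this hunk) fills in at STUB_DATA. Reconstructing the layout from the assembly above: two result words at the start, then, from offset 0x10, a sequence of records terminated by a zero length. The C description below is a hedged reconstruction with invented names, not a structure taken from the patch.

    /* Result header at STUB_DATA, as the stub leaves it behind: */
    struct demo_stub_header {
            unsigned long ret;      /* %rax of the last syscall executed */
            unsigned long where;    /* pointer to the record whose return value
                                     * mismatched, or 0 if the terminating
                                     * zero-length record was reached */
    };

    /*
     * Each record, laid out as consecutive unsigned longs starting at
     * STUB_DATA + 0x10:
     *
     *   [0]            length in bytes of the extra data that follows
     *                  (0 ends the batch)
     *   [1 ..]         extra data (skipped by the stub itself)
     *   [n]            syscall number          -> %rax
     *   [n+1 .. n+6]   arguments               -> %rdi %rsi %rdx %r10 %r8 %r9
     *   [n+7]          expected return value; on mismatch the stub stops
     */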
diff --git a/arch/um/sys-x86_64/stub_segv.c b/arch/um/sys-x86_64/stub_segv.c
new file mode 100644
index 00000000000..ced051afc70
--- /dev/null
+++ b/arch/um/sys-x86_64/stub_segv.c
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include <signal.h>
7#include "as-layout.h"
8#include "sysdep/stub.h"
9#include "sysdep/faultinfo.h"
10#include "sysdep/sigcontext.h"
11
12void __attribute__ ((__section__ (".__syscall_stub")))
13stub_segv_handler(int sig)
14{
15 struct ucontext *uc;
16
17 __asm__ __volatile__("movq %%rdx, %0" : "=g" (uc) :);
18 GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA),
19 &uc->uc_mcontext);
20 trap_myself();
21}
22
diff --git a/arch/um/sys-x86_64/syscall_table.c b/arch/um/sys-x86_64/syscall_table.c
new file mode 100644
index 00000000000..47d469e7e7c
--- /dev/null
+++ b/arch/um/sys-x86_64/syscall_table.c
@@ -0,0 +1,65 @@
1/*
2 * System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c
3 * with some changes for UML.
4 */
5
6#include <linux/linkage.h>
7#include <linux/sys.h>
8#include <linux/cache.h>
9#include <kern_constants.h>
10
11#define __NO_STUBS
12
13/*
14 * Below you can see, in terms of #define's, the differences between the x86-64
15 * and the UML syscall table.
16 */
17
18/* Not going to be implemented by UML, since we have no hardware. */
19#define stub_iopl sys_ni_syscall
20#define sys_ioperm sys_ni_syscall
21
22/*
23 * The UML TLS problem. Note that x86_64 does not implement this, so the below
 24 * is needed only for ia32 compatibility.
25 */
26
27/* On UML we call it this way ("old" means it's not mmap2) */
28#define sys_mmap old_mmap
29
30#define stub_clone sys_clone
31#define stub_fork sys_fork
32#define stub_vfork sys_vfork
33#define stub_execve sys_execve
34#define stub_rt_sigsuspend sys_rt_sigsuspend
35#define stub_sigaltstack sys_sigaltstack
36#define stub_rt_sigreturn sys_rt_sigreturn
37
38#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
39#undef _ASM_X86_UNISTD_64_H
40#include "../../x86/include/asm/unistd_64.h"
41
42#undef __SYSCALL
43#define __SYSCALL(nr, sym) [ nr ] = sym,
44#undef _ASM_X86_UNISTD_64_H
45
46typedef void (*sys_call_ptr_t)(void);
47
48extern void sys_ni_syscall(void);
49
50/*
51 * We used to have a trick here which made sure that holes in the
52 * x86_64 table were filled in with sys_ni_syscall, but a comment in
53 * unistd_64.h says that holes aren't allowed, so the trick was
54 * removed.
 55 * The trick looked like this:
56 * [0 ... UM_NR_syscall_max] = &sys_ni_syscall
57 * before including unistd_64.h - the later initializations overwrote
58 * the sys_ni_syscall filler.
59 */
60
61sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
62#include "../../x86/include/asm/unistd_64.h"
63};
64
65int syscall_table_size = sizeof(sys_call_table);
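
Editor's note: the table above is generated with the usual two-pass "__SYSCALL" X-macro trick: the first include of unistd_64.h expands every entry into an extern declaration, then __SYSCALL is redefined and the same header is included again inside the array initializer to emit designated initializers. Below is a self-contained miniature of the same trick, with the list inlined instead of living in unistd_64.h; all names are invented.

    #include <stdio.h>

    #define DEMO_SYSCALL_LIST          \
            DEMO_SYSCALL(0, demo_read) \
            DEMO_SYSCALL(1, demo_write)

    /* pass 1: declarations */
    #define DEMO_SYSCALL(nr, sym) static long sym(void);
    DEMO_SYSCALL_LIST
    #undef DEMO_SYSCALL

    /* pass 2: designated-initializer table entries */
    #define DEMO_SYSCALL(nr, sym) [nr] = sym,
    static long (*demo_table[])(void) = {
            DEMO_SYSCALL_LIST
    };
    #undef DEMO_SYSCALL

    static long demo_read(void)  { return 100; }
    static long demo_write(void) { return 200; }

    int main(void)
    {
            printf("%ld %ld\n", demo_table[0](), demo_table[1]());  /* 100 200 */
            return 0;
    }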
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
new file mode 100644
index 00000000000..f3d82bb6e15
--- /dev/null
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -0,0 +1,102 @@
1/*
2 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Copyright 2003 PathScale, Inc.
4 *
5 * Licensed under the GPL
6 */
7
8#include "linux/linkage.h"
9#include "linux/personality.h"
10#include "linux/utsname.h"
11#include "asm/prctl.h" /* XXX This should get the constants from libc */
12#include "asm/uaccess.h"
13#include "os.h"
14
15long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
16{
17 unsigned long *ptr = addr, tmp;
18 long ret;
19 int pid = task->mm->context.id.u.pid;
20
21 /*
22 * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
23 * be safe), we need to call arch_prctl on the host because
24 * setting %fs may result in something else happening (like a
25 * GDT or thread.fs being set instead). So, we let the host
26 * fiddle the registers and thread struct and restore the
27 * registers afterwards.
28 *
 29	 * So, the saved registers are stored to the process (this is
 30	 * needed because a stub may have been the last thing to run),
31 * arch_prctl is run on the host, then the registers are read
32 * back.
33 */
34 switch (code) {
35 case ARCH_SET_FS:
36 case ARCH_SET_GS:
37 ret = restore_registers(pid, &current->thread.regs.regs);
38 if (ret)
39 return ret;
40 break;
41 case ARCH_GET_FS:
42 case ARCH_GET_GS:
43 /*
44 * With these two, we read to a local pointer and
45 * put_user it to the userspace pointer that we were
46 * given. If addr isn't valid (because it hasn't been
47 * faulted in or is just bogus), we want put_user to
48 * fault it in (or return -EFAULT) instead of having
49 * the host return -EFAULT.
50 */
51 ptr = &tmp;
52 }
53
54 ret = os_arch_prctl(pid, code, ptr);
55 if (ret)
56 return ret;
57
58 switch (code) {
59 case ARCH_SET_FS:
60 current->thread.arch.fs = (unsigned long) ptr;
61 ret = save_registers(pid, &current->thread.regs.regs);
62 break;
63 case ARCH_SET_GS:
64 ret = save_registers(pid, &current->thread.regs.regs);
65 break;
66 case ARCH_GET_FS:
67 ret = put_user(tmp, addr);
68 break;
69 case ARCH_GET_GS:
70 ret = put_user(tmp, addr);
71 break;
72 }
73
74 return ret;
75}
76
77long sys_arch_prctl(int code, unsigned long addr)
78{
79 return arch_prctl(current, code, (unsigned long __user *) addr);
80}
81
82long sys_clone(unsigned long clone_flags, unsigned long newsp,
83 void __user *parent_tid, void __user *child_tid)
84{
85 long ret;
86
87 if (!newsp)
88 newsp = UPT_SP(&current->thread.regs.regs);
89 current->thread.forking = 1;
90 ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
91 child_tid);
92 current->thread.forking = 0;
93 return ret;
94}
95
96void arch_switch_to(struct task_struct *to)
97{
98 if ((to->thread.arch.fs == 0) || (to->mm == NULL))
99 return;
100
101 arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
102}
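
Editor's note: from the guest's point of view, the ARCH_GET_FS/ARCH_GET_GS paths above behave like the native syscall: the value is read on the host into a local and then put_user()ed back through the caller's pointer. A hypothetical guest-side check (not part of the patch, assuming an x86-64 Linux userland with <asm/prctl.h> available) would look like:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/prctl.h>

    int main(void)
    {
            unsigned long fsbase = 0;

            /* glibc has no wrapper for arch_prctl, so issue it directly. */
            if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) != 0) {
                    perror("arch_prctl");
                    return 1;
            }
            printf("FS base: %#lx\n", fsbase);
            return 0;
    }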
diff --git a/arch/um/sys-x86_64/sysrq.c b/arch/um/sys-x86_64/sysrq.c
new file mode 100644
index 00000000000..f4f82beb350
--- /dev/null
+++ b/arch/um/sys-x86_64/sysrq.c
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2003 PathScale, Inc.
3 *
4 * Licensed under the GPL
5 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/sched.h>
10#include <linux/utsname.h>
11#include <asm/current.h>
12#include <asm/ptrace.h>
13#include "sysrq.h"
14
15void __show_regs(struct pt_regs *regs)
16{
17 printk("\n");
18 print_modules();
19 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
20 current->comm, print_tainted(), init_utsname()->release);
21 printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff,
22 PT_REGS_RIP(regs));
23 printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs),
24 PT_REGS_EFLAGS(regs));
25 printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
26 PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs));
27 printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
28 PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs));
29 printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
30 PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs));
31 printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
32 PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs));
33 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
34 PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs));
35}
36
37void show_regs(struct pt_regs *regs)
38{
39 __show_regs(regs);
40 show_trace(current, (unsigned long *) &regs);
41}
diff --git a/arch/um/sys-x86_64/tls.c b/arch/um/sys-x86_64/tls.c
new file mode 100644
index 00000000000..f7ba46200ec
--- /dev/null
+++ b/arch/um/sys-x86_64/tls.c
@@ -0,0 +1,17 @@
1#include "linux/sched.h"
2
3void clear_flushed_tls(struct task_struct *task)
4{
5}
6
7int arch_copy_tls(struct task_struct *t)
8{
9 /*
10 * If CLONE_SETTLS is set, we need to save the thread id
11 * (which is argument 5, child_tid, of clone) so it can be set
12 * during context switches.
13 */
14 t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
15
16 return 0;
17}
diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c
new file mode 100644
index 00000000000..973585414a6
--- /dev/null
+++ b/arch/um/sys-x86_64/user-offsets.c
@@ -0,0 +1,65 @@
1#include <stdio.h>
2#include <stddef.h>
3#include <signal.h>
4#include <sys/poll.h>
5#include <sys/mman.h>
6#include <sys/user.h>
7#define __FRAME_OFFSETS
8#include <asm/ptrace.h>
9#include <asm/types.h>
10
11#define DEFINE(sym, val) \
12 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
13
14#define DEFINE_LONGS(sym, val) \
15 asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
16
17#define OFFSET(sym, str, mem) \
18 DEFINE(sym, offsetof(struct str, mem));
19
20void foo(void)
21{
22 OFFSET(HOST_SC_CR2, sigcontext, cr2);
23 OFFSET(HOST_SC_ERR, sigcontext, err);
24 OFFSET(HOST_SC_TRAPNO, sigcontext, trapno);
25
26 DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
27 DEFINE_LONGS(HOST_RBX, RBX);
28 DEFINE_LONGS(HOST_RCX, RCX);
29 DEFINE_LONGS(HOST_RDI, RDI);
30 DEFINE_LONGS(HOST_RSI, RSI);
31 DEFINE_LONGS(HOST_RDX, RDX);
32 DEFINE_LONGS(HOST_RBP, RBP);
33 DEFINE_LONGS(HOST_RAX, RAX);
34 DEFINE_LONGS(HOST_R8, R8);
35 DEFINE_LONGS(HOST_R9, R9);
36 DEFINE_LONGS(HOST_R10, R10);
37 DEFINE_LONGS(HOST_R11, R11);
38 DEFINE_LONGS(HOST_R12, R12);
39 DEFINE_LONGS(HOST_R13, R13);
40 DEFINE_LONGS(HOST_R14, R14);
41 DEFINE_LONGS(HOST_R15, R15);
42 DEFINE_LONGS(HOST_ORIG_RAX, ORIG_RAX);
43 DEFINE_LONGS(HOST_CS, CS);
44 DEFINE_LONGS(HOST_SS, SS);
45 DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
46#if 0
47 DEFINE_LONGS(HOST_FS, FS);
48 DEFINE_LONGS(HOST_GS, GS);
49 DEFINE_LONGS(HOST_DS, DS);
50 DEFINE_LONGS(HOST_ES, ES);
51#endif
52
53 DEFINE_LONGS(HOST_IP, RIP);
54 DEFINE_LONGS(HOST_SP, RSP);
55 DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
56
57 /* XXX Duplicated between i386 and x86_64 */
58 DEFINE(UM_POLLIN, POLLIN);
59 DEFINE(UM_POLLPRI, POLLPRI);
60 DEFINE(UM_POLLOUT, POLLOUT);
61
62 DEFINE(UM_PROT_READ, PROT_READ);
63 DEFINE(UM_PROT_WRITE, PROT_WRITE);
64 DEFINE(UM_PROT_EXEC, PROT_EXEC);
65}
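
Editor's note: the DEFINE()/DEFINE_LONGS() markers above never execute. This file is only compiled to assembly, and each asm statement plants a "->NAME value comment" line in the .s output, which the build post-processes into a generated header of constants (the post-processing script is outside this hunk, so the exact output below is an assumption). For example, with RBX at byte offset 40 in the host ptrace frame,

    DEFINE_LONGS(HOST_RBX, RBX);

compiles to an assembly line roughly like

    ->HOST_RBX $5 RBX

from which the build derives something equivalent to

    #define HOST_RBX 5    /* 40 / sizeof(unsigned long) */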
diff --git a/arch/um/sys-x86_64/vdso/Makefile b/arch/um/sys-x86_64/vdso/Makefile
new file mode 100644
index 00000000000..5dffe6d4668
--- /dev/null
+++ b/arch/um/sys-x86_64/vdso/Makefile
@@ -0,0 +1,90 @@
1#
2# Building vDSO images for x86.
3#
4
5VDSO64-y := y
6
7vdso-install-$(VDSO64-y) += vdso.so
8
9
10# files to link into the vdso
11vobjs-y := vdso-note.o um_vdso.o
12
13# files to link into kernel
14obj-$(VDSO64-y) += vdso.o vma.o
15
16vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
17
18$(obj)/vdso.o: $(obj)/vdso.so
19
20targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
21
22export CPPFLAGS_vdso.lds += -P -C
23
24VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
25 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
26
27$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
28
29$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
30 $(call if_changed,vdso)
31
32$(obj)/%.so: OBJCOPYFLAGS := -S
33$(obj)/%.so: $(obj)/%.so.dbg FORCE
34 $(call if_changed,objcopy)
35
36#
37# Don't omit frame pointers for ease of userspace debugging, but do
38# optimize sibling calls.
39#
40CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
41 $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
42 -fno-omit-frame-pointer -foptimize-sibling-calls
43
44$(vobjs): KBUILD_CFLAGS += $(CFL)
45
46#
47# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
48#
49CFLAGS_REMOVE_vdso-note.o = -pg
50CFLAGS_REMOVE_um_vdso.o = -pg
51
52targets += vdso-syms.lds
53obj-$(VDSO64-y) += vdso-syms.lds
54
55#
56# Match symbols in the DSO that look like VDSO*; produce a file of constants.
57#
58sed-vdsosym := -e 's/^00*/0/' \
59 -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
60quiet_cmd_vdsosym = VDSOSYM $@
61define cmd_vdsosym
62 $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
63endef
64
65$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
66 $(call if_changed,vdsosym)
67
68#
69# The DSO images are built using a special linker script.
70#
71quiet_cmd_vdso = VDSO $@
72 cmd_vdso = $(CC) -nostdlib -o $@ \
73 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
74 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
75 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
76
77VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
78GCOV_PROFILE := n
79
80#
81# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
82#
83quiet_cmd_vdso_install = INSTALL $@
84 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
85$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
86 @mkdir -p $(MODLIB)/vdso
87 $(call cmd,vdso_install)
88
89PHONY += vdso_install $(vdso-install-y)
90vdso_install: $(vdso-install-y)
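
Editor's note: the vdso-syms.lds rule above converts nm output for the unstripped DSO into linker-script assignments. Assuming, for illustration, that nm prints the absolute symbol as

    ffffffffff700000 A VDSO64_PRELINK

(the value comes from VDSO_PRELINK in vdso.lds.S), the sed expressions would emit

    VDSO64_PRELINK = 0xffffffffff700000;

so that code in the kernel proper can refer to addresses inside the DSO by symbol name.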
diff --git a/arch/um/sys-x86_64/vdso/checkundef.sh b/arch/um/sys-x86_64/vdso/checkundef.sh
new file mode 100644
index 00000000000..7ee90a9b549
--- /dev/null
+++ b/arch/um/sys-x86_64/vdso/checkundef.sh
@@ -0,0 +1,10 @@
1#!/bin/sh
2nm="$1"
3file="$2"
4$nm "$file" | grep '^ *U' > /dev/null 2>&1
5if [ $? -eq 1 ]; then
6 exit 0
7else
8 echo "$file: undefined symbols found" >&2
9 exit 1
10fi
diff --git a/arch/um/sys-x86_64/vdso/um_vdso.c b/arch/um/sys-x86_64/vdso/um_vdso.c
new file mode 100644
index 00000000000..7c441b59d37
--- /dev/null
+++ b/arch/um/sys-x86_64/vdso/um_vdso.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This vDSO turns all calls into a syscall so that UML can trap them.
9 */
10
11
12/* Disable profiling for userspace code */
13#define DISABLE_BRANCH_PROFILING
14
15#include <linux/time.h>
16#include <linux/getcpu.h>
17#include <asm/unistd.h>
18
19int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
20{
21 long ret;
22
23 asm("syscall" : "=a" (ret) :
24 "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
25
26 return ret;
27}
28int clock_gettime(clockid_t, struct timespec *)
29 __attribute__((weak, alias("__vdso_clock_gettime")));
30
31int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
32{
33 long ret;
34
35 asm("syscall" : "=a" (ret) :
36 "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
37
38 return ret;
39}
40int gettimeofday(struct timeval *, struct timezone *)
41 __attribute__((weak, alias("__vdso_gettimeofday")));
42
43time_t __vdso_time(time_t *t)
44{
45 long secs;
46
47 asm volatile("syscall"
48 : "=a" (secs)
49 : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
50
51 return secs;
52}
53int time(time_t *t) __attribute__((weak, alias("__vdso_time")));
54
55long
56__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
57{
58 /*
 59	 * UML does not support SMP, so we can cheat here. :)
60 */
61
62 if (cpu)
63 *cpu = 0;
64 if (node)
65 *node = 0;
66
67 return 0;
68}
69
70long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
71 __attribute__((weak, alias("__vdso_getcpu")));
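
Editor's note: because every wrapper above ends in a real syscall instruction, ordinary guest code keeps working unmodified while UML still gets to intercept the call; the weak aliases let the vDSO satisfy direct calls such as clock_gettime() when the dynamic loader resolves them there. A plain guest-side example follows (nothing UML-specific; whether libc actually routes it through the vDSO depends on the libc in use):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
                    perror("clock_gettime");
                    return 1;
            }
            printf("%ld.%09ld\n", (long) ts.tv_sec, (long) ts.tv_nsec);
            return 0;
    }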
diff --git a/arch/um/sys-x86_64/vdso/vdso-layout.lds.S b/arch/um/sys-x86_64/vdso/vdso-layout.lds.S
new file mode 100644
index 00000000000..634a2cf6204
--- /dev/null
+++ b/arch/um/sys-x86_64/vdso/vdso-layout.lds.S
@@ -0,0 +1,64 @@
1/*
2 * Linker script for vDSO. This is an ELF shared object prelinked to
3 * its virtual address, and with only one read-only segment.
4 * This script controls its layout.
5 */
6
7SECTIONS
8{
9 . = VDSO_PRELINK + SIZEOF_HEADERS;
10
11 .hash : { *(.hash) } :text
12 .gnu.hash : { *(.gnu.hash) }
13 .dynsym : { *(.dynsym) }
14 .dynstr : { *(.dynstr) }
15 .gnu.version : { *(.gnu.version) }
16 .gnu.version_d : { *(.gnu.version_d) }
17 .gnu.version_r : { *(.gnu.version_r) }
18
19 .note : { *(.note.*) } :text :note
20
21 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
22 .eh_frame : { KEEP (*(.eh_frame)) } :text
23
24 .dynamic : { *(.dynamic) } :text :dynamic
25
26 .rodata : { *(.rodata*) } :text
27 .data : {
28 *(.data*)
29 *(.sdata*)
30 *(.got.plt) *(.got)
31 *(.gnu.linkonce.d.*)
32 *(.bss*)
33 *(.dynbss*)
34 *(.gnu.linkonce.b.*)
35 }
36
37 .altinstructions : { *(.altinstructions) }
38 .altinstr_replacement : { *(.altinstr_replacement) }
39
40 /*
41 * Align the actual code well away from the non-instruction data.
42 * This is the best thing for the I-cache.
43 */
44 . = ALIGN(0x100);
45
46 .text : { *(.text*) } :text =0x90909090
47}
48
49/*
50 * Very old versions of ld do not recognize this name token; use the constant.
51 */
52#define PT_GNU_EH_FRAME 0x6474e550
53
54/*
55 * We must supply the ELF program headers explicitly to get just one
56 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
57 */
58PHDRS
59{
60 text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
61 dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
62 note PT_NOTE FLAGS(4); /* PF_R */
63 eh_frame_hdr PT_GNU_EH_FRAME;
64}
diff --git a/arch/um/sys-x86_64/vdso/vdso-note.S b/arch/um/sys-x86_64/vdso/vdso-note.S
new file mode 100644
index 00000000000..79a071e4357
--- /dev/null
+++ b/arch/um/sys-x86_64/vdso/vdso-note.S
@@ -0,0 +1,12 @@
1/*
2 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
3 * Here we can supply some information useful to userland.
4 */
5
6#include <linux/uts.h>
7#include <linux/version.h>
8#include <linux/elfnote.h>
9
10ELFNOTE_START(Linux, 0, "a")
11 .long LINUX_VERSION_CODE
12ELFNOTE_END
diff --git a/arch/um/sys-x86_64/vdso/vdso.S b/arch/um/sys-x86_64/vdso/vdso.S
new file mode 100644
index 00000000000..ec82c1686bd
--- /dev/null
+++ b/arch/um/sys-x86_64/vdso/vdso.S
@@ -0,0 +1,10 @@
1#include <linux/init.h>
2
3__INITDATA
4
5 .globl vdso_start, vdso_end
6vdso_start:
7 .incbin "arch/um/sys-x86_64/vdso/vdso.so"
8vdso_end:
9
10__FINIT
diff --git a/arch/um/sys-x86_64/vdso/vdso.lds.S b/arch/um/sys-x86_64/vdso/vdso.lds.S
new file mode 100644
index 00000000000..b96b2677cad
--- /dev/null
+++ b/arch/um/sys-x86_64/vdso/vdso.lds.S
@@ -0,0 +1,32 @@
1/*
2 * Linker script for 64-bit vDSO.
3 * We #include the file to define the layout details.
4 * Here we only choose the prelinked virtual address.
5 *
6 * This file defines the version script giving the user-exported symbols in
7 * the DSO. We can define local symbols here called VDSO* to make their
8 * values visible using the asm-x86/vdso.h macros from the kernel proper.
9 */
10
11#define VDSO_PRELINK 0xffffffffff700000
12#include "vdso-layout.lds.S"
13
14/*
15 * This controls what userland symbols we export from the vDSO.
16 */
17VERSION {
18 LINUX_2.6 {
19 global:
20 clock_gettime;
21 __vdso_clock_gettime;
22 gettimeofday;
23 __vdso_gettimeofday;
24 getcpu;
25 __vdso_getcpu;
26 time;
27 __vdso_time;
28 local: *;
29 };
30}
31
32VDSO64_PRELINK = VDSO_PRELINK;
diff --git a/arch/um/sys-x86_64/vdso/vma.c b/arch/um/sys-x86_64/vdso/vma.c
new file mode 100644
index 00000000000..91f4ec9a0a5
--- /dev/null
+++ b/arch/um/sys-x86_64/vdso/vma.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/slab.h>
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <asm/page.h>
13#include <linux/init.h>
14
15unsigned int __read_mostly vdso_enabled = 1;
16unsigned long um_vdso_addr;
17
18extern unsigned long task_size;
19extern char vdso_start[], vdso_end[];
20
21static struct page **vdsop;
22
23static int __init init_vdso(void)
24{
25 struct page *um_vdso;
26
27 BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
28
29 um_vdso_addr = task_size - PAGE_SIZE;
30
31 vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
32 if (!vdsop)
33 goto oom;
34
35 um_vdso = alloc_page(GFP_KERNEL);
36 if (!um_vdso) {
37 kfree(vdsop);
38
39 goto oom;
40 }
41
42 copy_page(page_address(um_vdso), vdso_start);
43 *vdsop = um_vdso;
44
45 return 0;
46
47oom:
48 printk(KERN_ERR "Cannot allocate vdso\n");
49 vdso_enabled = 0;
50
51 return -ENOMEM;
52}
53subsys_initcall(init_vdso);
54
55int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
56{
57 int err;
58 struct mm_struct *mm = current->mm;
59
60 if (!vdso_enabled)
61 return 0;
62
63 down_write(&mm->mmap_sem);
64
65 err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
66 VM_READ|VM_EXEC|
67 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
68 VM_ALWAYSDUMP,
69 vdsop);
70
71 up_write(&mm->mmap_sem);
72
73 return err;
74}