author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 14:51:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 14:51:57 -0400
commit	81f56e5375e84689b891e0e6c5a02ec12a1f18d9 (patch)
tree	a1e128a71ff24fc705428df86a858076cfe4bc13
parent	6c09931b3f987898f5c581d267ef269f5e2e9575 (diff)
parent	27aa55c5e5123fa8b8ad0156559d34d7edff58ca (diff)
Merge tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64
Pull arm64 support from Catalin Marinas:
 "Linux support for the 64-bit ARM architecture (AArch64)

  Features currently supported:
   - 39-bit address space for user and kernel (each)
   - 4KB and 64KB page configurations
   - Compat (32-bit) user applications (ARMv7, EABI only)
   - Flattened Device Tree (mandated for all AArch64 platforms)
   - ARM generic timers"

* tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64: (35 commits)
  arm64: ptrace: remove obsolete ptrace request numbers from user headers
  arm64: Do not set the SMP/nAMP processor bit
  arm64: MAINTAINERS update
  arm64: Build infrastructure
  arm64: Miscellaneous header files
  arm64: Generic timers support
  arm64: Loadable modules
  arm64: Miscellaneous library functions
  arm64: Performance counters support
  arm64: Add support for /proc/sys/debug/exception-trace
  arm64: Debugging support
  arm64: Floating point and SIMD
  arm64: 32-bit (compat) applications support
  arm64: User access library functions
  arm64: Signal handling support
  arm64: VDSO support
  arm64: System calls handling
  arm64: ELF definitions
  arm64: SMP support
  arm64: DMA mapping API
  ...
-rw-r--r--Documentation/arm64/booting.txt152
-rw-r--r--Documentation/arm64/memory.txt73
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/arm64/Kconfig222
-rw-r--r--arch/arm64/Kconfig.debug27
-rw-r--r--arch/arm64/Makefile71
-rw-r--r--arch/arm64/boot/.gitignore2
-rw-r--r--arch/arm64/boot/Makefile36
-rw-r--r--arch/arm64/boot/install.sh46
-rw-r--r--arch/arm64/configs/defconfig85
-rw-r--r--arch/arm64/include/asm/Kbuild51
-rw-r--r--arch/arm64/include/asm/arm_generic.h100
-rw-r--r--arch/arm64/include/asm/asm-offsets.h1
-rw-r--r--arch/arm64/include/asm/assembler.h109
-rw-r--r--arch/arm64/include/asm/atomic.h305
-rw-r--r--arch/arm64/include/asm/auxvec.h22
-rw-r--r--arch/arm64/include/asm/barrier.h52
-rw-r--r--arch/arm64/include/asm/bitops.h53
-rw-r--r--arch/arm64/include/asm/bitsperlong.h23
-rw-r--r--arch/arm64/include/asm/byteorder.h21
-rw-r--r--arch/arm64/include/asm/cache.h32
-rw-r--r--arch/arm64/include/asm/cacheflush.h148
-rw-r--r--arch/arm64/include/asm/cachetype.h48
-rw-r--r--arch/arm64/include/asm/cmpxchg.h173
-rw-r--r--arch/arm64/include/asm/compat.h242
-rw-r--r--arch/arm64/include/asm/compiler.h30
-rw-r--r--arch/arm64/include/asm/cputable.h30
-rw-r--r--arch/arm64/include/asm/cputype.h49
-rw-r--r--arch/arm64/include/asm/debug-monitors.h88
-rw-r--r--arch/arm64/include/asm/device.h26
-rw-r--r--arch/arm64/include/asm/dma-mapping.h124
-rw-r--r--arch/arm64/include/asm/elf.h179
-rw-r--r--arch/arm64/include/asm/exception.h23
-rw-r--r--arch/arm64/include/asm/exec.h23
-rw-r--r--arch/arm64/include/asm/fb.h34
-rw-r--r--arch/arm64/include/asm/fcntl.h29
-rw-r--r--arch/arm64/include/asm/fpsimd.h64
-rw-r--r--arch/arm64/include/asm/futex.h136
-rw-r--r--arch/arm64/include/asm/hardirq.h52
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h137
-rw-r--r--arch/arm64/include/asm/hwcap.h53
-rw-r--r--arch/arm64/include/asm/io.h258
-rw-r--r--arch/arm64/include/asm/irq.h8
-rw-r--r--arch/arm64/include/asm/irqflags.h91
-rw-r--r--arch/arm64/include/asm/memblock.h21
-rw-r--r--arch/arm64/include/asm/memory.h144
-rw-r--r--arch/arm64/include/asm/mmu.h30
-rw-r--r--arch/arm64/include/asm/mmu_context.h152
-rw-r--r--arch/arm64/include/asm/module.h23
-rw-r--r--arch/arm64/include/asm/page.h67
-rw-r--r--arch/arm64/include/asm/param.h23
-rw-r--r--arch/arm64/include/asm/perf_event.h22
-rw-r--r--arch/arm64/include/asm/pgalloc.h113
-rw-r--r--arch/arm64/include/asm/pgtable-2level-hwdef.h43
-rw-r--r--arch/arm64/include/asm/pgtable-2level-types.h60
-rw-r--r--arch/arm64/include/asm/pgtable-3level-hwdef.h50
-rw-r--r--arch/arm64/include/asm/pgtable-3level-types.h66
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h94
-rw-r--r--arch/arm64/include/asm/pgtable.h328
-rw-r--r--arch/arm64/include/asm/pmu.h82
-rw-r--r--arch/arm64/include/asm/proc-fns.h50
-rw-r--r--arch/arm64/include/asm/processor.h175
-rw-r--r--arch/arm64/include/asm/prom.h1
-rw-r--r--arch/arm64/include/asm/ptrace.h207
-rw-r--r--arch/arm64/include/asm/setup.h26
-rw-r--r--arch/arm64/include/asm/shmparam.h28
-rw-r--r--arch/arm64/include/asm/sigcontext.h69
-rw-r--r--arch/arm64/include/asm/siginfo.h23
-rw-r--r--arch/arm64/include/asm/signal.h24
-rw-r--r--arch/arm64/include/asm/signal32.h53
-rw-r--r--arch/arm64/include/asm/smp.h69
-rw-r--r--arch/arm64/include/asm/sparsemem.h24
-rw-r--r--arch/arm64/include/asm/spinlock.h202
-rw-r--r--arch/arm64/include/asm/spinlock_types.h38
-rw-r--r--arch/arm64/include/asm/stacktrace.h29
-rw-r--r--arch/arm64/include/asm/stat.h62
-rw-r--r--arch/arm64/include/asm/statfs.h23
-rw-r--r--arch/arm64/include/asm/syscall.h101
-rw-r--r--arch/arm64/include/asm/syscalls.h40
-rw-r--r--arch/arm64/include/asm/system_misc.h54
-rw-r--r--arch/arm64/include/asm/thread_info.h127
-rw-r--r--arch/arm64/include/asm/timex.h29
-rw-r--r--arch/arm64/include/asm/tlb.h190
-rw-r--r--arch/arm64/include/asm/tlbflush.h122
-rw-r--r--arch/arm64/include/asm/traps.h30
-rw-r--r--arch/arm64/include/asm/uaccess.h297
-rw-r--r--arch/arm64/include/asm/ucontext.h30
-rw-r--r--arch/arm64/include/asm/unistd.h27
-rw-r--r--arch/arm64/include/asm/unistd32.h758
-rw-r--r--arch/arm64/include/asm/vdso.h41
-rw-r--r--arch/arm64/include/asm/vdso_datapage.h43
-rw-r--r--arch/arm64/kernel/.gitignore1
-rw-r--r--arch/arm64/kernel/Makefile27
-rw-r--r--arch/arm64/kernel/arm64ksyms.c46
-rw-r--r--arch/arm64/kernel/asm-offsets.c108
-rw-r--r--arch/arm64/kernel/cputable.c33
-rw-r--r--arch/arm64/kernel/debug-monitors.c288
-rw-r--r--arch/arm64/kernel/entry-fpsimd.S80
-rw-r--r--arch/arm64/kernel/entry.S695
-rw-r--r--arch/arm64/kernel/fpsimd.c106
-rw-r--r--arch/arm64/kernel/head.S510
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c880
-rw-r--r--arch/arm64/kernel/io.c64
-rw-r--r--arch/arm64/kernel/irq.c84
-rw-r--r--arch/arm64/kernel/kuser32.S77
-rw-r--r--arch/arm64/kernel/module.c456
-rw-r--r--arch/arm64/kernel/perf_event.c1368
-rw-r--r--arch/arm64/kernel/process.c408
-rw-r--r--arch/arm64/kernel/ptrace.c1126
-rw-r--r--arch/arm64/kernel/setup.c347
-rw-r--r--arch/arm64/kernel/signal.c437
-rw-r--r--arch/arm64/kernel/signal32.c876
-rw-r--r--arch/arm64/kernel/smp.c469
-rw-r--r--arch/arm64/kernel/stacktrace.c127
-rw-r--r--arch/arm64/kernel/sys.c138
-rw-r--r--arch/arm64/kernel/sys32.S282
-rw-r--r--arch/arm64/kernel/sys_compat.c164
-rw-r--r--arch/arm64/kernel/time.c65
-rw-r--r--arch/arm64/kernel/traps.c348
-rw-r--r--arch/arm64/kernel/vdso.c261
-rw-r--r--arch/arm64/kernel/vdso/.gitignore2
-rw-r--r--arch/arm64/kernel/vdso/Makefile63
-rwxr-xr-xarch/arm64/kernel/vdso/gen_vdso_offsets.sh15
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S242
-rw-r--r--arch/arm64/kernel/vdso/note.S28
-rw-r--r--arch/arm64/kernel/vdso/sigreturn.S37
-rw-r--r--arch/arm64/kernel/vdso/vdso.S33
-rw-r--r--arch/arm64/kernel/vdso/vdso.lds.S100
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S126
-rw-r--r--arch/arm64/lib/Makefile4
-rw-r--r--arch/arm64/lib/bitops.c25
-rw-r--r--arch/arm64/lib/clear_page.S39
-rw-r--r--arch/arm64/lib/clear_user.S58
-rw-r--r--arch/arm64/lib/copy_from_user.S66
-rw-r--r--arch/arm64/lib/copy_in_user.S63
-rw-r--r--arch/arm64/lib/copy_page.S46
-rw-r--r--arch/arm64/lib/copy_to_user.S61
-rw-r--r--arch/arm64/lib/delay.c55
-rw-r--r--arch/arm64/lib/strncpy_from_user.S50
-rw-r--r--arch/arm64/lib/strnlen_user.S47
-rw-r--r--arch/arm64/mm/Makefile4
-rw-r--r--arch/arm64/mm/cache.S168
-rw-r--r--arch/arm64/mm/context.c159
-rw-r--r--arch/arm64/mm/copypage.c34
-rw-r--r--arch/arm64/mm/dma-mapping.c79
-rw-r--r--arch/arm64/mm/extable.c17
-rw-r--r--arch/arm64/mm/fault.c534
-rw-r--r--arch/arm64/mm/flush.c135
-rw-r--r--arch/arm64/mm/init.c437
-rw-r--r--arch/arm64/mm/ioremap.c84
-rw-r--r--arch/arm64/mm/mm.h2
-rw-r--r--arch/arm64/mm/mmap.c144
-rw-r--r--arch/arm64/mm/mmu.c395
-rw-r--r--arch/arm64/mm/pgd.c54
-rw-r--r--arch/arm64/mm/proc-macros.S55
-rw-r--r--arch/arm64/mm/proc.S175
-rw-r--r--arch/arm64/mm/tlb.S71
-rw-r--r--drivers/clocksource/Kconfig5
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_generic.c232
-rw-r--r--fs/stat.c4
-rw-r--r--include/asm-generic/bitops/builtin-__ffs.h15
-rw-r--r--include/asm-generic/bitops/builtin-__fls.h15
-rw-r--r--include/asm-generic/bitops/builtin-ffs.h17
-rw-r--r--include/asm-generic/bitops/builtin-fls.h16
-rw-r--r--include/clocksource/arm_generic.h21
-rw-r--r--include/linux/elf.h3
-rw-r--r--init/Kconfig3
-rw-r--r--kernel/sysctl.c2
-rw-r--r--lib/Kconfig.debug6
-rw-r--r--tools/perf/perf.h6
171 files changed, 21762 insertions, 6 deletions
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
new file mode 100644
index 000000000000..9c4d388daddc
--- /dev/null
+++ b/Documentation/arm64/booting.txt
@@ -0,0 +1,152 @@
1 Booting AArch64 Linux
2 =====================
3
4Author: Will Deacon <will.deacon@arm.com>
5Date : 07 September 2012
6
7This document is based on the ARM booting document by Russell King and
8is relevant to all public releases of the AArch64 Linux kernel.
9
10The AArch64 exception model is made up of a number of exception levels
11(EL0 - EL3), with EL0 and EL1 having a secure and a non-secure
12counterpart. EL2 is the hypervisor level and exists only in non-secure
13mode. EL3 is the highest priority level and exists only in secure mode.
14
15For the purposes of this document, we will use the term `boot loader'
16simply to define all software that executes on the CPU(s) before control
17is passed to the Linux kernel. This may include secure monitor and
18hypervisor code, or it may just be a handful of instructions for
19preparing a minimal boot environment.
20
21Essentially, the boot loader should provide (as a minimum) the
22following:
23
241. Setup and initialise the RAM
252. Setup the device tree
263. Decompress the kernel image
274. Call the kernel image
28
29
301. Setup and initialise RAM
31---------------------------
32
33Requirement: MANDATORY
34
35The boot loader is expected to find and initialise all RAM that the
36kernel will use for volatile data storage in the system. It performs
37this in a machine dependent manner. (It may use internal algorithms
38to automatically locate and size all RAM, or it may use knowledge of
39the RAM in the machine, or any other method the boot loader designer
40sees fit.)
41
42
432. Setup the device tree
44-------------------------
45
46Requirement: MANDATORY
47
48The device tree blob (dtb) must be no bigger than 2 megabytes in size
49and placed at a 2-megabyte boundary within the first 512 megabytes from
50the start of the kernel image. This is to allow the kernel to map the
51blob using a single section mapping in the initial page tables.
52
53
543. Decompress the kernel image
55------------------------------
56
57Requirement: OPTIONAL
58
59The AArch64 kernel does not currently provide a decompressor and
60therefore requires decompression (gzip etc.) to be performed by the boot
61loader if a compressed Image target (e.g. Image.gz) is used. For
62bootloaders that do not implement this requirement, the uncompressed
63Image target is available instead.
64
65
664. Call the kernel image
67------------------------
68
69Requirement: MANDATORY
70
71The decompressed kernel image contains a 32-byte header as follows:
72
73 u32 magic = 0x14000008; /* branch to stext, little-endian */
74 u32 res0 = 0; /* reserved */
75 u64 text_offset; /* Image load offset */
76 u64 res1 = 0; /* reserved */
77 u64 res2 = 0; /* reserved */
78
79The image must be placed at the specified offset (currently 0x80000)
80from the start of the system RAM and called there. The start of the
81system RAM must be aligned to 2MB.
82
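A minimal C sketch of how a boot loader might represent and locate this
header (illustrative only; the struct and function names here are
hypothetical and not part of this series):

	#include <stdint.h>

	struct arm64_image_header {		/* 32 bytes, as described above */
		uint32_t magic;			/* 0x14000008: branch to stext, little-endian */
		uint32_t res0;			/* reserved, zero */
		uint64_t text_offset;		/* Image load offset from start of RAM */
		uint64_t res1;			/* reserved, zero */
		uint64_t res2;			/* reserved, zero */
	};

	/* Physical address at which to place the Image, given the DRAM base. */
	static uint64_t arm64_image_load_addr(const struct arm64_image_header *hdr,
					      uint64_t dram_base)
	{
		return dram_base + hdr->text_offset;	/* currently 0x80000 */
	}
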
83Before jumping into the kernel, the following conditions must be met:
84
85- Quiesce all DMA capable devices so that memory does not get
86 corrupted by bogus network packets or disk data. This will save
87 you many hours of debug.
88
89- Primary CPU general-purpose register settings
90 x0 = physical address of device tree blob (dtb) in system RAM.
91 x1 = 0 (reserved for future use)
92 x2 = 0 (reserved for future use)
93 x3 = 0 (reserved for future use)
94
95- CPU mode
96 All forms of interrupts must be masked in PSTATE.DAIF (Debug, SError,
97 IRQ and FIQ).
98 The CPU must be in either EL2 (RECOMMENDED in order to have access to
99 the virtualisation extensions) or non-secure EL1.
100
101- Caches, MMUs
102 The MMU must be off.
103 Instruction cache may be on or off.
104 Data cache must be off and invalidated.
105 External caches (if present) must be configured and disabled.
106
107- Architected timers
108 CNTFRQ must be programmed with the timer frequency.
109 If entering the kernel at EL1, CNTHCTL_EL2 must have EL1PCTEN (bit 0)
110 set where available.
111
112- Coherency
113 All CPUs to be booted by the kernel must be part of the same coherency
114 domain on entry to the kernel. This may require IMPLEMENTATION DEFINED
115 initialisation to enable the receiving of maintenance operations on
116 each CPU.
117
118- System registers
119 All writable architected system registers at the exception level where
120 the kernel image will be entered must be initialised by software at a
121 higher exception level to prevent execution in an UNKNOWN state.
122
123The boot loader is expected to enter the kernel on each CPU in the
124following manner:
125
126- The primary CPU must jump directly to the first instruction of the
127 kernel image. The device tree blob passed by this CPU must contain
128 for each CPU node:
129
130 1. An 'enable-method' property. Currently, the only supported value
131 for this field is the string "spin-table".
132
133 2. A 'cpu-release-addr' property identifying a 64-bit,
134 zero-initialised memory location.
135
136 It is expected that the bootloader will generate these device tree
137 properties and insert them into the blob prior to kernel entry.
138
139- Any secondary CPUs must spin outside of the kernel in a reserved area
140 of memory (communicated to the kernel by a /memreserve/ region in the
141 device tree) polling their cpu-release-addr location, which must be
142 contained in the reserved region. A wfe instruction may be inserted
143 to reduce the overhead of the busy-loop and a sev will be issued by
144 the primary CPU. When a read of the location pointed to by the
145 cpu-release-addr returns a non-zero value, the CPU must jump directly
146 to this value.
147
148- Secondary CPU general-purpose register settings
149 x0 = 0 (reserved for future use)
150 x1 = 0 (reserved for future use)
151 x2 = 0 (reserved for future use)
152 x3 = 0 (reserved for future use)
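
The spin-table hand-off for secondary CPUs described above could look
roughly like the following from the boot loader's side. This is a sketch
only, written in C for clarity; real firmware typically uses a few
instructions of assembly, and the function name is hypothetical:

	#include <stdint.h>

	/* Park a secondary CPU until the kernel publishes its entry point. */
	static void secondary_spin(volatile uint64_t *cpu_release_addr)
	{
		uint64_t entry;

		/* cpu-release-addr starts out zero-initialised. */
		while ((entry = *cpu_release_addr) == 0)
			__asm__ volatile("wfe");	/* woken by the primary's sev */

		/* x0-x3 should be zero when jumping to the released address. */
		((void (*)(void))(uintptr_t)entry)();
	}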
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
new file mode 100644
index 000000000000..dbbdcbba75a3
--- /dev/null
+++ b/Documentation/arm64/memory.txt
@@ -0,0 +1,73 @@
1 Memory Layout on AArch64 Linux
2 ==============================
3
4Author: Catalin Marinas <catalin.marinas@arm.com>
5Date : 20 February 2012
6
7This document describes the virtual memory layout used by the AArch64
8Linux kernel. The architecture allows up to 4 levels of translation
9tables with a 4KB page size and up to 3 levels with a 64KB page size.
10
11AArch64 Linux uses 3 levels of translation tables with the 4KB page
12configuration, allowing 39-bit (512GB) virtual addresses for both user
13and kernel. With 64KB pages, only 2 levels of translation tables are
14used but the memory layout is the same.
15
16User addresses have bits 63:39 set to 0 while the kernel addresses have
17the same bits set to 1. TTBRx selection is given by bit 63 of the
18virtual address. The swapper_pg_dir contains only kernel (global)
19mappings while the user pgd contains only user (non-global) mappings.
 20The swapper_pg_dir address is written to TTBR1 and never written to
21TTBR0.
22
23
24AArch64 Linux memory layout:
25
26Start End Size Use
27-----------------------------------------------------------------------
280000000000000000 0000007fffffffff 512GB user
29
30ffffff8000000000 ffffffbbfffcffff ~240GB vmalloc
31
 32ffffffbbfffd0000 ffffffbbfffdffff 64KB [guard page]
33
 34ffffffbbfffe0000 ffffffbbfffeffff 64KB PCI I/O space
35
 36ffffffbbffff0000 ffffffbbffffffff 64KB [guard page]
37
38ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
39
 40ffffffbe00000000 ffffffbffbffffff ~8GB [guard, future vmemmap]
41
42ffffffbffc000000 ffffffbfffffffff 64MB modules
43
44ffffffc000000000 ffffffffffffffff 256GB memory
45
46
47Translation table lookup with 4KB pages:
48
49+--------+--------+--------+--------+--------+--------+--------+--------+
50|63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0|
51+--------+--------+--------+--------+--------+--------+--------+--------+
52 | | | | | |
53 | | | | | v
54 | | | | | [11:0] in-page offset
55 | | | | +-> [20:12] L3 index
56 | | | +-----------> [29:21] L2 index
57 | | +---------------------> [38:30] L1 index
58 | +-------------------------------> [47:39] L0 index (not used)
59 +-------------------------------------------------> [63] TTBR0/1
60
61
62Translation table lookup with 64KB pages:
63
64+--------+--------+--------+--------+--------+--------+--------+--------+
65|63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0|
66+--------+--------+--------+--------+--------+--------+--------+--------+
67 | | | | |
68 | | | | v
69 | | | | [15:0] in-page offset
70 | | | +----------> [28:16] L3 index
71 | | +--------------------------> [41:29] L2 index (only 38:29 used)
72 | +-------------------------------> [47:42] L1 index (not used)
73 +-------------------------------------------------> [63] TTBR0/1
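
The 4KB-page lookup above can be expressed as a small C sketch that
extracts each index from a virtual address (illustrative only; the macro
and function names are hypothetical, not kernel identifiers):

	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PTRS_PER_TABLE	512			/* 9 bits of index per level */

	static unsigned int va_l1_index(uint64_t va)	/* bits [38:30] */
	{
		return (va >> 30) & (PTRS_PER_TABLE - 1);
	}

	static unsigned int va_l2_index(uint64_t va)	/* bits [29:21] */
	{
		return (va >> 21) & (PTRS_PER_TABLE - 1);
	}

	static unsigned int va_l3_index(uint64_t va)	/* bits [20:12] */
	{
		return (va >> PAGE_SHIFT) & (PTRS_PER_TABLE - 1);
	}

	static unsigned int va_page_offset(uint64_t va)	/* bits [11:0] */
	{
		return va & ((1 << PAGE_SHIFT) - 1);
	}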
diff --git a/MAINTAINERS b/MAINTAINERS
index 9362f54bccb8..c98b1a1520d8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1209,6 +1209,12 @@ S: Maintained
1209F: arch/arm/mach-pxa/z2.c
1210F: arch/arm/mach-pxa/include/mach/z2.h
1211
1212ARM64 PORT (AARCH64 ARCHITECTURE)
1213M: Catalin Marinas <catalin.marinas@arm.com>
1214L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1215S: Maintained
1216F: arch/arm64/
1217
1218ASC7621 HARDWARE MONITOR DRIVER
1219M: George Joseph <george.joseph@fairview5.com>
1220L: lm-sensors@lm-sensors.org
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
new file mode 100644
index 000000000000..767ba5685454
--- /dev/null
+++ b/arch/arm64/Kconfig
@@ -0,0 +1,222 @@
1config ARM64
2 def_bool y
3 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
4 select GENERIC_CLOCKEVENTS
5 select GENERIC_HARDIRQS_NO_DEPRECATED
6 select GENERIC_IOMAP
7 select GENERIC_IRQ_PROBE
8 select GENERIC_IRQ_SHOW
9 select GENERIC_SMP_IDLE_THREAD
10 select GENERIC_TIME_VSYSCALL
11 select HARDIRQS_SW_RESEND
12 select HAVE_ARCH_TRACEHOOK
13 select HAVE_DMA_API_DEBUG
14 select HAVE_DMA_ATTRS
15 select HAVE_GENERIC_DMA_COHERENT
16 select HAVE_GENERIC_HARDIRQS
17 select HAVE_HW_BREAKPOINT if PERF_EVENTS
18 select HAVE_IRQ_WORK
19 select HAVE_MEMBLOCK
20 select HAVE_PERF_EVENTS
21 select HAVE_SPARSE_IRQ
22 select IRQ_DOMAIN
23 select NO_BOOTMEM
24 select OF
25 select OF_EARLY_FLATTREE
26 select PERF_USE_VMALLOC
27 select RTC_LIB
28 select SPARSE_IRQ
29 help
30 ARM 64-bit (AArch64) Linux support.
31
32config 64BIT
33 def_bool y
34
35config ARCH_PHYS_ADDR_T_64BIT
36 def_bool y
37
38config MMU
39 def_bool y
40
41config NO_IOPORT
42 def_bool y
43
44config STACKTRACE_SUPPORT
45 def_bool y
46
47config LOCKDEP_SUPPORT
48 def_bool y
49
50config TRACE_IRQFLAGS_SUPPORT
51 def_bool y
52
53config GENERIC_LOCKBREAK
54 def_bool y
55 depends on SMP && PREEMPT
56
57config RWSEM_GENERIC_SPINLOCK
58 def_bool y
59
60config GENERIC_HWEIGHT
61 def_bool y
62
63config GENERIC_CSUM
64 def_bool y
65
66config GENERIC_CALIBRATE_DELAY
67 def_bool y
68
69config ZONE_DMA32
70 def_bool y
71
72config ARCH_DMA_ADDR_T_64BIT
73 def_bool y
74
75config NEED_DMA_MAP_STATE
76 def_bool y
77
78config NEED_SG_DMA_LENGTH
79 def_bool y
80
81config SWIOTLB
82 def_bool y
83
84config IOMMU_HELPER
85 def_bool SWIOTLB
86
87source "init/Kconfig"
88
89source "kernel/Kconfig.freezer"
90
91menu "System Type"
92
93endmenu
94
95menu "Bus support"
96
97config ARM_AMBA
98 bool
99
100endmenu
101
102menu "Kernel Features"
103
104source "kernel/time/Kconfig"
105
106config ARM64_64K_PAGES
107 bool "Enable 64KB pages support"
108 help
109 This feature enables 64KB pages support (4KB by default)
110 allowing only two levels of page tables and faster TLB
111 look-up. AArch32 emulation is not available when this feature
112 is enabled.
113
114config SMP
115 bool "Symmetric Multi-Processing"
116 select USE_GENERIC_SMP_HELPERS
117 help
118 This enables support for systems with more than one CPU. If
119 you say N here, the kernel will run on single and
120 multiprocessor machines, but will use only one CPU of a
121 multiprocessor machine. If you say Y here, the kernel will run
122 on many, but not all, single processor machines. On a single
123 processor machine, the kernel will run faster if you say N
124 here.
125
126 If you don't know what to do here, say N.
127
128config NR_CPUS
129 int "Maximum number of CPUs (2-32)"
130 range 2 32
131 depends on SMP
132 default "4"
133
134source kernel/Kconfig.preempt
135
136config HZ
137 int
138 default 100
139
140config ARCH_HAS_HOLES_MEMORYMODEL
141 def_bool y if SPARSEMEM
142
143config ARCH_SPARSEMEM_ENABLE
144 def_bool y
145 select SPARSEMEM_VMEMMAP_ENABLE
146
147config ARCH_SPARSEMEM_DEFAULT
148 def_bool ARCH_SPARSEMEM_ENABLE
149
150config ARCH_SELECT_MEMORY_MODEL
151 def_bool ARCH_SPARSEMEM_ENABLE
152
153config HAVE_ARCH_PFN_VALID
154 def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
155
156config HW_PERF_EVENTS
157 bool "Enable hardware performance counter support for perf events"
158 depends on PERF_EVENTS
159 default y
160 help
161 Enable hardware performance counter support for perf events. If
162 disabled, perf events will use software events only.
163
164source "mm/Kconfig"
165
166endmenu
167
168menu "Boot options"
169
170config CMDLINE
171 string "Default kernel command string"
172 default ""
173 help
174 Provide a set of default command-line options at build time by
 175 entering them here. As a minimum, you should specify the
176 root device (e.g. root=/dev/nfs).
177
178config CMDLINE_FORCE
179 bool "Always use the default kernel command string"
180 help
181 Always use the default kernel command string, even if the boot
182 loader passes other arguments to the kernel.
183 This is useful if you cannot or don't want to change the
184 command-line options your boot loader passes to the kernel.
185
186endmenu
187
188menu "Userspace binary formats"
189
190source "fs/Kconfig.binfmt"
191
192config COMPAT
193 bool "Kernel support for 32-bit EL0"
194 depends on !ARM64_64K_PAGES
195 select COMPAT_BINFMT_ELF
196 help
197 This option enables support for a 32-bit EL0 running under a 64-bit
198 kernel at EL1. AArch32-specific components such as system calls,
199 the user helper functions, VFP support and the ptrace interface are
200 handled appropriately by the kernel.
201
202 If you want to execute 32-bit userspace applications, say Y.
203
204config SYSVIPC_COMPAT
205 def_bool y
206 depends on COMPAT && SYSVIPC
207
208endmenu
209
210source "net/Kconfig"
211
212source "drivers/Kconfig"
213
214source "fs/Kconfig"
215
216source "arch/arm64/Kconfig.debug"
217
218source "security/Kconfig"
219
220source "crypto/Kconfig"
221
222source "lib/Kconfig"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
new file mode 100644
index 000000000000..d7553f2bda66
--- /dev/null
+++ b/arch/arm64/Kconfig.debug
@@ -0,0 +1,27 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5config FRAME_POINTER
6 bool
7 default y
8
9config DEBUG_ERRORS
10 bool "Verbose kernel error messages"
11 depends on DEBUG_KERNEL
12 help
13 This option controls verbose debugging information which can be
14 printed when the kernel detects an internal error. This debugging
15 information is useful to kernel hackers when tracking down problems,
16 but mostly meaningless to other people. It's safe to say Y unless
17 you are concerned with the code size or don't want to see these
18 messages.
19
20config DEBUG_STACK_USAGE
21 bool "Enable stack utilization instrumentation"
22 depends on DEBUG_KERNEL
23 help
24 Enables the display of the minimum amount of free stack which each
25 task has ever had available in the sysrq-T output.
26
27endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
new file mode 100644
index 000000000000..364191f3be43
--- /dev/null
+++ b/arch/arm64/Makefile
@@ -0,0 +1,71 @@
1#
2# arch/arm64/Makefile
3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
7# This file is subject to the terms and conditions of the GNU General Public
8# License. See the file "COPYING" in the main directory of this archive
9# for more details.
10#
11# Copyright (C) 1995-2001 by Russell King
12
13LDFLAGS_vmlinux :=-p --no-undefined -X
14CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
15OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
16GZFLAGS :=-9
17
18LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
19
20KBUILD_DEFCONFIG := defconfig
21
22KBUILD_CFLAGS += -mgeneral-regs-only
23KBUILD_CPPFLAGS += -mlittle-endian
24AS += -EL
25LD += -EL
26
27comma = ,
28
29CHECKFLAGS += -D__aarch64__
30
31# Default value
32head-y := arch/arm64/kernel/head.o
33
34# The byte offset of the kernel image in RAM from the start of RAM.
35TEXT_OFFSET := 0x00080000
36
37export TEXT_OFFSET GZFLAGS
38
39core-y += arch/arm64/kernel/ arch/arm64/mm/
40libs-y := arch/arm64/lib/ $(libs-y)
41libs-y += $(LIBGCC)
42
43# Default target when executing plain make
44KBUILD_IMAGE := Image.gz
45
46all: $(KBUILD_IMAGE)
47
48boot := arch/arm64/boot
49
50Image Image.gz: vmlinux
51 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
52
53zinstall install: vmlinux
54 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
55
56%.dtb:
57 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
58
59# We use MRPROPER_FILES and CLEAN_FILES now
60archclean:
61 $(Q)$(MAKE) $(clean)=$(boot)
62
63define archhelp
64 echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
65 echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
66 echo ' install - Install uncompressed kernel'
67 echo ' zinstall - Install compressed kernel'
68 echo ' Install using (your) ~/bin/installkernel or'
69 echo ' (distribution) /sbin/installkernel or'
70 echo ' install to $$(INSTALL_PATH) and run lilo'
71endef
diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore
new file mode 100644
index 000000000000..8dab0bb6ae66
--- /dev/null
+++ b/arch/arm64/boot/.gitignore
@@ -0,0 +1,2 @@
1Image
2Image.gz
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
new file mode 100644
index 000000000000..eca209b2b0bf
--- /dev/null
+++ b/arch/arm64/boot/Makefile
@@ -0,0 +1,36 @@
1#
2# arch/arm64/boot/Makefile
3#
4# This file is included by the global makefile so that you can add your own
5# architecture-specific flags and dependencies.
6#
7# This file is subject to the terms and conditions of the GNU General Public
8# License. See the file "COPYING" in the main directory of this archive
9# for more details.
10#
11# Copyright (C) 2012, ARM Ltd.
12# Author: Will Deacon <will.deacon@arm.com>
13#
14# Based on the ia64 boot/Makefile.
15#
16
17targets := Image Image.gz
18
19$(obj)/Image: vmlinux FORCE
20 $(call if_changed,objcopy)
21
22$(obj)/Image.gz: $(obj)/Image FORCE
23 $(call if_changed,gzip)
24
25$(obj)/%.dtb: $(src)/dts/%.dts
26 $(call cmd,dtc)
27
28install: $(obj)/Image
29 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
30 $(obj)/Image System.map "$(INSTALL_PATH)"
31
32zinstall: $(obj)/Image.gz
33 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
34 $(obj)/Image.gz System.map "$(INSTALL_PATH)"
35
36clean-files += *.dtb
diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
new file mode 100644
index 000000000000..12ed78aa6f0c
--- /dev/null
+++ b/arch/arm64/boot/install.sh
@@ -0,0 +1,46 @@
1#!/bin/sh
2#
3# arch/arm64/boot/install.sh
4#
5# This file is subject to the terms and conditions of the GNU General Public
6# License. See the file "COPYING" in the main directory of this archive
7# for more details.
8#
9# Copyright (C) 1995 by Linus Torvalds
10#
11# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
12# Adapted from code in arch/i386/boot/install.sh by Russell King
13#
14# "make install" script for the AArch64 Linux port
15#
16# Arguments:
17# $1 - kernel version
18# $2 - kernel image file
19# $3 - kernel map file
20# $4 - default install path (blank if root directory)
21#
22
23# User may have a custom install script
24if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
25if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
26
27if [ "$(basename $2)" = "Image.gz" ]; then
28# Compressed install
29 echo "Installing compressed kernel"
30 base=vmlinuz
31else
32# Normal install
33 echo "Installing normal kernel"
34 base=vmlinux
35fi
36
37if [ -f $4/$base-$1 ]; then
38 mv $4/$base-$1 $4/$base-$1.old
39fi
40cat $2 > $4/$base-$1
41
42# Install system map file
43if [ -f $4/System.map-$1 ]; then
44 mv $4/System.map-$1 $4/System.map-$1.old
45fi
46cp $3 $4/System.map-$1
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
new file mode 100644
index 000000000000..9212c7880da7
--- /dev/null
+++ b/arch/arm64/configs/defconfig
@@ -0,0 +1,85 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set
3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y
6CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_BSD_PROCESS_ACCT_V3=y
8CONFIG_NO_HZ=y
9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=14
13# CONFIG_UTS_NS is not set
14# CONFIG_IPC_NS is not set
15# CONFIG_PID_NS is not set
16# CONFIG_NET_NS is not set
17CONFIG_SCHED_AUTOGROUP=y
18CONFIG_BLK_DEV_INITRD=y
19CONFIG_KALLSYMS_ALL=y
20# CONFIG_COMPAT_BRK is not set
21CONFIG_PROFILING=y
22CONFIG_MODULES=y
23CONFIG_MODULE_UNLOAD=y
24# CONFIG_BLK_DEV_BSG is not set
25# CONFIG_IOSCHED_DEADLINE is not set
26CONFIG_SMP=y
27CONFIG_PREEMPT_VOLUNTARY=y
28CONFIG_CMDLINE="console=ttyAMA0"
29# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
30CONFIG_COMPAT=y
31CONFIG_NET=y
32CONFIG_PACKET=y
33CONFIG_UNIX=y
34CONFIG_INET=y
35CONFIG_IP_PNP=y
36CONFIG_IP_PNP_DHCP=y
37CONFIG_IP_PNP_BOOTP=y
38# CONFIG_INET_LRO is not set
39# CONFIG_IPV6 is not set
40# CONFIG_WIRELESS is not set
41CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
42CONFIG_DEVTMPFS=y
43# CONFIG_BLK_DEV is not set
44CONFIG_SCSI=y
45# CONFIG_SCSI_PROC_FS is not set
46CONFIG_BLK_DEV_SD=y
47# CONFIG_SCSI_LOWLEVEL is not set
48CONFIG_NETDEVICES=y
49CONFIG_MII=y
50# CONFIG_WLAN is not set
51CONFIG_INPUT_EVDEV=y
52# CONFIG_SERIO_I8042 is not set
53# CONFIG_SERIO_SERPORT is not set
54CONFIG_LEGACY_PTY_COUNT=16
55# CONFIG_HW_RANDOM is not set
56# CONFIG_HWMON is not set
57CONFIG_FB=y
58# CONFIG_VGA_CONSOLE is not set
59CONFIG_FRAMEBUFFER_CONSOLE=y
60CONFIG_LOGO=y
61# CONFIG_LOGO_LINUX_MONO is not set
62# CONFIG_LOGO_LINUX_VGA16 is not set
63# CONFIG_USB_SUPPORT is not set
64# CONFIG_IOMMU_SUPPORT is not set
65CONFIG_EXT2_FS=y
66CONFIG_EXT3_FS=y
67# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
68# CONFIG_EXT3_FS_XATTR is not set
69CONFIG_FUSE_FS=y
70CONFIG_CUSE=y
71CONFIG_VFAT_FS=y
72CONFIG_TMPFS=y
73# CONFIG_MISC_FILESYSTEMS is not set
74CONFIG_NFS_FS=y
75CONFIG_ROOT_NFS=y
76CONFIG_NLS_CODEPAGE_437=y
77CONFIG_NLS_ISO8859_1=y
78CONFIG_MAGIC_SYSRQ=y
79CONFIG_DEBUG_FS=y
80CONFIG_DEBUG_KERNEL=y
81# CONFIG_SCHED_DEBUG is not set
82CONFIG_DEBUG_INFO=y
83# CONFIG_FTRACE is not set
84CONFIG_ATOMIC64_SELFTEST=y
85CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
new file mode 100644
index 000000000000..35924a542d43
--- /dev/null
+++ b/arch/arm64/include/asm/Kbuild
@@ -0,0 +1,51 @@
1include include/asm-generic/Kbuild.asm
2
3header-y += hwcap.h
4
5generic-y += bug.h
6generic-y += bugs.h
7generic-y += checksum.h
8generic-y += cputime.h
9generic-y += current.h
10generic-y += delay.h
11generic-y += div64.h
12generic-y += dma.h
13generic-y += emergency-restart.h
14generic-y += errno.h
15generic-y += ftrace.h
16generic-y += hw_irq.h
17generic-y += ioctl.h
18generic-y += ioctls.h
19generic-y += ipcbuf.h
20generic-y += irq_regs.h
21generic-y += kdebug.h
22generic-y += kmap_types.h
23generic-y += linkage.h
24generic-y += local.h
25generic-y += local64.h
26generic-y += mman.h
27generic-y += msgbuf.h
28generic-y += mutex.h
29generic-y += pci.h
30generic-y += percpu.h
31generic-y += poll.h
32generic-y += posix_types.h
33generic-y += resource.h
34generic-y += scatterlist.h
35generic-y += sections.h
36generic-y += segment.h
37generic-y += sembuf.h
38generic-y += serial.h
39generic-y += shmbuf.h
40generic-y += sizes.h
41generic-y += socket.h
42generic-y += sockios.h
43generic-y += string.h
44generic-y += switch_to.h
45generic-y += swab.h
46generic-y += termbits.h
47generic-y += termios.h
48generic-y += topology.h
49generic-y += types.h
50generic-y += unaligned.h
51generic-y += user.h
diff --git a/arch/arm64/include/asm/arm_generic.h b/arch/arm64/include/asm/arm_generic.h
new file mode 100644
index 000000000000..e4cec9d30f27
--- /dev/null
+++ b/arch/arm64/include/asm/arm_generic.h
@@ -0,0 +1,100 @@
1/*
2 * arch/arm64/include/asm/arm_generic.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_ARM_GENERIC_H
20#define __ASM_ARM_GENERIC_H
21
22#include <linux/clocksource.h>
23
24#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
25#define ARCH_TIMER_CTRL_IMASK (1 << 1)
26#define ARCH_TIMER_CTRL_ISTATUS (1 << 2)
27
28#define ARCH_TIMER_REG_CTRL 0
29#define ARCH_TIMER_REG_FREQ 1
30#define ARCH_TIMER_REG_TVAL 2
31
32static inline void arch_timer_reg_write(int reg, u32 val)
33{
34 switch (reg) {
35 case ARCH_TIMER_REG_CTRL:
36 asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
37 break;
38 case ARCH_TIMER_REG_TVAL:
39 asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
40 break;
41 default:
42 BUILD_BUG();
43 }
44
45 isb();
46}
47
48static inline u32 arch_timer_reg_read(int reg)
49{
50 u32 val;
51
52 switch (reg) {
53 case ARCH_TIMER_REG_CTRL:
54 asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
55 break;
56 case ARCH_TIMER_REG_FREQ:
57 asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
58 break;
59 case ARCH_TIMER_REG_TVAL:
60 asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
61 break;
62 default:
63 BUILD_BUG();
64 }
65
66 return val;
67}
68
69static inline void __cpuinit arch_counter_enable_user_access(void)
70{
71 u32 cntkctl;
72
73 /* Disable user access to the timers and the virtual counter. */
74 asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
75 cntkctl &= ~((3 << 8) | (1 << 1));
76
77 /* Enable user access to the physical counter and frequency. */
78 cntkctl |= 1;
79 asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
80}
81
82static inline cycle_t arch_counter_get_cntpct(void)
83{
84 cycle_t cval;
85
86 asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
87
88 return cval;
89}
90
91static inline cycle_t arch_counter_get_cntvct(void)
92{
93 cycle_t cval;
94
95 asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
96
97 return cval;
98}
99
100#endif
diff --git a/arch/arm64/include/asm/asm-offsets.h b/arch/arm64/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/arm64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
#include <generated/asm-offsets.h>
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
new file mode 100644
index 000000000000..da2a13e8f1e6
--- /dev/null
+++ b/arch/arm64/include/asm/assembler.h
@@ -0,0 +1,109 @@
1/*
2 * Based on arch/arm/include/asm/assembler.h
3 *
4 * Copyright (C) 1996-2000 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASSEMBLY__
20#error "Only include this from assembly code"
21#endif
22
23#include <asm/ptrace.h>
24
25/*
26 * Stack pushing/popping (register pairs only). Equivalent to store decrement
27 * before, load increment after.
28 */
29 .macro push, xreg1, xreg2
30 stp \xreg1, \xreg2, [sp, #-16]!
31 .endm
32
33 .macro pop, xreg1, xreg2
34 ldp \xreg1, \xreg2, [sp], #16
35 .endm
36
37/*
38 * Enable and disable interrupts.
39 */
40 .macro disable_irq
41 msr daifset, #2
42 .endm
43
44 .macro enable_irq
45 msr daifclr, #2
46 .endm
47
48/*
49 * Save/disable and restore interrupts.
50 */
51 .macro save_and_disable_irqs, olddaif
52 mrs \olddaif, daif
53 disable_irq
54 .endm
55
56 .macro restore_irqs, olddaif
57 msr daif, \olddaif
58 .endm
59
60/*
61 * Enable and disable debug exceptions.
62 */
63 .macro disable_dbg
64 msr daifset, #8
65 .endm
66
67 .macro enable_dbg
68 msr daifclr, #8
69 .endm
70
71 .macro disable_step, tmp
72 mrs \tmp, mdscr_el1
73 bic \tmp, \tmp, #1
74 msr mdscr_el1, \tmp
75 .endm
76
77 .macro enable_step, tmp
78 mrs \tmp, mdscr_el1
79 orr \tmp, \tmp, #1
80 msr mdscr_el1, \tmp
81 .endm
82
83 .macro enable_dbg_if_not_stepping, tmp
84 mrs \tmp, mdscr_el1
85 tbnz \tmp, #1, 9990f
86 enable_dbg
879990:
88 .endm
89
90/*
91 * SMP data memory barrier
92 */
93 .macro smp_dmb, opt
94#ifdef CONFIG_SMP
95 dmb \opt
96#endif
97 .endm
98
99#define USER(l, x...) \
1009999: x; \
101 .section __ex_table,"a"; \
102 .align 3; \
103 .quad 9999b,l; \
104 .previous
105
106/*
107 * Register aliases.
108 */
109lr .req x30 // link register
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
new file mode 100644
index 000000000000..407717ba060e
--- /dev/null
+++ b/arch/arm64/include/asm/atomic.h
@@ -0,0 +1,305 @@
1/*
2 * Based on arch/arm/include/asm/atomic.h
3 *
4 * Copyright (C) 1996 Russell King.
5 * Copyright (C) 2002 Deep Blue Solutions Ltd.
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20#ifndef __ASM_ATOMIC_H
21#define __ASM_ATOMIC_H
22
23#include <linux/compiler.h>
24#include <linux/types.h>
25
26#include <asm/barrier.h>
27#include <asm/cmpxchg.h>
28
29#define ATOMIC_INIT(i) { (i) }
30
31#ifdef __KERNEL__
32
33/*
34 * On ARM, ordinary assignment (str instruction) doesn't clear the local
35 * strex/ldrex monitor on some implementations. The reason we can use it for
36 * atomic_set() is the clrex or dummy strex done on every exception return.
37 */
38#define atomic_read(v) (*(volatile int *)&(v)->counter)
39#define atomic_set(v,i) (((v)->counter) = (i))
40
41/*
42 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
43 * store exclusive to ensure that these are atomic. We may loop
44 * to ensure that the update happens.
45 */
46static inline void atomic_add(int i, atomic_t *v)
47{
48 unsigned long tmp;
49 int result;
50
51 asm volatile("// atomic_add\n"
52"1: ldxr %w0, [%3]\n"
53" add %w0, %w0, %w4\n"
54" stxr %w1, %w0, [%3]\n"
55" cbnz %w1, 1b"
56 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
57 : "r" (&v->counter), "Ir" (i)
58 : "cc");
59}
60
61static inline int atomic_add_return(int i, atomic_t *v)
62{
63 unsigned long tmp;
64 int result;
65
66 asm volatile("// atomic_add_return\n"
67"1: ldaxr %w0, [%3]\n"
68" add %w0, %w0, %w4\n"
69" stlxr %w1, %w0, [%3]\n"
70" cbnz %w1, 1b"
71 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
72 : "r" (&v->counter), "Ir" (i)
73 : "cc");
74
75 return result;
76}
77
78static inline void atomic_sub(int i, atomic_t *v)
79{
80 unsigned long tmp;
81 int result;
82
83 asm volatile("// atomic_sub\n"
84"1: ldxr %w0, [%3]\n"
85" sub %w0, %w0, %w4\n"
86" stxr %w1, %w0, [%3]\n"
87" cbnz %w1, 1b"
88 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
89 : "r" (&v->counter), "Ir" (i)
90 : "cc");
91}
92
93static inline int atomic_sub_return(int i, atomic_t *v)
94{
95 unsigned long tmp;
96 int result;
97
98 asm volatile("// atomic_sub_return\n"
99"1: ldaxr %w0, [%3]\n"
100" sub %w0, %w0, %w4\n"
101" stlxr %w1, %w0, [%3]\n"
102" cbnz %w1, 1b"
103 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
104 : "r" (&v->counter), "Ir" (i)
105 : "cc");
106
107 return result;
108}
109
110static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
111{
112 unsigned long tmp;
113 int oldval;
114
115 asm volatile("// atomic_cmpxchg\n"
116"1: ldaxr %w1, [%3]\n"
117" cmp %w1, %w4\n"
118" b.ne 2f\n"
119" stlxr %w0, %w5, [%3]\n"
120" cbnz %w0, 1b\n"
121"2:"
122 : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
123 : "r" (&ptr->counter), "Ir" (old), "r" (new)
124 : "cc");
125
126 return oldval;
127}
128
129static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
130{
131 unsigned long tmp, tmp2;
132
133 asm volatile("// atomic_clear_mask\n"
134"1: ldxr %0, [%3]\n"
135" bic %0, %0, %4\n"
136" stxr %w1, %0, [%3]\n"
137" cbnz %w1, 1b"
138 : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
139 : "r" (addr), "Ir" (mask)
140 : "cc");
141}
142
143#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
144
145static inline int __atomic_add_unless(atomic_t *v, int a, int u)
146{
147 int c, old;
148
149 c = atomic_read(v);
150 while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
151 c = old;
152 return c;
153}
154
155#define atomic_inc(v) atomic_add(1, v)
156#define atomic_dec(v) atomic_sub(1, v)
157
158#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
159#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
160#define atomic_inc_return(v) (atomic_add_return(1, v))
161#define atomic_dec_return(v) (atomic_sub_return(1, v))
162#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
163
164#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
165
166#define smp_mb__before_atomic_dec() smp_mb()
167#define smp_mb__after_atomic_dec() smp_mb()
168#define smp_mb__before_atomic_inc() smp_mb()
169#define smp_mb__after_atomic_inc() smp_mb()
170
171/*
172 * 64-bit atomic operations.
173 */
174#define ATOMIC64_INIT(i) { (i) }
175
176#define atomic64_read(v) (*(volatile long long *)&(v)->counter)
177#define atomic64_set(v,i) (((v)->counter) = (i))
178
179static inline void atomic64_add(u64 i, atomic64_t *v)
180{
181 long result;
182 unsigned long tmp;
183
184 asm volatile("// atomic64_add\n"
185"1: ldxr %0, [%3]\n"
186" add %0, %0, %4\n"
187" stxr %w1, %0, [%3]\n"
188" cbnz %w1, 1b"
189 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
190 : "r" (&v->counter), "Ir" (i)
191 : "cc");
192}
193
194static inline long atomic64_add_return(long i, atomic64_t *v)
195{
196 long result;
197 unsigned long tmp;
198
199 asm volatile("// atomic64_add_return\n"
200"1: ldaxr %0, [%3]\n"
201" add %0, %0, %4\n"
202" stlxr %w1, %0, [%3]\n"
203" cbnz %w1, 1b"
204 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
205 : "r" (&v->counter), "Ir" (i)
206 : "cc");
207
208 return result;
209}
210
211static inline void atomic64_sub(u64 i, atomic64_t *v)
212{
213 long result;
214 unsigned long tmp;
215
216 asm volatile("// atomic64_sub\n"
217"1: ldxr %0, [%3]\n"
218" sub %0, %0, %4\n"
219" stxr %w1, %0, [%3]\n"
220" cbnz %w1, 1b"
221 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
222 : "r" (&v->counter), "Ir" (i)
223 : "cc");
224}
225
226static inline long atomic64_sub_return(long i, atomic64_t *v)
227{
228 long result;
229 unsigned long tmp;
230
231 asm volatile("// atomic64_sub_return\n"
232"1: ldaxr %0, [%3]\n"
233" sub %0, %0, %4\n"
234" stlxr %w1, %0, [%3]\n"
235" cbnz %w1, 1b"
236 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
237 : "r" (&v->counter), "Ir" (i)
238 : "cc");
239
240 return result;
241}
242
243static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
244{
245 long oldval;
246 unsigned long res;
247
248 asm volatile("// atomic64_cmpxchg\n"
249"1: ldaxr %1, [%3]\n"
250" cmp %1, %4\n"
251" b.ne 2f\n"
252" stlxr %w0, %5, [%3]\n"
253" cbnz %w0, 1b\n"
254"2:"
255 : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
256 : "r" (&ptr->counter), "Ir" (old), "r" (new)
257 : "cc");
258
259 return oldval;
260}
261
262#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
263
264static inline long atomic64_dec_if_positive(atomic64_t *v)
265{
266 long result;
267 unsigned long tmp;
268
269 asm volatile("// atomic64_dec_if_positive\n"
270"1: ldaxr %0, [%3]\n"
271" subs %0, %0, #1\n"
272" b.mi 2f\n"
273" stlxr %w1, %0, [%3]\n"
274" cbnz %w1, 1b\n"
275"2:"
276 : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
277 : "r" (&v->counter)
278 : "cc");
279
280 return result;
281}
282
283static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
284{
285 long c, old;
286
287 c = atomic64_read(v);
288 while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
289 c = old;
290
291 return c != u;
292}
293
294#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
295#define atomic64_inc(v) atomic64_add(1LL, (v))
296#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
297#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
298#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
299#define atomic64_dec(v) atomic64_sub(1LL, (v))
300#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
301#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
302#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
303
304#endif
305#endif
diff --git a/arch/arm64/include/asm/auxvec.h b/arch/arm64/include/asm/auxvec.h
new file mode 100644
index 000000000000..22d6d8885854
--- /dev/null
+++ b/arch/arm64/include/asm/auxvec.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_AUXVEC_H
17#define __ASM_AUXVEC_H
18
19/* vDSO location */
20#define AT_SYSINFO_EHDR 33
21
22#endif
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
new file mode 100644
index 000000000000..d4a63338a53c
--- /dev/null
+++ b/arch/arm64/include/asm/barrier.h
@@ -0,0 +1,52 @@
1/*
2 * Based on arch/arm/include/asm/barrier.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_BARRIER_H
19#define __ASM_BARRIER_H
20
21#ifndef __ASSEMBLY__
22
23#define sev() asm volatile("sev" : : : "memory")
24#define wfe() asm volatile("wfe" : : : "memory")
25#define wfi() asm volatile("wfi" : : : "memory")
26
27#define isb() asm volatile("isb" : : : "memory")
28#define dsb() asm volatile("dsb sy" : : : "memory")
29
30#define mb() dsb()
31#define rmb() asm volatile("dsb ld" : : : "memory")
32#define wmb() asm volatile("dsb st" : : : "memory")
33
34#ifndef CONFIG_SMP
35#define smp_mb() barrier()
36#define smp_rmb() barrier()
37#define smp_wmb() barrier()
38#else
39#define smp_mb() asm volatile("dmb ish" : : : "memory")
40#define smp_rmb() asm volatile("dmb ishld" : : : "memory")
41#define smp_wmb() asm volatile("dmb ishst" : : : "memory")
42#endif
43
44#define read_barrier_depends() do { } while(0)
45#define smp_read_barrier_depends() do { } while(0)
46
47#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
48#define nop() asm volatile("nop");
49
50#endif /* __ASSEMBLY__ */
51
52#endif /* __ASM_BARRIER_H */
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
new file mode 100644
index 000000000000..5e693073b030
--- /dev/null
+++ b/arch/arm64/include/asm/bitops.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_BITOPS_H
17#define __ASM_BITOPS_H
18
19#include <linux/compiler.h>
20
21#include <asm/barrier.h>
22
23/*
24 * clear_bit may not imply a memory barrier
25 */
26#ifndef smp_mb__before_clear_bit
27#define smp_mb__before_clear_bit() smp_mb()
28#define smp_mb__after_clear_bit() smp_mb()
29#endif
30
31#ifndef _LINUX_BITOPS_H
32#error only <linux/bitops.h> can be included directly
33#endif
34
35#include <asm-generic/bitops/builtin-__ffs.h>
36#include <asm-generic/bitops/builtin-ffs.h>
37#include <asm-generic/bitops/builtin-__fls.h>
38#include <asm-generic/bitops/builtin-fls.h>
39
40#include <asm-generic/bitops/ffz.h>
41#include <asm-generic/bitops/fls64.h>
42#include <asm-generic/bitops/find.h>
43
44#include <asm-generic/bitops/sched.h>
45#include <asm-generic/bitops/hweight.h>
46#include <asm-generic/bitops/lock.h>
47
48#include <asm-generic/bitops/atomic.h>
49#include <asm-generic/bitops/non-atomic.h>
50#include <asm-generic/bitops/le.h>
51#include <asm-generic/bitops/ext2-atomic.h>
52
53#endif /* __ASM_BITOPS_H */
diff --git a/arch/arm64/include/asm/bitsperlong.h b/arch/arm64/include/asm/bitsperlong.h
new file mode 100644
index 000000000000..fce9c2924fa3
--- /dev/null
+++ b/arch/arm64/include/asm/bitsperlong.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_BITSPERLONG_H
17#define __ASM_BITSPERLONG_H
18
19#define __BITS_PER_LONG 64
20
21#include <asm-generic/bitsperlong.h>
22
23#endif /* __ASM_BITSPERLONG_H */
diff --git a/arch/arm64/include/asm/byteorder.h b/arch/arm64/include/asm/byteorder.h
new file mode 100644
index 000000000000..2b92046aafc5
--- /dev/null
+++ b/arch/arm64/include/asm/byteorder.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_BYTEORDER_H
17#define __ASM_BYTEORDER_H
18
19#include <linux/byteorder/little_endian.h>
20
21#endif /* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
new file mode 100644
index 000000000000..390308a67f0d
--- /dev/null
+++ b/arch/arm64/include/asm/cache.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_CACHE_H
17#define __ASM_CACHE_H
18
19#define L1_CACHE_SHIFT 6
20#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
21
22/*
23 * Memory returned by kmalloc() may be used for DMA, so we must make
24 * sure that all such allocations are cache aligned. Otherwise,
25 * unrelated code may cause parts of the buffer to be read into the
26 * cache before the transfer is done, causing old data to be seen by
27 * the CPU.
28 */
29#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
30#define ARCH_SLAB_MINALIGN 8
31
32#endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
new file mode 100644
index 000000000000..aa3132ab7f29
--- /dev/null
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -0,0 +1,148 @@
1/*
2 * Based on arch/arm/include/asm/cacheflush.h
3 *
4 * Copyright (C) 1999-2002 Russell King.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_CACHEFLUSH_H
20#define __ASM_CACHEFLUSH_H
21
22#include <linux/mm.h>
23
24/*
25 * This flag is used to indicate that the page pointed to by a pte is clean
26 * and does not require cleaning before returning it to the user.
27 */
28#define PG_dcache_clean PG_arch_1
29
30/*
31 * MM Cache Management
32 * ===================
33 *
34 * The arch/arm64/mm/cache.S implements these methods.
35 *
36 * Start addresses are inclusive and end addresses are exclusive; start
37 * addresses should be rounded down, end addresses up.
38 *
39 * See Documentation/cachetlb.txt for more information. Please note that
40 * the implementation assumes non-aliasing VIPT D-cache and (aliasing)
41 * VIPT or ASID-tagged VIVT I-cache.
42 *
43 * flush_cache_all()
44 *
45 * Unconditionally clean and invalidate the entire cache.
46 *
47 * flush_cache_mm(mm)
48 *
49 * Clean and invalidate all user space cache entries
50 * before a change of page tables.
51 *
52 * flush_icache_range(start, end)
53 *
54 * Ensure coherency between the I-cache and the D-cache in the
55 * region described by start, end.
56 * - start - virtual start address
57 * - end - virtual end address
58 *
59 * __flush_cache_user_range(start, end)
60 *
61 * Ensure coherency between the I-cache and the D-cache in the
62 * region described by start, end.
63 * - start - virtual start address
64 * - end - virtual end address
65 *
66 * __flush_dcache_area(kaddr, size)
67 *
68 * Ensure that the data held in page is written back.
69 * - kaddr - page address
70 * - size - region size
71 */
72extern void flush_cache_all(void);
73extern void flush_cache_mm(struct mm_struct *mm);
74extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
75extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
76extern void flush_icache_range(unsigned long start, unsigned long end);
77extern void __flush_dcache_area(void *addr, size_t len);
78extern void __flush_cache_user_range(unsigned long start, unsigned long end);
79
80/*
81 * Copy user data from/to a page which is mapped into a different
82 * process's address space. Really, we want to allow our "user
83 * space" model to handle this.
84 */
85extern void copy_to_user_page(struct vm_area_struct *, struct page *,
86 unsigned long, void *, const void *, unsigned long);
87#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
88 do { \
89 memcpy(dst, src, len); \
90 } while (0)
91
92#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
93
94/*
95 * flush_dcache_page is used when the kernel has written to the page
96 * cache page at virtual address page->virtual.
97 *
98 * If this page isn't mapped (ie, page_mapping == NULL), or it might
99 * have userspace mappings, then we _must_ always clean + invalidate
100 * the dcache entries associated with the kernel mapping.
101 *
102 * Otherwise we can defer the operation, and clean the cache when we are
103 * about to change to user space. This is the same method as used on SPARC64.
104 * See update_mmu_cache for the user space part.
105 */
106#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
107extern void flush_dcache_page(struct page *);
108
109static inline void __flush_icache_all(void)
110{
111 asm("ic ialluis");
112}
113
114#define flush_dcache_mmap_lock(mapping) \
115 spin_lock_irq(&(mapping)->tree_lock)
116#define flush_dcache_mmap_unlock(mapping) \
117 spin_unlock_irq(&(mapping)->tree_lock)
118
119#define flush_icache_user_range(vma,page,addr,len) \
120 flush_dcache_page(page)
121
122/*
123 * We don't appear to need to do anything here. In fact, if we did, we'd
124 * duplicate cache flushing elsewhere performed by flush_dcache_page().
125 */
126#define flush_icache_page(vma,page) do { } while (0)
127
128/*
129 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
130 * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
131 * caches, since the direct-mappings of these pages may contain cached
132 * data, we need to do a full cache flush to ensure that writebacks
133 * don't corrupt data placed into these pages via the new mappings.
134 */
135static inline void flush_cache_vmap(unsigned long start, unsigned long end)
136{
137 /*
138 * set_pte_at() called from vmap_pte_range() does not
139 * have a DSB after cleaning the cache line.
140 */
141 dsb();
142}
143
144static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
145{
146}
147
148#endif
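
A minimal sketch of the flush_icache_range() contract documented in the header comment above: after the kernel writes instructions into memory (module loading, code patching), the D-cache and I-cache must be brought to coherency before the new code is executed. The helper below is hypothetical, not part of this patch.

#include <linux/string.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical: copy new instructions into place and make them visible to
 * the instruction stream. Start is inclusive, end exclusive, both virtual.
 */
static void install_insns(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + size);
}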
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
new file mode 100644
index 000000000000..85f5f511352a
--- /dev/null
+++ b/arch/arm64/include/asm/cachetype.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_CACHETYPE_H
17#define __ASM_CACHETYPE_H
18
19#include <asm/cputype.h>
20
21#define CTR_L1IP_SHIFT 14
22#define CTR_L1IP_MASK 3
23
24#define ICACHE_POLICY_RESERVED 0
25#define ICACHE_POLICY_AIVIVT 1
26#define ICACHE_POLICY_VIPT 2
27#define ICACHE_POLICY_PIPT 3
28
29static inline u32 icache_policy(void)
30{
31 return (read_cpuid_cachetype() >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK;
32}
33
34/*
35 * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
36 * permitted in the I-cache.
37 */
38static inline int icache_is_aliasing(void)
39{
40 return icache_policy() != ICACHE_POLICY_PIPT;
41}
42
43static inline int icache_is_aivivt(void)
44{
45 return icache_policy() == ICACHE_POLICY_AIVIVT;
46}
47
48#endif /* __ASM_CACHETYPE_H */
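
As a usage sketch (hypothetical, not in this patch), the accessors above let generic code decide whether extra I-cache maintenance is needed:

#include <linux/printk.h>
#include <asm/cachetype.h>

/* Hypothetical: report whether the I-cache needs aliasing-aware handling. */
static void report_icache_policy(void)
{
	if (icache_is_aivivt())
		pr_info("ASID-tagged VIVT I-cache: maintenance needed on ASID roll-over\n");
	else if (icache_is_aliasing())
		pr_info("aliasing I-cache detected\n");
	else
		pr_info("PIPT I-cache, no extra maintenance needed\n");
}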
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..e0e65b069d9e
--- /dev/null
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -0,0 +1,173 @@
1/*
2 * Based on arch/arm/include/asm/cmpxchg.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_CMPXCHG_H
19#define __ASM_CMPXCHG_H
20
21#include <linux/bug.h>
22
23#include <asm/barrier.h>
24
25static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
26{
27 unsigned long ret, tmp;
28
29 switch (size) {
30 case 1:
31 asm volatile("// __xchg1\n"
32 "1: ldaxrb %w0, [%3]\n"
33 " stlxrb %w1, %w2, [%3]\n"
34 " cbnz %w1, 1b\n"
35 : "=&r" (ret), "=&r" (tmp)
36 : "r" (x), "r" (ptr)
37 : "memory", "cc");
38 break;
39 case 2:
40 asm volatile("// __xchg2\n"
41 "1: ldaxrh %w0, [%3]\n"
42 " stlxrh %w1, %w2, [%3]\n"
43 " cbnz %w1, 1b\n"
44 : "=&r" (ret), "=&r" (tmp)
45 : "r" (x), "r" (ptr)
46 : "memory", "cc");
47 break;
48 case 4:
49 asm volatile("// __xchg4\n"
50 "1: ldaxr %w0, [%3]\n"
51 " stlxr %w1, %w2, [%3]\n"
52 " cbnz %w1, 1b\n"
53 : "=&r" (ret), "=&r" (tmp)
54 : "r" (x), "r" (ptr)
55 : "memory", "cc");
56 break;
57 case 8:
58 asm volatile("// __xchg8\n"
59 "1: ldaxr %0, [%3]\n"
60 " stlxr %w1, %2, [%3]\n"
61 " cbnz %w1, 1b\n"
62 : "=&r" (ret), "=&r" (tmp)
63 : "r" (x), "r" (ptr)
64 : "memory", "cc");
65 break;
66 default:
67 BUILD_BUG();
68 }
69
70 return ret;
71}
72
73#define xchg(ptr,x) \
74 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
75
76static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
77 unsigned long new, int size)
78{
79 unsigned long oldval = 0, res;
80
81 switch (size) {
82 case 1:
83 do {
84 asm volatile("// __cmpxchg1\n"
85 " ldxrb %w1, [%2]\n"
86 " mov %w0, #0\n"
87 " cmp %w1, %w3\n"
88 " b.ne 1f\n"
89 " stxrb %w0, %w4, [%2]\n"
90 "1:\n"
91 : "=&r" (res), "=&r" (oldval)
92 : "r" (ptr), "Ir" (old), "r" (new)
93 : "cc");
94 } while (res);
95 break;
96
97 case 2:
98 do {
99 asm volatile("// __cmpxchg2\n"
100 " ldxrh %w1, [%2]\n"
101 " mov %w0, #0\n"
102 " cmp %w1, %w3\n"
103 " b.ne 1f\n"
104 " stxrh %w0, %w4, [%2]\n"
105 "1:\n"
106 : "=&r" (res), "=&r" (oldval)
107 : "r" (ptr), "Ir" (old), "r" (new)
108 : "memory", "cc");
109 } while (res);
110 break;
111
112 case 4:
113 do {
114 asm volatile("// __cmpxchg4\n"
115 " ldxr %w1, [%2]\n"
116 " mov %w0, #0\n"
117 " cmp %w1, %w3\n"
118 " b.ne 1f\n"
119 " stxr %w0, %w4, [%2]\n"
120 "1:\n"
121 : "=&r" (res), "=&r" (oldval)
122 : "r" (ptr), "Ir" (old), "r" (new)
123 : "cc");
124 } while (res);
125 break;
126
127 case 8:
128 do {
129 asm volatile("// __cmpxchg8\n"
130 " ldxr %1, [%2]\n"
131 " mov %w0, #0\n"
132 " cmp %1, %3\n"
133 " b.ne 1f\n"
134 " stxr %w0, %4, [%2]\n"
135 "1:\n"
136 : "=&r" (res), "=&r" (oldval)
137 : "r" (ptr), "Ir" (old), "r" (new)
138 : "cc");
139 } while (res);
140 break;
141
142 default:
143 BUILD_BUG();
144 }
145
146 return oldval;
147}
148
149static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
150 unsigned long new, int size)
151{
152 unsigned long ret;
153
154 smp_mb();
155 ret = __cmpxchg(ptr, old, new, size);
156 smp_mb();
157
158 return ret;
159}
160
161#define cmpxchg(ptr,o,n) \
162 ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
163 (unsigned long)(o), \
164 (unsigned long)(n), \
165 sizeof(*(ptr))))
166
167#define cmpxchg_local(ptr,o,n) \
168 ((__typeof__(*(ptr)))__cmpxchg((ptr), \
169 (unsigned long)(o), \
170 (unsigned long)(n), \
171 sizeof(*(ptr))))
172
173#endif /* __ASM_CMPXCHG_H */
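
A short usage sketch of the wrappers above (the helper is hypothetical, not in this patch): cmpxchg() is fully ordered because __cmpxchg_mb() brackets the operation with smp_mb(), while cmpxchg_local() omits the barriers for data only touched by the local CPU.

#include <asm/cmpxchg.h>

/*
 * Hypothetical: atomically increment *ctr unless it has already reached
 * limit. Returns 1 on success, 0 if the limit was hit.
 */
static inline int bounded_inc(unsigned long *ctr, unsigned long limit)
{
	unsigned long old, new;

	do {
		old = *ctr;
		if (old == limit)
			return 0;
		new = old + 1;
	} while (cmpxchg(ctr, old, new) != old);

	return 1;
}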
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
new file mode 100644
index 000000000000..a670a33ad736
--- /dev/null
+++ b/arch/arm64/include/asm/compat.h
@@ -0,0 +1,242 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_COMPAT_H
17#define __ASM_COMPAT_H
18#ifdef __KERNEL__
19#ifdef CONFIG_COMPAT
20
21/*
22 * Architecture specific compatibility types
23 */
24#include <linux/types.h>
25#include <linux/sched.h>
26
27#define COMPAT_USER_HZ 100
28#define COMPAT_UTS_MACHINE "armv8l\0\0"
29
30typedef u32 compat_size_t;
31typedef s32 compat_ssize_t;
32typedef s32 compat_time_t;
33typedef s32 compat_clock_t;
34typedef s32 compat_pid_t;
35typedef u32 __compat_uid_t;
36typedef u32 __compat_gid_t;
37typedef u32 __compat_uid32_t;
38typedef u32 __compat_gid32_t;
39typedef u32 compat_mode_t;
40typedef u32 compat_ino_t;
41typedef u32 compat_dev_t;
42typedef s32 compat_off_t;
43typedef s64 compat_loff_t;
44typedef s16 compat_nlink_t;
45typedef u16 compat_ipc_pid_t;
46typedef s32 compat_daddr_t;
47typedef u32 compat_caddr_t;
48typedef __kernel_fsid_t compat_fsid_t;
49typedef s32 compat_key_t;
50typedef s32 compat_timer_t;
51
52typedef s32 compat_int_t;
53typedef s32 compat_long_t;
54typedef s64 compat_s64;
55typedef u32 compat_uint_t;
56typedef u32 compat_ulong_t;
57typedef u64 compat_u64;
58
59struct compat_timespec {
60 compat_time_t tv_sec;
61 s32 tv_nsec;
62};
63
64struct compat_timeval {
65 compat_time_t tv_sec;
66 s32 tv_usec;
67};
68
69struct compat_stat {
70 compat_dev_t st_dev;
71 compat_ino_t st_ino;
72 compat_mode_t st_mode;
73 compat_nlink_t st_nlink;
74 __compat_uid32_t st_uid;
75 __compat_gid32_t st_gid;
76 compat_dev_t st_rdev;
77 compat_off_t st_size;
78 compat_off_t st_blksize;
79 compat_off_t st_blocks;
80 compat_time_t st_atime;
81 u32 st_atime_nsec;
82 compat_time_t st_mtime;
83 u32 st_mtime_nsec;
84 compat_time_t st_ctime;
85 u32 st_ctime_nsec;
86 u32 __unused4[2];
87};
88
89struct compat_flock {
90 short l_type;
91 short l_whence;
92 compat_off_t l_start;
93 compat_off_t l_len;
94 compat_pid_t l_pid;
95};
96
97#define F_GETLK64 12 /* using 'struct flock64' */
98#define F_SETLK64 13
99#define F_SETLKW64 14
100
101struct compat_flock64 {
102 short l_type;
103 short l_whence;
104 compat_loff_t l_start;
105 compat_loff_t l_len;
106 compat_pid_t l_pid;
107};
108
109struct compat_statfs {
110 int f_type;
111 int f_bsize;
112 int f_blocks;
113 int f_bfree;
114 int f_bavail;
115 int f_files;
116 int f_ffree;
117 compat_fsid_t f_fsid;
118 int f_namelen; /* SunOS ignores this field. */
119 int f_frsize;
120 int f_flags;
121 int f_spare[4];
122};
123
124#define COMPAT_RLIM_INFINITY 0xffffffff
125
126typedef u32 compat_old_sigset_t;
127
128#define _COMPAT_NSIG 64
129#define _COMPAT_NSIG_BPW 32
130
131typedef u32 compat_sigset_word;
132
133#define COMPAT_OFF_T_MAX 0x7fffffff
134#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
135
136/*
137 * A pointer passed in from user mode. This should not
138 * be used for syscall parameters, just declare them
139 * as pointers because the syscall entry code will have
140 * appropriately converted them already.
141 */
142typedef u32 compat_uptr_t;
143
144static inline void __user *compat_ptr(compat_uptr_t uptr)
145{
146 return (void __user *)(unsigned long)uptr;
147}
148
149static inline compat_uptr_t ptr_to_compat(void __user *uptr)
150{
151 return (u32)(unsigned long)uptr;
152}
153
154static inline void __user *arch_compat_alloc_user_space(long len)
155{
156 struct pt_regs *regs = task_pt_regs(current);
157 return (void __user *)regs->compat_sp - len;
158}
159
160struct compat_ipc64_perm {
161 compat_key_t key;
162 __compat_uid32_t uid;
163 __compat_gid32_t gid;
164 __compat_uid32_t cuid;
165 __compat_gid32_t cgid;
166 unsigned short mode;
167 unsigned short __pad1;
168 unsigned short seq;
169 unsigned short __pad2;
170 compat_ulong_t unused1;
171 compat_ulong_t unused2;
172};
173
174struct compat_semid64_ds {
175 struct compat_ipc64_perm sem_perm;
176 compat_time_t sem_otime;
177 compat_ulong_t __unused1;
178 compat_time_t sem_ctime;
179 compat_ulong_t __unused2;
180 compat_ulong_t sem_nsems;
181 compat_ulong_t __unused3;
182 compat_ulong_t __unused4;
183};
184
185struct compat_msqid64_ds {
186 struct compat_ipc64_perm msg_perm;
187 compat_time_t msg_stime;
188 compat_ulong_t __unused1;
189 compat_time_t msg_rtime;
190 compat_ulong_t __unused2;
191 compat_time_t msg_ctime;
192 compat_ulong_t __unused3;
193 compat_ulong_t msg_cbytes;
194 compat_ulong_t msg_qnum;
195 compat_ulong_t msg_qbytes;
196 compat_pid_t msg_lspid;
197 compat_pid_t msg_lrpid;
198 compat_ulong_t __unused4;
199 compat_ulong_t __unused5;
200};
201
202struct compat_shmid64_ds {
203 struct compat_ipc64_perm shm_perm;
204 compat_size_t shm_segsz;
205 compat_time_t shm_atime;
206 compat_ulong_t __unused1;
207 compat_time_t shm_dtime;
208 compat_ulong_t __unused2;
209 compat_time_t shm_ctime;
210 compat_ulong_t __unused3;
211 compat_pid_t shm_cpid;
212 compat_pid_t shm_lpid;
213 compat_ulong_t shm_nattch;
214 compat_ulong_t __unused4;
215 compat_ulong_t __unused5;
216};
217
218static inline int is_compat_task(void)
219{
220 return test_thread_flag(TIF_32BIT);
221}
222
223static inline int is_compat_thread(struct thread_info *thread)
224{
225 return test_ti_thread_flag(thread, TIF_32BIT);
226}
227
228#else /* !CONFIG_COMPAT */
229
230static inline int is_compat_task(void)
231{
232 return 0;
233}
234
235static inline int is_compat_thread(struct thread_info *thread)
236{
237 return 0;
238}
239
240#endif /* CONFIG_COMPAT */
241#endif /* __KERNEL__ */
242#endif /* __ASM_COMPAT_H */
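
A sketch of how compat_uptr_t and compat_ptr() are typically consumed, for example in a compat ioctl path; the foo_req32 structure and the handler below are hypothetical.

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical 32-bit ABI structure carrying a user pointer as a u32. */
struct foo_req32 {
	compat_uptr_t	buf;	/* 32-bit user pointer */
	compat_size_t	len;
};

static int foo_compat_fill(struct foo_req32 __user *ureq, void *data)
{
	struct foo_req32 req;

	if (copy_from_user(&req, ureq, sizeof(req)))
		return -EFAULT;

	/* Widen the 32-bit pointer before using the normal accessors. */
	if (copy_to_user(compat_ptr(req.buf), data, req.len))
		return -EFAULT;

	return 0;
}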
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
new file mode 100644
index 000000000000..ee35fd0f2236
--- /dev/null
+++ b/arch/arm64/include/asm/compiler.h
@@ -0,0 +1,30 @@
1/*
2 * Based on arch/arm/include/asm/compiler.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_COMPILER_H
19#define __ASM_COMPILER_H
20
21/*
22 * This is used to ensure the compiler did actually allocate the register we
23 * asked it for some inline assembly sequences. Apparently we can't trust the
24 * compiler from one version to another so a bit of paranoia won't hurt. This
25 * string is meant to be concatenated with the inline asm string and will
26 * cause compilation to stop on mismatch. (for details, see gcc PR 15089)
27 */
28#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
29
30#endif /* __ASM_COMPILER_H */
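
To illustrate, __asmeq() is concatenated in front of an inline asm template that depends on a fixed register binding, turning a silent register-allocation mismatch into a build error. The firmware call below is a hypothetical example, not something defined by this patch.

#include <asm/compiler.h>

/*
 * Hypothetical SMC-style call that must pass its argument in x0: if the
 * compiler binds operand %0 to any other register, __asmeq() makes the
 * assembler emit .err and the build fails.
 */
static inline unsigned long firmware_call(unsigned long arg)
{
	register unsigned long x0 asm("x0") = arg;

	asm volatile(
		__asmeq("%0", "x0")
		"smc	#0"
		: "+r" (x0) : : "memory");

	return x0;
}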
diff --git a/arch/arm64/include/asm/cputable.h b/arch/arm64/include/asm/cputable.h
new file mode 100644
index 000000000000..e3bd983d3661
--- /dev/null
+++ b/arch/arm64/include/asm/cputable.h
@@ -0,0 +1,30 @@
1/*
2 * arch/arm64/include/asm/cputable.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_CPUTABLE_H
19#define __ASM_CPUTABLE_H
20
21struct cpu_info {
22 unsigned int cpu_id_val;
23 unsigned int cpu_id_mask;
24 const char *cpu_name;
25 unsigned long (*cpu_setup)(void);
26};
27
28extern struct cpu_info *lookup_processor_type(unsigned int);
29
30#endif
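
For illustration, a table entry built from this structure might look like the sketch below. The values are placeholders and the __cpu_setup routine is assumed to be provided elsewhere in this series; real tables pair specific MIDR match values and masks with per-implementation setup code.

#include <asm/cputable.h>

/* Assumed to exist elsewhere in the series; shown here only as a sketch. */
extern unsigned long __cpu_setup(void);

static struct cpu_info cpu_table[] = {
	{
		.cpu_id_val	= 0x0,		/* illustrative only */
		.cpu_id_mask	= 0x0,		/* illustrative only */
		.cpu_name	= "AArch64 Processor",
		.cpu_setup	= __cpu_setup,
	},
};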
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
new file mode 100644
index 000000000000..ef54125e6c1e
--- /dev/null
+++ b/arch/arm64/include/asm/cputype.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_CPUTYPE_H
17#define __ASM_CPUTYPE_H
18
19#define ID_MIDR_EL1 "midr_el1"
20#define ID_CTR_EL0 "ctr_el0"
21
22#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1"
23#define ID_AA64DFR0_EL1 "id_aa64dfr0_el1"
24#define ID_AA64AFR0_EL1 "id_aa64afr0_el1"
25#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1"
26#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1"
27
28#define read_cpuid(reg) ({ \
29 u64 __val; \
30 asm("mrs %0, " reg : "=r" (__val)); \
31 __val; \
32})
33
34/*
35 * The CPU ID never changes at run time, so we might as well tell the
36 * compiler that it's constant. Use this function to read the CPU ID
37 * rather than reading processor_id or calling read_cpuid() directly.
38 */
39static inline u32 __attribute_const__ read_cpuid_id(void)
40{
41 return read_cpuid(ID_MIDR_EL1);
42}
43
44static inline u32 __attribute_const__ read_cpuid_cachetype(void)
45{
46 return read_cpuid(ID_CTR_EL0);
47}
48
49#endif
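
As a usage sketch, MIDR_EL1 fields can be extracted from read_cpuid_id(). The shift/mask constants below follow the ARMv8 register layout (implementer in bits [31:24], part number in bits [15:4]); they are illustrative and not defined by this header.

#include <linux/printk.h>
#include <linux/types.h>
#include <asm/cputype.h>

/* Illustrative MIDR_EL1 field extraction, per the ARMv8 ARM layout. */
#define MIDR_IMPLEMENTER(midr)	(((midr) >> 24) & 0xff)
#define MIDR_PARTNUM(midr)	(((midr) >> 4) & 0xfff)

static void identify_cpu_example(void)
{
	u32 midr = read_cpuid_id();

	pr_info("CPU implementer 0x%02x, part 0x%03x\n",
		MIDR_IMPLEMENTER(midr), MIDR_PARTNUM(midr));
}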
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
new file mode 100644
index 000000000000..7eaa0b302493
--- /dev/null
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_DEBUG_MONITORS_H
17#define __ASM_DEBUG_MONITORS_H
18
19#ifdef __KERNEL__
20
21#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
22
23/* AArch64 */
24#define DBG_ESR_EVT_HWBP 0x0
25#define DBG_ESR_EVT_HWSS 0x1
26#define DBG_ESR_EVT_HWWP 0x2
27#define DBG_ESR_EVT_BRK 0x6
28
29enum debug_el {
30 DBG_ACTIVE_EL0 = 0,
31 DBG_ACTIVE_EL1,
32};
33
34/* AArch32 */
35#define DBG_ESR_EVT_BKPT 0x4
36#define DBG_ESR_EVT_VECC 0x5
37
38#define AARCH32_BREAK_ARM 0x07f001f0
39#define AARCH32_BREAK_THUMB 0xde01
40#define AARCH32_BREAK_THUMB2_LO 0xf7f0
41#define AARCH32_BREAK_THUMB2_HI 0xa000
42
43#ifndef __ASSEMBLY__
44struct task_struct;
45
46#define local_dbg_save(flags) \
47 do { \
48 typecheck(unsigned long, flags); \
49 asm volatile( \
50 "mrs %0, daif // local_dbg_save\n" \
51 "msr daifset, #8" \
52 : "=r" (flags) : : "memory"); \
53 } while (0)
54
55#define local_dbg_restore(flags) \
56 do { \
57 typecheck(unsigned long, flags); \
58 asm volatile( \
59 "msr daif, %0 // local_dbg_restore\n" \
60 : : "r" (flags) : "memory"); \
61 } while (0)
62
63#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
64
65u8 debug_monitors_arch(void);
66
67void enable_debug_monitors(enum debug_el el);
68void disable_debug_monitors(enum debug_el el);
69
70void user_rewind_single_step(struct task_struct *task);
71void user_fastforward_single_step(struct task_struct *task);
72
73void kernel_enable_single_step(struct pt_regs *regs);
74void kernel_disable_single_step(void);
75int kernel_active_single_step(void);
76
77#ifdef CONFIG_HAVE_HW_BREAKPOINT
78int reinstall_suspended_bps(struct pt_regs *regs);
79#else
80static inline int reinstall_suspended_bps(struct pt_regs *regs)
81{
82 return -ENODEV;
83}
84#endif
85
86#endif /* __ASSEMBLY__ */
87#endif /* __KERNEL__ */
88#endif /* __ASM_DEBUG_MONITORS_H */
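
A minimal sketch of the local_dbg_save()/local_dbg_restore() pairing, which masks debug exceptions (PSTATE.D) around a critical section; the function below is hypothetical.

#include <linux/typecheck.h>
#include <asm/debug-monitors.h>

/*
 * Hypothetical: keep hardware breakpoints/watchpoints and single-step from
 * firing while the debug registers are being reprogrammed.
 */
static void poke_debug_registers(void)
{
	unsigned long flags;

	local_dbg_save(flags);
	/* ... reprogram breakpoint/watchpoint registers here ... */
	local_dbg_restore(flags);
}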
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
new file mode 100644
index 000000000000..0d8453c755a8
--- /dev/null
+++ b/arch/arm64/include/asm/device.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_DEVICE_H
17#define __ASM_DEVICE_H
18
19struct dev_archdata {
20 struct dma_map_ops *dma_ops;
21};
22
23struct pdev_archdata {
24};
25
26#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..538f4b44db5d
--- /dev/null
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_DMA_MAPPING_H
17#define __ASM_DMA_MAPPING_H
18
19#ifdef __KERNEL__
20
21#include <linux/types.h>
22#include <linux/vmalloc.h>
23
24#include <asm-generic/dma-coherent.h>
25
26#define ARCH_HAS_DMA_GET_REQUIRED_MASK
27
28extern struct dma_map_ops *dma_ops;
29
30static inline struct dma_map_ops *get_dma_ops(struct device *dev)
31{
32 if (unlikely(!dev) || !dev->archdata.dma_ops)
33 return dma_ops;
34 else
35 return dev->archdata.dma_ops;
36}
37
38#include <asm-generic/dma-mapping-common.h>
39
40static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
41{
42 return (dma_addr_t)paddr;
43}
44
45static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
46{
47 return (phys_addr_t)dev_addr;
48}
49
50static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr)
51{
52 struct dma_map_ops *ops = get_dma_ops(dev);
53 return ops->mapping_error(dev, dev_addr);
54}
55
56static inline int dma_supported(struct device *dev, u64 mask)
57{
58 struct dma_map_ops *ops = get_dma_ops(dev);
59 return ops->dma_supported(dev, mask);
60}
61
62static inline int dma_set_mask(struct device *dev, u64 mask)
63{
64 if (!dev->dma_mask || !dma_supported(dev, mask))
65 return -EIO;
66 *dev->dma_mask = mask;
67
68 return 0;
69}
70
71static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
72{
73 if (!dev->dma_mask)
74 return 0;
75
76 return addr + size - 1 <= *dev->dma_mask;
77}
78
79static inline void dma_mark_clean(void *addr, size_t size)
80{
81}
82
83static inline void *dma_alloc_coherent(struct device *dev, size_t size,
84 dma_addr_t *dma_handle, gfp_t flags)
85{
86 struct dma_map_ops *ops = get_dma_ops(dev);
87 void *vaddr;
88
89 if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
90 return vaddr;
91
92 vaddr = ops->alloc(dev, size, dma_handle, flags, NULL);
93 debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
94 return vaddr;
95}
96
97static inline void dma_free_coherent(struct device *dev, size_t size,
98 void *vaddr, dma_addr_t dev_addr)
99{
100 struct dma_map_ops *ops = get_dma_ops(dev);
101
102 if (dma_release_from_coherent(dev, get_order(size), vaddr))
103 return;
104
105 debug_dma_free_coherent(dev, size, vaddr, dev_addr);
106 ops->free(dev, size, vaddr, dev_addr, NULL);
107}
108
109/*
110 * There is no dma_cache_sync() implementation, so just return NULL here.
111 */
112static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
113 dma_addr_t *handle, gfp_t flags)
114{
115 return NULL;
116}
117
118static inline void dma_free_noncoherent(struct device *dev, size_t size,
119 void *cpu_addr, dma_addr_t handle)
120{
121}
122
123#endif /* __KERNEL__ */
124#endif /* __ASM_DMA_MAPPING_H */
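
A usage sketch of the coherent allocation helpers above from a hypothetical driver; foo_alloc_ring()/foo_free_ring() are illustrative only. The dma_addr_t returned through dev_addr is what gets programmed into the hardware, while the CPU uses the returned virtual address.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical: allocate and free a DMA-coherent descriptor ring. */
static int foo_alloc_ring(struct device *dev, size_t size,
			  void **ring, dma_addr_t *dev_addr)
{
	*ring = dma_alloc_coherent(dev, size, dev_addr, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;
	return 0;
}

static void foo_free_ring(struct device *dev, size_t size,
			  void *ring, dma_addr_t dev_addr)
{
	dma_free_coherent(dev, size, ring, dev_addr);
}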
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
new file mode 100644
index 000000000000..cf284649dfcb
--- /dev/null
+++ b/arch/arm64/include/asm/elf.h
@@ -0,0 +1,179 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_ELF_H
17#define __ASM_ELF_H
18
19#include <asm/hwcap.h>
20
21/*
22 * ELF register definitions..
23 */
24#include <asm/ptrace.h>
25#include <asm/user.h>
26
27typedef unsigned long elf_greg_t;
28typedef unsigned long elf_freg_t[3];
29
30#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
31typedef elf_greg_t elf_gregset_t[ELF_NGREG];
32
33typedef struct user_fp elf_fpregset_t;
34
35#define EM_AARCH64 183
36
37/*
38 * AArch64 static relocation types.
39 */
40
41/* Miscellaneous. */
42#define R_ARM_NONE 0
43#define R_AARCH64_NONE 256
44
45/* Data. */
46#define R_AARCH64_ABS64 257
47#define R_AARCH64_ABS32 258
48#define R_AARCH64_ABS16 259
49#define R_AARCH64_PREL64 260
50#define R_AARCH64_PREL32 261
51#define R_AARCH64_PREL16 262
52
53/* Instructions. */
54#define R_AARCH64_MOVW_UABS_G0 263
55#define R_AARCH64_MOVW_UABS_G0_NC 264
56#define R_AARCH64_MOVW_UABS_G1 265
57#define R_AARCH64_MOVW_UABS_G1_NC 266
58#define R_AARCH64_MOVW_UABS_G2 267
59#define R_AARCH64_MOVW_UABS_G2_NC 268
60#define R_AARCH64_MOVW_UABS_G3 269
61
62#define R_AARCH64_MOVW_SABS_G0 270
63#define R_AARCH64_MOVW_SABS_G1 271
64#define R_AARCH64_MOVW_SABS_G2 272
65
66#define R_AARCH64_LD_PREL_LO19 273
67#define R_AARCH64_ADR_PREL_LO21 274
68#define R_AARCH64_ADR_PREL_PG_HI21 275
69#define R_AARCH64_ADR_PREL_PG_HI21_NC 276
70#define R_AARCH64_ADD_ABS_LO12_NC 277
71#define R_AARCH64_LDST8_ABS_LO12_NC 278
72
73#define R_AARCH64_TSTBR14 279
74#define R_AARCH64_CONDBR19 280
75#define R_AARCH64_JUMP26 282
76#define R_AARCH64_CALL26 283
77#define R_AARCH64_LDST16_ABS_LO12_NC 284
78#define R_AARCH64_LDST32_ABS_LO12_NC 285
79#define R_AARCH64_LDST64_ABS_LO12_NC 286
80#define R_AARCH64_LDST128_ABS_LO12_NC 299
81
82#define R_AARCH64_MOVW_PREL_G0 287
83#define R_AARCH64_MOVW_PREL_G0_NC 288
84#define R_AARCH64_MOVW_PREL_G1 289
85#define R_AARCH64_MOVW_PREL_G1_NC 290
86#define R_AARCH64_MOVW_PREL_G2 291
87#define R_AARCH64_MOVW_PREL_G2_NC 292
88#define R_AARCH64_MOVW_PREL_G3 293
89
90
91/*
92 * These are used to set parameters in the core dumps.
93 */
94#define ELF_CLASS ELFCLASS64
95#define ELF_DATA ELFDATA2LSB
96#define ELF_ARCH EM_AARCH64
97
98#define ELF_PLATFORM_SIZE 16
99#define ELF_PLATFORM ("aarch64")
100
101/*
102 * This is used to ensure we don't load something for the wrong architecture.
103 */
104#define elf_check_arch(x) ((x)->e_machine == EM_AARCH64)
105
106#define elf_read_implies_exec(ex,stk) (stk != EXSTACK_DISABLE_X)
107
108#define CORE_DUMP_USE_REGSET
109#define ELF_EXEC_PAGESIZE PAGE_SIZE
110
111/*
112 * This is the location that an ET_DYN program is loaded if exec'ed. Typical
113 * use of this is to invoke "./ld.so someprog" to test out a new version of
114 * the loader. We need to make sure that it is out of the way of the program
115 * that it will "exec", and that there is sufficient room for the brk.
116 */
117extern unsigned long randomize_et_dyn(unsigned long base);
118#define ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_64 / 3))
119
120/*
121 * When the program starts, a1 contains a pointer to a function to be
122 * registered with atexit, as per the SVR4 ABI. A value of 0 means we have no
123 * such handler.
124 */
125#define ELF_PLAT_INIT(_r, load_addr) (_r)->regs[0] = 0
126
127#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
128
129#define ARCH_DLINFO \
130do { \
131 NEW_AUX_ENT(AT_SYSINFO_EHDR, \
132 (elf_addr_t)current->mm->context.vdso); \
133} while (0)
134
135#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
136struct linux_binprm;
137extern int arch_setup_additional_pages(struct linux_binprm *bprm,
138 int uses_interp);
139
140/* 1GB of VA */
141#ifdef CONFIG_COMPAT
142#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
143 0x7ff >> (PAGE_SHIFT - 12) : \
144 0x3ffff >> (PAGE_SHIFT - 12))
145#else
146#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
147#endif
148
149struct mm_struct;
150extern unsigned long arch_randomize_brk(struct mm_struct *mm);
151#define arch_randomize_brk arch_randomize_brk
152
153#ifdef CONFIG_COMPAT
154#define EM_ARM 40
155#define COMPAT_ELF_PLATFORM ("v8l")
156
157#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
158
159/* AArch32 registers. */
160#define COMPAT_ELF_NGREG 18
161typedef unsigned int compat_elf_greg_t;
162typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
163
164/* AArch32 EABI. */
165#define EF_ARM_EABI_MASK 0xff000000
166#define compat_elf_check_arch(x) (((x)->e_machine == EM_ARM) && \
167 ((x)->e_flags & EF_ARM_EABI_MASK))
168
169#define compat_start_thread compat_start_thread
170#define COMPAT_SET_PERSONALITY(ex) set_thread_flag(TIF_32BIT);
171#define COMPAT_ARCH_DLINFO
172extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
173 int uses_interp);
174#define compat_arch_setup_additional_pages \
175 aarch32_setup_vectors_page
176
177#endif /* CONFIG_COMPAT */
178
179#endif
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
new file mode 100644
index 000000000000..ac63519b7b90
--- /dev/null
+++ b/arch/arm64/include/asm/exception.h
@@ -0,0 +1,23 @@
1/*
2 * Based on arch/arm/include/asm/exception.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_EXCEPTION_H
19#define __ASM_EXCEPTION_H
20
21#define __exception __attribute__((section(".exception.text")))
22
23#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
new file mode 100644
index 000000000000..db0563c23482
--- /dev/null
+++ b/arch/arm64/include/asm/exec.h
@@ -0,0 +1,23 @@
1/*
2 * Based on arch/arm/include/asm/exec.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_EXEC_H
19#define __ASM_EXEC_H
20
21extern unsigned long arch_align_stack(unsigned long sp);
22
23#endif /* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/fb.h b/arch/arm64/include/asm/fb.h
new file mode 100644
index 000000000000..adb88a64b2fe
--- /dev/null
+++ b/arch/arm64/include/asm/fb.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_FB_H_
17#define __ASM_FB_H_
18
19#include <linux/fb.h>
20#include <linux/fs.h>
21#include <asm/page.h>
22
23static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
24 unsigned long off)
25{
26 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
27}
28
29static inline int fb_is_primary_device(struct fb_info *info)
30{
31 return 0;
32}
33
34#endif /* __ASM_FB_H_ */
diff --git a/arch/arm64/include/asm/fcntl.h b/arch/arm64/include/asm/fcntl.h
new file mode 100644
index 000000000000..cd2e630c235e
--- /dev/null
+++ b/arch/arm64/include/asm/fcntl.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_FCNTL_H
17#define __ASM_FCNTL_H
18
19/*
20 * Using our own definitions for AArch32 (compat) support.
21 */
22#define O_DIRECTORY 040000 /* must be a directory */
23#define O_NOFOLLOW 0100000 /* don't follow links */
24#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
25#define O_LARGEFILE 0400000
26
27#include <asm-generic/fcntl.h>
28
29#endif
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
new file mode 100644
index 000000000000..b42fab9f62a9
--- /dev/null
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -0,0 +1,64 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_FP_H
17#define __ASM_FP_H
18
19#include <asm/ptrace.h>
20
21#ifndef __ASSEMBLY__
22
23/*
24 * FP/SIMD storage area has:
25 * - FPSR and FPCR
26 * - 32 128-bit data registers
27 *
28 * Note that user_fp forms a prefix of this structure, which is relied
29 * upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must
30 * form a prefix of struct fpsimd_state.
31 */
32struct fpsimd_state {
33 union {
34 struct user_fpsimd_state user_fpsimd;
35 struct {
36 __uint128_t vregs[32];
37 u32 fpsr;
38 u32 fpcr;
39 };
40 };
41};
42
43#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
44/* Masks for extracting the FPSR and FPCR from the FPSCR */
45#define VFP_FPSCR_STAT_MASK 0xf800009f
46#define VFP_FPSCR_CTRL_MASK 0x07f79f00
47/*
48 * The VFP state has 32x64-bit registers and a single 32-bit
49 * control/status register.
50 */
51#define VFP_STATE_SIZE ((32 * 8) + 4)
52#endif
53
54struct task_struct;
55
56extern void fpsimd_save_state(struct fpsimd_state *state);
57extern void fpsimd_load_state(struct fpsimd_state *state);
58
59extern void fpsimd_thread_switch(struct task_struct *next);
60extern void fpsimd_flush_thread(void);
61
62#endif
63
64#endif
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
new file mode 100644
index 000000000000..3468ae8439fa
--- /dev/null
+++ b/arch/arm64/include/asm/futex.h
@@ -0,0 +1,136 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_FUTEX_H
17#define __ASM_FUTEX_H
18
19#ifdef __KERNEL__
20
21#include <linux/futex.h>
22#include <linux/uaccess.h>
23#include <asm/errno.h>
24
25#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
26 asm volatile( \
27"1: ldaxr %w1, %2\n" \
28 insn "\n" \
29"2: stlxr %w3, %w0, %2\n" \
30" cbnz %w3, 1b\n" \
31"3:\n" \
32" .pushsection .fixup,\"ax\"\n" \
33"4: mov %w0, %w5\n" \
34" b 3b\n" \
35" .popsection\n" \
36" .pushsection __ex_table,\"a\"\n" \
37" .align 3\n" \
38" .quad 1b, 4b, 2b, 4b\n" \
39" .popsection\n" \
40 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
41 : "r" (oparg), "Ir" (-EFAULT) \
42 : "cc")
43
44static inline int
45futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
46{
47 int op = (encoded_op >> 28) & 7;
48 int cmp = (encoded_op >> 24) & 15;
49 int oparg = (encoded_op << 8) >> 20;
50 int cmparg = (encoded_op << 20) >> 20;
51 int oldval = 0, ret, tmp;
52
53 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
54 oparg = 1 << oparg;
55
56 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
57 return -EFAULT;
58
59 pagefault_disable(); /* implies preempt_disable() */
60
61 switch (op) {
62 case FUTEX_OP_SET:
63 __futex_atomic_op("mov %w0, %w4",
64 ret, oldval, uaddr, tmp, oparg);
65 break;
66 case FUTEX_OP_ADD:
67 __futex_atomic_op("add %w0, %w1, %w4",
68 ret, oldval, uaddr, tmp, oparg);
69 break;
70 case FUTEX_OP_OR:
71 __futex_atomic_op("orr %w0, %w1, %w4",
72 ret, oldval, uaddr, tmp, oparg);
73 break;
74 case FUTEX_OP_ANDN:
75 __futex_atomic_op("and %w0, %w1, %w4",
76 ret, oldval, uaddr, tmp, ~oparg);
77 break;
78 case FUTEX_OP_XOR:
79 __futex_atomic_op("eor %w0, %w1, %w4",
80 ret, oldval, uaddr, tmp, oparg);
81 break;
82 default:
83 ret = -ENOSYS;
84 }
85
86 pagefault_enable(); /* subsumes preempt_enable() */
87
88 if (!ret) {
89 switch (cmp) {
90 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
91 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
92 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
93 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
94 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
95 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
96 default: ret = -ENOSYS;
97 }
98 }
99 return ret;
100}
101
102static inline int
103futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
104 u32 oldval, u32 newval)
105{
106 int ret = 0;
107 u32 val, tmp;
108
109 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
110 return -EFAULT;
111
112 asm volatile("// futex_atomic_cmpxchg_inatomic\n"
113"1: ldaxr %w1, %2\n"
114" sub %w3, %w1, %w4\n"
115" cbnz %w3, 3f\n"
116"2: stlxr %w3, %w5, %2\n"
117" cbnz %w3, 1b\n"
118"3:\n"
119" .pushsection .fixup,\"ax\"\n"
120"4: mov %w0, %w6\n"
121" b 3b\n"
122" .popsection\n"
123" .pushsection __ex_table,\"a\"\n"
124" .align 3\n"
125" .quad 1b, 4b, 2b, 4b\n"
126" .popsection\n"
127 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
128 : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
129 : "cc", "memory");
130
131 *uval = val;
132 return ret;
133}
134
135#endif /* __KERNEL__ */
136#endif /* __ASM_FUTEX_H */
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
new file mode 100644
index 000000000000..507546353d62
--- /dev/null
+++ b/arch/arm64/include/asm/hardirq.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_HARDIRQ_H
17#define __ASM_HARDIRQ_H
18
19#include <linux/cache.h>
20#include <linux/threads.h>
21#include <asm/irq.h>
22
23#define NR_IPI 4
24
25typedef struct {
26 unsigned int __softirq_pending;
27#ifdef CONFIG_SMP
28 unsigned int ipi_irqs[NR_IPI];
29#endif
30} ____cacheline_aligned irq_cpustat_t;
31
32#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
33
34#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
35#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
36
37#ifdef CONFIG_SMP
38u64 smp_irq_stat_cpu(unsigned int cpu);
39#define arch_irq_stat_cpu smp_irq_stat_cpu
40#endif
41
42#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
43
44static inline void ack_bad_irq(unsigned int irq)
45{
46 extern unsigned long irq_err_count;
47 irq_err_count++;
48}
49
50extern void handle_IRQ(unsigned int, struct pt_regs *);
51
52#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000000..d064047612b1
--- /dev/null
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -0,0 +1,137 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_HW_BREAKPOINT_H
17#define __ASM_HW_BREAKPOINT_H
18
19#ifdef __KERNEL__
20
21struct arch_hw_breakpoint_ctrl {
22 u32 __reserved : 19,
23 len : 8,
24 type : 2,
25 privilege : 2,
26 enabled : 1;
27};
28
29struct arch_hw_breakpoint {
30 u64 address;
31 u64 trigger;
32 struct arch_hw_breakpoint_ctrl ctrl;
33};
34
35static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
36{
37 return (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
38 ctrl.enabled;
39}
40
41static inline void decode_ctrl_reg(u32 reg,
42 struct arch_hw_breakpoint_ctrl *ctrl)
43{
44 ctrl->enabled = reg & 0x1;
45 reg >>= 1;
46 ctrl->privilege = reg & 0x3;
47 reg >>= 2;
48 ctrl->type = reg & 0x3;
49 reg >>= 2;
50 ctrl->len = reg & 0xff;
51}
52
53/* Breakpoint */
54#define ARM_BREAKPOINT_EXECUTE 0
55
56/* Watchpoints */
57#define ARM_BREAKPOINT_LOAD 1
58#define ARM_BREAKPOINT_STORE 2
59#define AARCH64_ESR_ACCESS_MASK (1 << 6)
60
61/* Privilege Levels */
62#define AARCH64_BREAKPOINT_EL1 1
63#define AARCH64_BREAKPOINT_EL0 2
64
65/* Lengths */
66#define ARM_BREAKPOINT_LEN_1 0x1
67#define ARM_BREAKPOINT_LEN_2 0x3
68#define ARM_BREAKPOINT_LEN_4 0xf
69#define ARM_BREAKPOINT_LEN_8 0xff
70
71/* Kernel stepping */
72#define ARM_KERNEL_STEP_NONE 0
73#define ARM_KERNEL_STEP_ACTIVE 1
74#define ARM_KERNEL_STEP_SUSPEND 2
75
76/*
77 * Limits.
78 * Changing these will require modifications to the register accessors.
79 */
80#define ARM_MAX_BRP 16
81#define ARM_MAX_WRP 16
82#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
83
84/* Virtual debug register bases. */
85#define AARCH64_DBG_REG_BVR 0
86#define AARCH64_DBG_REG_BCR (AARCH64_DBG_REG_BVR + ARM_MAX_BRP)
87#define AARCH64_DBG_REG_WVR (AARCH64_DBG_REG_BCR + ARM_MAX_BRP)
88#define AARCH64_DBG_REG_WCR (AARCH64_DBG_REG_WVR + ARM_MAX_WRP)
89
90/* Debug register names. */
91#define AARCH64_DBG_REG_NAME_BVR "bvr"
92#define AARCH64_DBG_REG_NAME_BCR "bcr"
93#define AARCH64_DBG_REG_NAME_WVR "wvr"
94#define AARCH64_DBG_REG_NAME_WCR "wcr"
95
96/* Accessor macros for the debug registers. */
97#define AARCH64_DBG_READ(N, REG, VAL) do {\
98 asm volatile("mrs %0, dbg" REG #N "_el1" : "=r" (VAL));\
99} while (0)
100
101#define AARCH64_DBG_WRITE(N, REG, VAL) do {\
102 asm volatile("msr dbg" REG #N "_el1, %0" :: "r" (VAL));\
103} while (0)
104
105struct task_struct;
106struct notifier_block;
107struct perf_event;
108struct pmu;
109
110extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
111 int *gen_len, int *gen_type);
112extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
113extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
114extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
115 unsigned long val, void *data);
116
117extern int arch_install_hw_breakpoint(struct perf_event *bp);
118extern void arch_uninstall_hw_breakpoint(struct perf_event *bp);
119extern void hw_breakpoint_pmu_read(struct perf_event *bp);
120extern int hw_breakpoint_slots(int type);
121
122#ifdef CONFIG_HAVE_HW_BREAKPOINT
123extern void hw_breakpoint_thread_switch(struct task_struct *next);
124extern void ptrace_hw_copy_thread(struct task_struct *task);
125#else
126static inline void hw_breakpoint_thread_switch(struct task_struct *next)
127{
128}
129static inline void ptrace_hw_copy_thread(struct task_struct *task)
130{
131}
132#endif
133
134extern struct pmu perf_ops_bp;
135
136#endif /* __KERNEL__ */
137#endif /* __ASM_HW_BREAKPOINT_H */
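
As a sketch, the field layout handled by encode_ctrl_reg() above can be used to build a control value for, say, a 4-byte user-mode store watchpoint; the helper below is hypothetical.

#include <linux/types.h>
#include <asm/hw_breakpoint.h>

/*
 * Hypothetical: control value for a 4-byte, EL0-only store watchpoint,
 * built from the constants and encoder defined in this header.
 */
static u32 example_wp_ctrl(void)
{
	struct arch_hw_breakpoint_ctrl ctrl = {
		.len		= ARM_BREAKPOINT_LEN_4,
		.type		= ARM_BREAKPOINT_STORE,
		.privilege	= AARCH64_BREAKPOINT_EL0,
		.enabled	= 1,
	};

	return encode_ctrl_reg(ctrl);
}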
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
new file mode 100644
index 000000000000..f8190ba45a3e
--- /dev/null
+++ b/arch/arm64/include/asm/hwcap.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_HWCAP_H
17#define __ASM_HWCAP_H
18
19/*
20 * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
21 */
22#define HWCAP_FP (1 << 0)
23#define HWCAP_ASIMD (1 << 1)
24
25#define COMPAT_HWCAP_HALF (1 << 1)
26#define COMPAT_HWCAP_THUMB (1 << 2)
27#define COMPAT_HWCAP_FAST_MULT (1 << 4)
28#define COMPAT_HWCAP_VFP (1 << 6)
29#define COMPAT_HWCAP_EDSP (1 << 7)
30#define COMPAT_HWCAP_NEON (1 << 12)
31#define COMPAT_HWCAP_VFPv3 (1 << 13)
32#define COMPAT_HWCAP_TLS (1 << 15)
33#define COMPAT_HWCAP_VFPv4 (1 << 16)
34#define COMPAT_HWCAP_IDIVA (1 << 17)
35#define COMPAT_HWCAP_IDIVT (1 << 18)
36#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
37
38#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
39/*
40 * This yields a mask that user programs can use to figure out what
41 * instruction set this cpu supports.
42 */
43#define ELF_HWCAP (elf_hwcap)
44#define COMPAT_ELF_HWCAP (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
45 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
46 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
47 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
48 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
49
50extern unsigned int elf_hwcap;
51#endif
52
53#endif
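
From userspace these bits arrive through the AT_HWCAP auxiliary vector entry. Below is a hypothetical, non-kernel example using glibc's getauxval(); the HWCAP_* values are repeated locally because this header is kernel-internal.

#include <stdio.h>
#include <sys/auxv.h>

/* Mirrors the kernel definitions above; illustrative only. */
#define HWCAP_FP	(1 << 0)
#define HWCAP_ASIMD	(1 << 1)

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("fp: %s, asimd: %s\n",
	       (hwcap & HWCAP_FP) ? "yes" : "no",
	       (hwcap & HWCAP_ASIMD) ? "yes" : "no");
	return 0;
}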
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
new file mode 100644
index 000000000000..74a2a7d304a9
--- /dev/null
+++ b/arch/arm64/include/asm/io.h
@@ -0,0 +1,258 @@
1/*
2 * Based on arch/arm/include/asm/io.h
3 *
4 * Copyright (C) 1996-2000 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_IO_H
20#define __ASM_IO_H
21
22#ifdef __KERNEL__
23
24#include <linux/types.h>
25
26#include <asm/byteorder.h>
27#include <asm/barrier.h>
28#include <asm/pgtable.h>
29
30/*
31 * Generic IO read/write. These perform native-endian accesses.
32 */
33static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
34{
35 asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
36}
37
38static inline void __raw_writew(u16 val, volatile void __iomem *addr)
39{
40 asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
41}
42
43static inline void __raw_writel(u32 val, volatile void __iomem *addr)
44{
45 asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
46}
47
48static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
49{
50 asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
51}
52
53static inline u8 __raw_readb(const volatile void __iomem *addr)
54{
55 u8 val;
56 asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
57 return val;
58}
59
60static inline u16 __raw_readw(const volatile void __iomem *addr)
61{
62 u16 val;
63 asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
64 return val;
65}
66
67static inline u32 __raw_readl(const volatile void __iomem *addr)
68{
69 u32 val;
70 asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
71 return val;
72}
73
74static inline u64 __raw_readq(const volatile void __iomem *addr)
75{
76 u64 val;
77 asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
78 return val;
79}
80
81/* IO barriers */
82#define __iormb() rmb()
83#define __iowmb() wmb()
84
85#define mmiowb() do { } while (0)
86
87/*
88 * Relaxed I/O memory access primitives. These follow the Device memory
89 * ordering rules but do not guarantee any ordering relative to Normal memory
90 * accesses.
91 */
92#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; })
93#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
94#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
95
96#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
97#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
98#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
99
100/*
101 * I/O memory access primitives. Reads are ordered relative to any
102 * following Normal memory access. Writes are ordered relative to any prior
103 * Normal memory access.
104 */
105#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
106#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
107#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
108
109#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
110#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
111#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
112
113/*
114 * I/O port access primitives.
115 */
116#define IO_SPACE_LIMIT 0xffff
117#define PCI_IOBASE ((void __iomem *)0xffffffbbfffe0000UL)
118
119static inline u8 inb(unsigned long addr)
120{
121 return readb(addr + PCI_IOBASE);
122}
123
124static inline u16 inw(unsigned long addr)
125{
126 return readw(addr + PCI_IOBASE);
127}
128
129static inline u32 inl(unsigned long addr)
130{
131 return readl(addr + PCI_IOBASE);
132}
133
134static inline void outb(u8 b, unsigned long addr)
135{
136 writeb(b, addr + PCI_IOBASE);
137}
138
139static inline void outw(u16 b, unsigned long addr)
140{
141 writew(b, addr + PCI_IOBASE);
142}
143
144static inline void outl(u32 b, unsigned long addr)
145{
146 writel(b, addr + PCI_IOBASE);
147}
148
149#define inb_p(addr) inb(addr)
150#define inw_p(addr) inw(addr)
151#define inl_p(addr) inl(addr)
152
153#define outb_p(x, addr) outb((x), (addr))
154#define outw_p(x, addr) outw((x), (addr))
155#define outl_p(x, addr) outl((x), (addr))
156
157static inline void insb(unsigned long addr, void *buffer, int count)
158{
159 u8 *buf = buffer;
160 while (count--)
161 *buf++ = __raw_readb(addr + PCI_IOBASE);
162}
163
164static inline void insw(unsigned long addr, void *buffer, int count)
165{
166 u16 *buf = buffer;
167 while (count--)
168 *buf++ = __raw_readw(addr + PCI_IOBASE);
169}
170
171static inline void insl(unsigned long addr, void *buffer, int count)
172{
173 u32 *buf = buffer;
174 while (count--)
175 *buf++ = __raw_readl(addr + PCI_IOBASE);
176}
177
178static inline void outsb(unsigned long addr, const void *buffer, int count)
179{
180 const u8 *buf = buffer;
181 while (count--)
182 __raw_writeb(*buf++, addr + PCI_IOBASE);
183}
184
185static inline void outsw(unsigned long addr, const void *buffer, int count)
186{
187 const u16 *buf = buffer;
188 while (count--)
189 __raw_writew(*buf++, addr + PCI_IOBASE);
190}
191
192static inline void outsl(unsigned long addr, const void *buffer, int count)
193{
194 const u32 *buf = buffer;
195 while (count--)
196 __raw_writel(*buf++, addr + PCI_IOBASE);
197}
198
199#define insb_p(port,to,len) insb(port,to,len)
200#define insw_p(port,to,len) insw(port,to,len)
201#define insl_p(port,to,len) insl(port,to,len)
202
203#define outsb_p(port,from,len) outsb(port,from,len)
204#define outsw_p(port,from,len) outsw(port,from,len)
205#define outsl_p(port,from,len) outsl(port,from,len)
206
207/*
208 * String version of I/O memory access operations.
209 */
210extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t);
211extern void __memcpy_toio(volatile void __iomem *, const void *, size_t);
212extern void __memset_io(volatile void __iomem *, int, size_t);
213
214#define memset_io(c,v,l) __memset_io((c),(v),(l))
215#define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l))
216#define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l))
217
218/*
219 * I/O memory mapping functions.
220 */
221extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
222extern void __iounmap(volatile void __iomem *addr);
223
224#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
225#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
226#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
227
228#define ioremap(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
229#define ioremap_nocache(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
230#define ioremap_wc(addr, size) __ioremap((addr), (size), PROT_NORMAL_NC)
231#define iounmap __iounmap
232
233#define ARCH_HAS_IOREMAP_WC
234#include <asm-generic/iomap.h>
235
236/*
237 * More restrictive address range checking than the default implementation
238 * (PHYS_OFFSET and PHYS_MASK taken into account).
239 */
240#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
241extern int valid_phys_addr_range(unsigned long addr, size_t size);
242extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
243
244extern int devmem_is_allowed(unsigned long pfn);
245
246/*
247 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
248 * access
249 */
250#define xlate_dev_mem_ptr(p) __va(p)
251
252/*
253 * Convert a virtual cached pointer to an uncached pointer
254 */
255#define xlate_dev_kmem_ptr(p) p
256
257#endif /* __KERNEL__ */
258#endif /* __ASM_IO_H */
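To illustrate how the ordered and relaxed accessors above are meant to be combined, here is a hypothetical driver fragment; the example_* names, register offsets and the ioremap'd base pointer are invented for this sketch and are not part of the patch.

static void example_kick_device(void __iomem *base, u32 *ring, u32 tail)
{
	ring[tail] = 1;			/* update in Normal (cacheable) memory */
	writel(tail, base + 0x10);	/* writel() issues __iowmb() first, so the
					 * ring update is visible before the doorbell */
}

static u32 example_read_status(void __iomem *base)
{
	u32 status;

	/* pure register polling: no ordering against Normal memory is needed */
	do {
		status = readl_relaxed(base + 0x14);
	} while (!(status & 1));

	/* ordered read: completes before any subsequent access to DMA'd data */
	return readl(base + 0x18);
}
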
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
new file mode 100644
index 000000000000..a4e1cad3202a
--- /dev/null
+++ b/arch/arm64/include/asm/irq.h
@@ -0,0 +1,8 @@
1#ifndef __ASM_IRQ_H
2#define __ASM_IRQ_H
3
4#include <asm-generic/irq.h>
5
6extern void (*handle_arch_irq)(struct pt_regs *);
7
8#endif
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
new file mode 100644
index 000000000000..aa11943b8502
--- /dev/null
+++ b/arch/arm64/include/asm/irqflags.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_IRQFLAGS_H
17#define __ASM_IRQFLAGS_H
18
19#ifdef __KERNEL__
20
21#include <asm/ptrace.h>
22
23/*
24 * CPU interrupt mask handling.
25 */
26static inline unsigned long arch_local_irq_save(void)
27{
28 unsigned long flags;
29 asm volatile(
30 "mrs %0, daif // arch_local_irq_save\n"
31 "msr daifset, #2"
32 : "=r" (flags)
33 :
34 : "memory");
35 return flags;
36}
37
38static inline void arch_local_irq_enable(void)
39{
40 asm volatile(
41 "msr daifclr, #2 // arch_local_irq_enable"
42 :
43 :
44 : "memory");
45}
46
47static inline void arch_local_irq_disable(void)
48{
49 asm volatile(
50 "msr daifset, #2 // arch_local_irq_disable"
51 :
52 :
53 : "memory");
54}
55
56#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
57#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
58
59/*
60 * Save the current interrupt enable state.
61 */
62static inline unsigned long arch_local_save_flags(void)
63{
64 unsigned long flags;
65 asm volatile(
66 "mrs %0, daif // arch_local_save_flags"
67 : "=r" (flags)
68 :
69 : "memory");
70 return flags;
71}
72
73/*
74 * restore saved IRQ state
75 */
76static inline void arch_local_irq_restore(unsigned long flags)
77{
78 asm volatile(
79 "msr daif, %0 // arch_local_irq_restore"
80 :
81 : "r" (flags)
82 : "memory");
83}
84
85static inline int arch_irqs_disabled_flags(unsigned long flags)
86{
87 return flags & PSR_I_BIT;
88}
89
90#endif
91#endif
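For reference, the daifset/daifclr immediates above address the PSTATE.DAIF bits (D = debug, A = SError, I = IRQ, F = FIQ), so #2 masks or unmasks IRQs and #1 FIQs, as used by the local_fiq_* macros. A minimal sketch of the usual save/restore pattern built on these primitives (normally reached through the generic local_irq_save()/local_irq_restore() wrappers rather than called directly):

static void example_critical_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* old DAIF value saved, IRQs now masked */

	/* ... work that must not race with interrupt handlers ... */

	arch_local_irq_restore(flags);	/* nesting-safe: previous mask state returns */
}
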
diff --git a/arch/arm64/include/asm/memblock.h b/arch/arm64/include/asm/memblock.h
new file mode 100644
index 000000000000..6afeed2467f1
--- /dev/null
+++ b/arch/arm64/include/asm/memblock.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_MEMBLOCK_H
17#define __ASM_MEMBLOCK_H
18
19extern void arm64_memblock_init(void);
20
21#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
new file mode 100644
index 000000000000..1cac16a001cb
--- /dev/null
+++ b/arch/arm64/include/asm/memory.h
@@ -0,0 +1,144 @@
1/*
2 * Based on arch/arm/include/asm/memory.h
3 *
4 * Copyright (C) 2000-2002 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * Note: this file should not be included by non-asm/.h files
20 */
21#ifndef __ASM_MEMORY_H
22#define __ASM_MEMORY_H
23
24#include <linux/compiler.h>
25#include <linux/const.h>
26#include <linux/types.h>
27#include <asm/sizes.h>
28
29/*
30 * Allow for constants defined here to be used from assembly code
31 * by appending the UL suffix only during actual C code compilation.
32 */
33#define UL(x) _AC(x, UL)
34
35/*
36 * PAGE_OFFSET - the virtual address of the start of the kernel image.
37 * VA_BITS - the maximum number of bits for virtual addresses.
38 * TASK_SIZE - the maximum size of a user space task.
39 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
40 * The module space lives between the addresses given by TASK_SIZE
41 * and PAGE_OFFSET - it must be within 128MB of the kernel text.
42 */
43#define PAGE_OFFSET UL(0xffffffc000000000)
44#define MODULES_END (PAGE_OFFSET)
45#define MODULES_VADDR (MODULES_END - SZ_64M)
46#define VA_BITS (39)
47#define TASK_SIZE_64 (UL(1) << VA_BITS)
48
49#ifdef CONFIG_COMPAT
50#define TASK_SIZE_32 UL(0x100000000)
51#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
52 TASK_SIZE_32 : TASK_SIZE_64)
53#else
54#define TASK_SIZE TASK_SIZE_64
55#endif /* CONFIG_COMPAT */
56
57#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
58
59#if TASK_SIZE_64 > MODULES_VADDR
60#error Top of 64-bit user space clashes with start of module space
61#endif
62
63/*
64 * Physical vs virtual RAM address space conversion. These are
65 * private definitions which should NOT be used outside memory.h
66 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
67 */
68#define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
69#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
70
71/*
72 * Convert a physical address to a Page Frame Number and back
73 */
74#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
75#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
76
77/*
78 * Convert a page to/from a physical address
79 */
80#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
81#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
82
83/*
84 * Memory types available.
85 */
86#define MT_DEVICE_nGnRnE 0
87#define MT_DEVICE_nGnRE 1
88#define MT_DEVICE_GRE 2
89#define MT_NORMAL_NC 3
90#define MT_NORMAL 4
91
92#ifndef __ASSEMBLY__
93
94extern phys_addr_t memstart_addr;
95/* PHYS_OFFSET - the physical address of the start of memory. */
96#define PHYS_OFFSET ({ memstart_addr; })
97
98/*
99 * PFNs are used to describe any physical page; this means
100 * PFN 0 == physical address 0.
101 *
102 * This is the PFN of the first RAM page in the kernel
103 * direct-mapped view. We assume this is the first page
104 * of RAM in the mem_map as well.
105 */
106#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
107
108/*
109 * Note: Drivers should NOT use these. They are the wrong
110 * translation to use for DMA addresses. Use the driver
111 * DMA support - see dma-mapping.h.
112 */
113static inline phys_addr_t virt_to_phys(const volatile void *x)
114{
115 return __virt_to_phys((unsigned long)(x));
116}
117
118static inline void *phys_to_virt(phys_addr_t x)
119{
120 return (void *)(__phys_to_virt(x));
121}
122
123/*
124 * Drivers should NOT use these either.
125 */
126#define __pa(x) __virt_to_phys((unsigned long)(x))
127#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
128#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
129
130/*
131 * virt_to_page(k) convert a _valid_ virtual address to struct page *
132 * virt_addr_valid(k) indicates whether a virtual address is valid
133 */
134#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
135
136#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
137#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
138 ((void *)(kaddr) < (void *)high_memory))
139
140#endif
141
142#include <asm-generic/memory_model.h>
143
144#endif
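A worked example of the linear-map conversions above may help; the PHYS_OFFSET value of 0x80000000 is only an assumption for the illustration (the real value comes from memstart_addr, discovered at boot), and 4KB pages are assumed.

/*
 *   kaddr              = 0xffffffc000001000          (PAGE_OFFSET + 0x1000)
 *   __pa(kaddr)        = kaddr - PAGE_OFFSET + PHYS_OFFSET = 0x80001000
 *   __va(0x80001000)   = 0x80001000 - PHYS_OFFSET + PAGE_OFFSET = kaddr
 *   __phys_to_pfn(0x80001000) = 0x80001000 >> 12     = 0x80001
 *   virt_to_page(kaddr)       = pfn_to_page(0x80001)
 */
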
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
new file mode 100644
index 000000000000..d4f7fd5b9e33
--- /dev/null
+++ b/arch/arm64/include/asm/mmu.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_MMU_H
17#define __ASM_MMU_H
18
19typedef struct {
20 unsigned int id;
21 raw_spinlock_t id_lock;
22 void *vdso;
23} mm_context_t;
24
25#define ASID(mm) ((mm)->context.id & 0xffff)
26
27extern void paging_init(void);
28extern void setup_mm_for_reboot(void);
29
30#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
new file mode 100644
index 000000000000..f68465dee026
--- /dev/null
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -0,0 +1,152 @@
1/*
2 * Based on arch/arm/include/asm/mmu_context.h
3 *
4 * Copyright (C) 1996 Russell King.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_MMU_CONTEXT_H
20#define __ASM_MMU_CONTEXT_H
21
22#include <linux/compiler.h>
23#include <linux/sched.h>
24
25#include <asm/cacheflush.h>
26#include <asm/proc-fns.h>
27#include <asm-generic/mm_hooks.h>
28#include <asm/cputype.h>
29#include <asm/pgtable.h>
30
31#define MAX_ASID_BITS 16
32
33extern unsigned int cpu_last_asid;
34
35void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
36void __new_context(struct mm_struct *mm);
37
38/*
39 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
40 */
41static inline void cpu_set_reserved_ttbr0(void)
42{
43 unsigned long ttbr = page_to_phys(empty_zero_page);
44
45 asm(
46 " msr ttbr0_el1, %0 // set TTBR0\n"
47 " isb"
48 :
49 : "r" (ttbr));
50}
51
52static inline void switch_new_context(struct mm_struct *mm)
53{
54 unsigned long flags;
55
56 __new_context(mm);
57
58 local_irq_save(flags);
59 cpu_switch_mm(mm->pgd, mm);
60 local_irq_restore(flags);
61}
62
63static inline void check_and_switch_context(struct mm_struct *mm,
64 struct task_struct *tsk)
65{
66 /*
67 * Required during context switch to avoid speculative page table
68 * walking with the wrong TTBR.
69 */
70 cpu_set_reserved_ttbr0();
71
72 if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
73 /*
74 * The ASID is from the current generation, just switch to the
75 * new pgd. This condition is only true for calls from
76 * context_switch() and interrupts are already disabled.
77 */
78 cpu_switch_mm(mm->pgd, mm);
79 else if (irqs_disabled())
80 /*
81 * Defer the new ASID allocation until after the context
82 * switch critical region since __new_context() cannot be
83 * called with interrupts disabled.
84 */
85 set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
86 else
87 /*
88 * That is a direct call to switch_mm() or activate_mm() with
89 * interrupts enabled and a new context.
90 */
91 switch_new_context(mm);
92}
93
94#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
95#define destroy_context(mm) do { } while(0)
96
97#define finish_arch_post_lock_switch \
98 finish_arch_post_lock_switch
99static inline void finish_arch_post_lock_switch(void)
100{
101 if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
102 struct mm_struct *mm = current->mm;
103 unsigned long flags;
104
105 __new_context(mm);
106
107 local_irq_save(flags);
108 cpu_switch_mm(mm->pgd, mm);
109 local_irq_restore(flags);
110 }
111}
112
113/*
114 * This is called when "tsk" is about to enter lazy TLB mode.
115 *
116 * mm: describes the currently active mm context
117 * tsk: task which is entering lazy tlb
118 * cpu: cpu number which is entering lazy tlb
119 *
120 * tsk->mm will be NULL
121 */
122static inline void
123enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
124{
125}
126
127/*
128 * This is the actual mm switch as far as the scheduler
129 * is concerned. No registers are touched. We avoid
130 * calling the CPU specific function when the mm hasn't
131 * actually changed.
132 */
133static inline void
134switch_mm(struct mm_struct *prev, struct mm_struct *next,
135 struct task_struct *tsk)
136{
137 unsigned int cpu = smp_processor_id();
138
139#ifdef CONFIG_SMP
140 /* check for possible thread migration */
141 if (!cpumask_empty(mm_cpumask(next)) &&
142 !cpumask_test_cpu(cpu, mm_cpumask(next)))
143 __flush_icache_all();
144#endif
145 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
146 check_and_switch_context(next, tsk);
147}
148
149#define deactivate_mm(tsk,mm) do { } while (0)
150#define activate_mm(prev,next) switch_mm(prev, next, NULL)
151
152#endif
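The generation check in check_and_switch_context() above is compact enough that a worked example may help; the numbers are invented and assume the context.id layout of a 16-bit hardware ASID in the low bits with a generation counter above it (MAX_ASID_BITS = 16).

/*
 *   cpu_last_asid   = 0x0003000a   (generation 3, last allocated ASID 0x0a)
 *
 *   mm->context.id  = 0x00030004   -> id ^ cpu_last_asid = 0x0000000e
 *                                     >> 16 == 0: same generation, fast path,
 *                                     just cpu_switch_mm()
 *
 *   mm->context.id  = 0x000200ff   -> id ^ cpu_last_asid = 0x000100f5
 *                                     >> 16 != 0: stale generation, either
 *                                     defer via TIF_SWITCH_MM (irqs disabled)
 *                                     or call switch_new_context() directly
 */
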
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
new file mode 100644
index 000000000000..e80e232b730e
--- /dev/null
+++ b/arch/arm64/include/asm/module.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_MODULE_H
17#define __ASM_MODULE_H
18
19#include <asm-generic/module.h>
20
21#define MODULE_ARCH_VERMAGIC "aarch64"
22
23#endif /* __ASM_MODULE_H */
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
new file mode 100644
index 000000000000..46bf66628b6a
--- /dev/null
+++ b/arch/arm64/include/asm/page.h
@@ -0,0 +1,67 @@
1/*
2 * Based on arch/arm/include/asm/page.h
3 *
4 * Copyright (C) 1995-2003 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_PAGE_H
20#define __ASM_PAGE_H
21
22/* PAGE_SHIFT determines the page size */
23#ifdef CONFIG_ARM64_64K_PAGES
24#define PAGE_SHIFT 16
25#else
26#define PAGE_SHIFT 12
27#endif
28#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
29#define PAGE_MASK (~(PAGE_SIZE-1))
30
31/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
32#define __HAVE_ARCH_GATE_AREA 1
33
34#ifndef __ASSEMBLY__
35
36#ifdef CONFIG_ARM64_64K_PAGES
37#include <asm/pgtable-2level-types.h>
38#else
39#include <asm/pgtable-3level-types.h>
40#endif
41
42extern void __cpu_clear_user_page(void *p, unsigned long user);
43extern void __cpu_copy_user_page(void *to, const void *from,
44 unsigned long user);
45extern void copy_page(void *to, const void *from);
46extern void clear_page(void *to);
47
48#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
49#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
50
51typedef struct page *pgtable_t;
52
53#ifdef CONFIG_HAVE_ARCH_PFN_VALID
54extern int pfn_valid(unsigned long);
55#endif
56
57#include <asm/memory.h>
58
59#endif /* !__ASSEMBLY__ */
60
61#define VM_DATA_DEFAULT_FLAGS \
62 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
63 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
64
65#include <asm-generic/getorder.h>
66
67#endif
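Spelling out the arithmetic behind the two page-size configurations above (purely illustrative):

/*
 *   CONFIG_ARM64_64K_PAGES unset:  PAGE_SHIFT = 12
 *       PAGE_SIZE = 1 << 12 = 4096 (4KB)
 *       PAGE_MASK = ~0xfff  = 0xfffffffffffff000
 *
 *   CONFIG_ARM64_64K_PAGES set:    PAGE_SHIFT = 16
 *       PAGE_SIZE = 1 << 16 = 65536 (64KB), matching EXEC_PAGESIZE in param.h
 *       PAGE_MASK = ~0xffff = 0xffffffffffff0000
 */
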
diff --git a/arch/arm64/include/asm/param.h b/arch/arm64/include/asm/param.h
new file mode 100644
index 000000000000..8e3a281d448a
--- /dev/null
+++ b/arch/arm64/include/asm/param.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PARAM_H
17#define __ASM_PARAM_H
18
19#define EXEC_PAGESIZE 65536
20
21#include <asm-generic/param.h>
22
23#endif
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
new file mode 100644
index 000000000000..a6fffd511c5e
--- /dev/null
+++ b/arch/arm64/include/asm/perf_event.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ASM_PERF_EVENT_H
18#define __ASM_PERF_EVENT_H
19
20/* It's quiet around here... */
21
22#endif
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
new file mode 100644
index 000000000000..f214069ec5d5
--- /dev/null
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -0,0 +1,113 @@
1/*
2 * Based on arch/arm/include/asm/pgalloc.h
3 *
4 * Copyright (C) 2000-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_PGALLOC_H
20#define __ASM_PGALLOC_H
21
22#include <asm/pgtable-hwdef.h>
23#include <asm/processor.h>
24#include <asm/cacheflush.h>
25#include <asm/tlbflush.h>
26
27#define check_pgt_cache() do { } while (0)
28
29#ifndef CONFIG_ARM64_64K_PAGES
30
31static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
32{
33 return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
34}
35
36static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
37{
38 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
39 free_page((unsigned long)pmd);
40}
41
42static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
43{
44 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
45}
46
47#endif /* CONFIG_ARM64_64K_PAGES */
48
49extern pgd_t *pgd_alloc(struct mm_struct *mm);
50extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
51
52#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
53
54static inline pte_t *
55pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
56{
57 return (pte_t *)__get_free_page(PGALLOC_GFP);
58}
59
60static inline pgtable_t
61pte_alloc_one(struct mm_struct *mm, unsigned long addr)
62{
63 struct page *pte;
64
65 pte = alloc_pages(PGALLOC_GFP, 0);
66 if (pte)
67 pgtable_page_ctor(pte);
68
69 return pte;
70}
71
72/*
73 * Free a PTE table.
74 */
75static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
76{
77 if (pte)
78 free_page((unsigned long)pte);
79}
80
81static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
82{
83 pgtable_page_dtor(pte);
84 __free_page(pte);
85}
86
87static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
88 pmdval_t prot)
89{
90 set_pmd(pmdp, __pmd(pte | prot));
91}
92
93/*
94 * Populate the pmdp entry with a pointer to the pte. This pmd is part
95 * of the mm address space.
96 */
97static inline void
98pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
99{
100 /*
101 * The pmd must be loaded with the physical address of the PTE table
102 */
103 __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE);
104}
105
106static inline void
107pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
108{
109 __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE);
110}
111#define pmd_pgtable(pmd) pmd_page(pmd)
112
113#endif
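A sketch of what the populate helpers above actually write into a pmd entry; the physical address is invented for the example.

/*
 *   pte table page at physical 0x80aa1000
 *   __pmd_populate(pmdp, 0x80aa1000, PMD_TYPE_TABLE)
 *       -> *pmdp = 0x80aa1000 | 3 = 0x80aa1003
 *   i.e. a "table" descriptor (low bits 0b11) pointing at the next level,
 *   as used by both pmd_populate_kernel() (kernel ptes, via __pa()) and
 *   pmd_populate() (user ptes, via page_to_phys()).
 */
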
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
new file mode 100644
index 000000000000..0a8ed3f94e93
--- /dev/null
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PGTABLE_2LEVEL_HWDEF_H
17#define __ASM_PGTABLE_2LEVEL_HWDEF_H
18
19/*
20 * With LPAE and 64KB pages, there are 2 levels of page tables. Each level has
21 * 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
22 * used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
23 * entry representing 512MB. The user and kernel address spaces are limited to
24 * 512GB and therefore we only use 1024 entries in the PGD.
25 */
26#define PTRS_PER_PTE 8192
27#define PTRS_PER_PGD 1024
28
29/*
30 * PGDIR_SHIFT determines the size a top-level page table entry can map.
31 */
32#define PGDIR_SHIFT 29
33#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
34#define PGDIR_MASK (~(PGDIR_SIZE-1))
35
36/*
37 * section address mask and size definitions.
38 */
39#define SECTION_SHIFT 29
40#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT)
41#define SECTION_MASK (~(SECTION_SIZE-1))
42
43#endif
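The numbers in the comment above can be checked directly:

/*
 *   one table:        8192 entries * 8 bytes = 64KB = one 64KB page
 *   one PTE entry:    maps one 64KB page
 *   one PGD entry:    8192 * 64KB = 512MB = 1 << 29      (hence PGDIR_SHIFT 29)
 *   full 8192 PGD:    8192 * 512MB = 4TB
 *   39-bit VA space:  512GB / 512MB = 1024 PGD entries   (hence PTRS_PER_PGD 1024)
 */
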
diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h
new file mode 100644
index 000000000000..3c3ca7d361e4
--- /dev/null
+++ b/arch/arm64/include/asm/pgtable-2level-types.h
@@ -0,0 +1,60 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
17#define __ASM_PGTABLE_2LEVEL_TYPES_H
18
19typedef u64 pteval_t;
20typedef u64 pgdval_t;
21typedef pgdval_t pmdval_t;
22
23#undef STRICT_MM_TYPECHECKS
24
25#ifdef STRICT_MM_TYPECHECKS
26
27/*
28 * These are used to make use of C type-checking..
29 */
30typedef struct { pteval_t pte; } pte_t;
31typedef struct { pgdval_t pgd; } pgd_t;
32typedef struct { pteval_t pgprot; } pgprot_t;
33
34#define pte_val(x) ((x).pte)
35#define pgd_val(x) ((x).pgd)
36#define pgprot_val(x) ((x).pgprot)
37
38#define __pte(x) ((pte_t) { (x) } )
39#define __pgd(x) ((pgd_t) { (x) } )
40#define __pgprot(x) ((pgprot_t) { (x) } )
41
42#else /* !STRICT_MM_TYPECHECKS */
43
44typedef pteval_t pte_t;
45typedef pgdval_t pgd_t;
46typedef pteval_t pgprot_t;
47
48#define pte_val(x) (x)
49#define pgd_val(x) (x)
50#define pgprot_val(x) (x)
51
52#define __pte(x) (x)
53#define __pgd(x) (x)
54#define __pgprot(x) (x)
55
56#endif /* STRICT_MM_TYPECHECKS */
57
58#include <asm-generic/pgtable-nopmd.h>
59
60#endif /* __ASM_PGTABLE_2LEVEL_TYPES_H */
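A small illustration of what STRICT_MM_TYPECHECKS buys; example_copy is not part of this patch. With the struct wrappers enabled, accidentally mixing table levels fails to compile, while the explicit conversions work in both modes.

static inline pgd_t example_copy(pte_t pte)
{
	/* pgd_t pgd = pte;  -- compile error when STRICT_MM_TYPECHECKS is defined */
	return __pgd(pte_val(pte));	/* explicit conversion: fine either way */
}
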
diff --git a/arch/arm64/include/asm/pgtable-3level-hwdef.h b/arch/arm64/include/asm/pgtable-3level-hwdef.h
new file mode 100644
index 000000000000..3dbf941d7767
--- /dev/null
+++ b/arch/arm64/include/asm/pgtable-3level-hwdef.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PGTABLE_3LEVEL_HWDEF_H
17#define __ASM_PGTABLE_3LEVEL_HWDEF_H
18
19/*
20 * With LPAE and 4KB pages, there are 3 levels of page tables. Each level has
21 * 512 entries of 8 bytes each, occupying a 4K page. The first level table
22 * covers a range of 512GB, each entry representing 1GB. The user and kernel
23 * address spaces are limited to 512GB each.
24 */
25#define PTRS_PER_PTE 512
26#define PTRS_PER_PMD 512
27#define PTRS_PER_PGD 512
28
29/*
30 * PGDIR_SHIFT determines the size a top-level page table entry can map.
31 */
32#define PGDIR_SHIFT 30
33#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
34#define PGDIR_MASK (~(PGDIR_SIZE-1))
35
36/*
37 * PMD_SHIFT determines the size a middle-level page table entry can map.
38 */
39#define PMD_SHIFT 21
40#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
41#define PMD_MASK (~(PMD_SIZE-1))
42
43/*
44 * section address mask and size definitions.
45 */
46#define SECTION_SHIFT 21
47#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT)
48#define SECTION_MASK (~(SECTION_SIZE-1))
49
50#endif
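The same back-of-the-envelope check for the 4KB-page configuration:

/*
 *   one table:        512 entries * 8 bytes = 4KB = one 4KB page
 *   one PTE entry:    maps one 4KB page
 *   one PMD entry:    512 * 4KB  = 2MB = 1 << 21          (hence PMD_SHIFT 21)
 *   one PGD entry:    512 * 2MB  = 1GB = 1 << 30          (hence PGDIR_SHIFT 30)
 *   full 512 PGD:     512 * 1GB  = 512GB, i.e. the 39-bit VA limit
 */
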
diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-3level-types.h
new file mode 100644
index 000000000000..4489615f14a9
--- /dev/null
+++ b/arch/arm64/include/asm/pgtable-3level-types.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PGTABLE_3LEVEL_TYPES_H
17#define __ASM_PGTABLE_3LEVEL_TYPES_H
18
19typedef u64 pteval_t;
20typedef u64 pmdval_t;
21typedef u64 pgdval_t;
22
23#undef STRICT_MM_TYPECHECKS
24
25#ifdef STRICT_MM_TYPECHECKS
26
27/*
28 * These are used to make use of C type-checking..
29 */
30typedef struct { pteval_t pte; } pte_t;
31typedef struct { pmdval_t pmd; } pmd_t;
32typedef struct { pgdval_t pgd; } pgd_t;
33typedef struct { pteval_t pgprot; } pgprot_t;
34
35#define pte_val(x) ((x).pte)
36#define pmd_val(x) ((x).pmd)
37#define pgd_val(x) ((x).pgd)
38#define pgprot_val(x) ((x).pgprot)
39
40#define __pte(x) ((pte_t) { (x) } )
41#define __pmd(x) ((pmd_t) { (x) } )
42#define __pgd(x) ((pgd_t) { (x) } )
43#define __pgprot(x) ((pgprot_t) { (x) } )
44
45#else /* !STRICT_MM_TYPECHECKS */
46
47typedef pteval_t pte_t;
48typedef pmdval_t pmd_t;
49typedef pgdval_t pgd_t;
50typedef pteval_t pgprot_t;
51
52#define pte_val(x) (x)
53#define pmd_val(x) (x)
54#define pgd_val(x) (x)
55#define pgprot_val(x) (x)
56
57#define __pte(x) (x)
58#define __pmd(x) (x)
59#define __pgd(x) (x)
60#define __pgprot(x) (x)
61
62#endif /* STRICT_MM_TYPECHECKS */
63
64#include <asm-generic/pgtable-nopud.h>
65
66#endif /* __ASM_PGTABLE_3LEVEL_TYPES_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
new file mode 100644
index 000000000000..0f3b4581d925
--- /dev/null
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PGTABLE_HWDEF_H
17#define __ASM_PGTABLE_HWDEF_H
18
19#ifdef CONFIG_ARM64_64K_PAGES
20#include <asm/pgtable-2level-hwdef.h>
21#else
22#include <asm/pgtable-3level-hwdef.h>
23#endif
24
25/*
26 * Hardware page table definitions.
27 *
28 * Level 2 descriptor (PMD).
29 */
30#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
31#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
32#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
33#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
34
35/*
36 * Section
37 */
38#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
39#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
40#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
41#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
42
43/*
44 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
45 */
46#define PMD_ATTRINDX(t) (_AT(pmdval_t, (t)) << 2)
47#define PMD_ATTRINDX_MASK (_AT(pmdval_t, 7) << 2)
48
49/*
50 * Level 3 descriptor (PTE).
51 */
52#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
53#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
54#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
55#define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
56#define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
57#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
58#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
59#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
60#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
61
62/*
63 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
64 */
65#define PTE_ATTRINDX(t) (_AT(pteval_t, (t)) << 2)
66#define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2)
67
68/*
69 * 40-bit physical address supported.
70 */
71#define PHYS_MASK_SHIFT (40)
72#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
73
74/*
75 * TCR flags.
76 */
77#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0))
78#define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24))
79#define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24))
80#define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24))
81#define TCR_IRGN_WBnWA ((UL(3) << 8) | (UL(3) << 24))
82#define TCR_IRGN_MASK ((UL(3) << 8) | (UL(3) << 24))
83#define TCR_ORGN_NC ((UL(0) << 10) | (UL(0) << 26))
84#define TCR_ORGN_WBWA ((UL(1) << 10) | (UL(1) << 26))
85#define TCR_ORGN_WT ((UL(2) << 10) | (UL(2) << 26))
86#define TCR_ORGN_WBnWA ((UL(3) << 10) | (UL(3) << 26))
87#define TCR_ORGN_MASK ((UL(3) << 10) | (UL(3) << 26))
88#define TCR_SHARED ((UL(3) << 12) | (UL(3) << 28))
89#define TCR_TG0_64K (UL(1) << 14)
90#define TCR_TG1_64K (UL(1) << 30)
91#define TCR_IPS_40BIT (UL(2) << 32)
92#define TCR_ASID16 (UL(1) << 36)
93
94#endif
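As a concrete instance of the TCR helpers above: with the 39-bit virtual address space from memory.h, the kernel would program TCR_TxSZ(39), which places 64 - 39 = 25 into both size fields.

/*
 *   TCR_TxSZ(39) = (25 << 16) | (25 << 0)
 *                = T1SZ = 25 (bits 21:16), T0SZ = 25 (bits 5:0)
 *   => each of TTBR0/TTBR1 translates 2^(64 - 25) = 2^39 bytes = 512GB
 */
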
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
new file mode 100644
index 000000000000..8960239be722
--- /dev/null
+++ b/arch/arm64/include/asm/pgtable.h
@@ -0,0 +1,328 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PGTABLE_H
17#define __ASM_PGTABLE_H
18
19#include <asm/proc-fns.h>
20
21#include <asm/memory.h>
22#include <asm/pgtable-hwdef.h>
23
24/*
25 * Software defined PTE bits definition.
26 */
27#define PTE_VALID (_AT(pteval_t, 1) << 0) /* pte_present() check */
28#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
29#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
30#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
31
32/*
33 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
34 */
35#define VMALLOC_START UL(0xffffff8000000000)
36#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
37
38#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
39
40#define FIRST_USER_ADDRESS 0
41
42#ifndef __ASSEMBLY__
43extern void __pte_error(const char *file, int line, unsigned long val);
44extern void __pmd_error(const char *file, int line, unsigned long val);
45extern void __pgd_error(const char *file, int line, unsigned long val);
46
47#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
48#ifndef CONFIG_ARM64_64K_PAGES
49#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
50#endif
51#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
52
53/*
54 * The pgprot_* and protection_map entries will be fixed up at runtime to
55 * include the cacheable and bufferable bits based on memory policy, as well as
56 * any architecture dependent bits like global/ASID and SMP shared mapping
57 * bits.
58 */
59#define _PAGE_DEFAULT PTE_TYPE_PAGE | PTE_AF
60
61extern pgprot_t pgprot_default;
62
63#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
64
65#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
66#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
67#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
68#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
69#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
70#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
71#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
72#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
73#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY)
74
75#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
76#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
77#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
78#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
79#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
80#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
81#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
82
83#endif /* __ASSEMBLY__ */
84
85#define __P000 __PAGE_NONE
86#define __P001 __PAGE_READONLY
87#define __P010 __PAGE_COPY
88#define __P011 __PAGE_COPY
89#define __P100 __PAGE_READONLY_EXEC
90#define __P101 __PAGE_READONLY_EXEC
91#define __P110 __PAGE_COPY_EXEC
92#define __P111 __PAGE_COPY_EXEC
93
94#define __S000 __PAGE_NONE
95#define __S001 __PAGE_READONLY
96#define __S010 __PAGE_SHARED
97#define __S011 __PAGE_SHARED
98#define __S100 __PAGE_READONLY_EXEC
99#define __S101 __PAGE_READONLY_EXEC
100#define __S110 __PAGE_SHARED_EXEC
101#define __S111 __PAGE_SHARED_EXEC
102
103#ifndef __ASSEMBLY__
104/*
105 * ZERO_PAGE is a global shared page that is always zero: used
106 * for zero-mapped memory areas etc..
107 */
108extern struct page *empty_zero_page;
109#define ZERO_PAGE(vaddr) (empty_zero_page)
110
111#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
112
113#define pfn_pte(pfn,prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
114
115#define pte_none(pte) (!pte_val(pte))
116#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
117#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
118#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
119
120#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
121#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
122#define pte_unmap(pte) do { } while (0)
123#define pte_unmap_nested(pte) do { } while (0)
124
125/*
126 * The following only work if pte_present(). Undefined behaviour otherwise.
127 */
128#define pte_present(pte) (pte_val(pte) & PTE_VALID)
129#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
130#define pte_young(pte) (pte_val(pte) & PTE_AF)
131#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
132#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
133#define pte_exec(pte) (!(pte_val(pte) & PTE_XN))
134
135#define pte_present_exec_user(pte) \
136 ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
137 (PTE_VALID | PTE_USER))
138
139#define PTE_BIT_FUNC(fn,op) \
140static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
141
142PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
143PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY);
144PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
145PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
146PTE_BIT_FUNC(mkold, &= ~PTE_AF);
147PTE_BIT_FUNC(mkyoung, |= PTE_AF);
148PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
149
150static inline void set_pte(pte_t *ptep, pte_t pte)
151{
152 *ptep = pte;
153}
154
155extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
156
157static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
158 pte_t *ptep, pte_t pte)
159{
160 if (pte_present_exec_user(pte))
161 __sync_icache_dcache(pte, addr);
162 set_pte(ptep, pte);
163}
164
165/*
166 * Huge pte definitions.
167 */
168#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
169#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
170
171#define __pgprot_modify(prot,mask,bits) \
172 __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
173
174#define __HAVE_ARCH_PTE_SPECIAL
175
176/*
177 * Mark the prot value as uncacheable and unbufferable.
178 */
179#define pgprot_noncached(prot) \
180 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
181#define pgprot_writecombine(prot) \
182 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
183#define pgprot_dmacoherent(prot) \
184 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
185#define __HAVE_PHYS_MEM_ACCESS_PROT
186struct file;
187extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
188 unsigned long size, pgprot_t vma_prot);
189
190#define pmd_none(pmd) (!pmd_val(pmd))
191#define pmd_present(pmd) (pmd_val(pmd))
192
193#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
194
195static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
196{
197 *pmdp = pmd;
198 dsb();
199}
200
201static inline void pmd_clear(pmd_t *pmdp)
202{
203 set_pmd(pmdp, __pmd(0));
204}
205
206static inline pte_t *pmd_page_vaddr(pmd_t pmd)
207{
208 return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
209}
210
211#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
212
213/*
214 * Conversion functions: convert a page and protection to a page entry,
215 * and a page entry and page directory to the page they refer to.
216 */
217#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
218
219#ifndef CONFIG_ARM64_64K_PAGES
220
221#define pud_none(pud) (!pud_val(pud))
222#define pud_bad(pud) (!(pud_val(pud) & 2))
223#define pud_present(pud) (pud_val(pud))
224
225static inline void set_pud(pud_t *pudp, pud_t pud)
226{
227 *pudp = pud;
228 dsb();
229}
230
231static inline void pud_clear(pud_t *pudp)
232{
233 set_pud(pudp, __pud(0));
234}
235
236static inline pmd_t *pud_page_vaddr(pud_t pud)
237{
238 return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
239}
240
241#endif /* CONFIG_ARM64_64K_PAGES */
242
243/* to find an entry in a page-table-directory */
244#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
245
246#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
247
248/* to find an entry in a kernel page-table-directory */
249#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
250
251/* Find an entry in the second-level page table.. */
252#ifndef CONFIG_ARM64_64K_PAGES
253#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
254static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
255{
256 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
257}
258#endif
259
260/* Find an entry in the third-level page table.. */
261#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
262
263static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
264{
265 const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
266 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
267 return pte;
268}
269
270extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
271extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
272
273#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
274#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
275
276/*
277 * Encode and decode a swap entry:
278 * bits 0-1: present (must be zero)
279 * bit 2: PTE_FILE
280 * bits 3-8: swap type
281 * bits 9-63: swap offset
282 */
283#define __SWP_TYPE_SHIFT 3
284#define __SWP_TYPE_BITS 6
285#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
286#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
287
288#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
289#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
290#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
291
292#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
293#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
294
295/*
296 * Ensure that there are not more swap files than can be encoded in the kernel
297 * PTEs.
298 */
299#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
300
301/*
302 * Encode and decode a file entry:
303 * bits 0-1: present (must be zero)
304 * bit 2: PTE_FILE
305 * bits 3-63: file offset / PAGE_SIZE
306 */
307#define pte_file(pte) (pte_val(pte) & PTE_FILE)
308#define pte_to_pgoff(x) (pte_val(x) >> 3)
309#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
310
311#define PTE_FILE_MAX_BITS 61
312
313extern int kern_addr_valid(unsigned long addr);
314
315#include <asm-generic/pgtable.h>
316
317/*
318 * remap a physical page `pfn' of size `size' with page protection `prot'
319 * into virtual address `from'
320 */
321#define io_remap_pfn_range(vma,from,pfn,size,prot) \
322 remap_pfn_range(vma, from, pfn, size, prot)
323
324#define pgtable_cache_init() do { } while (0)
325
326#endif /* !__ASSEMBLY__ */
327
328#endif /* __ASM_PGTABLE_H */
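A worked example of the swap-entry encoding described above (the type and offset values are arbitrary):

/*
 *   __swp_entry(5, 0x1234).val = (5 << 3) | (0x1234 << 9) = 0x246828
 *
 *   __swp_type(entry)   = (0x246828 >> 3) & 0x3f = 5
 *   __swp_offset(entry) =  0x246828 >> 9         = 0x1234
 *
 *   bits 0-1 of the value are zero, so the resulting pte is !pte_present(),
 *   and bit 2 (PTE_FILE) is clear, distinguishing it from a file pte.
 */
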
diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
new file mode 100644
index 000000000000..e6f087806aaf
--- /dev/null
+++ b/arch/arm64/include/asm/pmu.h
@@ -0,0 +1,82 @@
1/*
2 * Based on arch/arm/include/asm/pmu.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_PMU_H
20#define __ASM_PMU_H
21
22#ifdef CONFIG_HW_PERF_EVENTS
23
24/* The events for a given PMU register set. */
25struct pmu_hw_events {
26 /*
27 * The events that are active on the PMU for the given index.
28 */
29 struct perf_event **events;
30
31 /*
32 * A 1 bit for an index indicates that the counter is being used for
33 * an event. A 0 means that the counter can be used.
34 */
35 unsigned long *used_mask;
36
37 /*
38 * Hardware lock to serialize accesses to PMU registers. Needed for the
39 * read/modify/write sequences.
40 */
41 raw_spinlock_t pmu_lock;
42};
43
44struct arm_pmu {
45 struct pmu pmu;
46 cpumask_t active_irqs;
47 const char *name;
48 irqreturn_t (*handle_irq)(int irq_num, void *dev);
49 void (*enable)(struct hw_perf_event *evt, int idx);
50 void (*disable)(struct hw_perf_event *evt, int idx);
51 int (*get_event_idx)(struct pmu_hw_events *hw_events,
52 struct hw_perf_event *hwc);
53 int (*set_event_filter)(struct hw_perf_event *evt,
54 struct perf_event_attr *attr);
55 u32 (*read_counter)(int idx);
56 void (*write_counter)(int idx, u32 val);
57 void (*start)(void);
58 void (*stop)(void);
59 void (*reset)(void *);
60 int (*map_event)(struct perf_event *event);
61 int num_events;
62 atomic_t active_events;
63 struct mutex reserve_mutex;
64 u64 max_period;
65 struct platform_device *plat_device;
66 struct pmu_hw_events *(*get_hw_events)(void);
67};
68
69#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
70
71int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
72
73u64 armpmu_event_update(struct perf_event *event,
74 struct hw_perf_event *hwc,
75 int idx);
76
77int armpmu_event_set_period(struct perf_event *event,
78 struct hw_perf_event *hwc,
79 int idx);
80
81#endif /* CONFIG_HW_PERF_EVENTS */
82#endif /* __ASM_PMU_H */
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
new file mode 100644
index 000000000000..7cdf466fd0c5
--- /dev/null
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -0,0 +1,50 @@
1/*
2 * Based on arch/arm/include/asm/proc-fns.h
3 *
4 * Copyright (C) 1997-1999 Russell King
5 * Copyright (C) 2000 Deep Blue Solutions Ltd
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20#ifndef __ASM_PROCFNS_H
21#define __ASM_PROCFNS_H
22
23#ifdef __KERNEL__
24#ifndef __ASSEMBLY__
25
26#include <asm/page.h>
27
28struct mm_struct;
29
30extern void cpu_cache_off(void);
31extern void cpu_do_idle(void);
32extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
33extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
34
35#include <asm/memory.h>
36
37#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
38
39#define cpu_get_pgd() \
40({ \
41 unsigned long pg; \
42 asm("mrs %0, ttbr0_el1\n" \
43 : "=r" (pg)); \
44 pg &= ~0xffff000000003ffful; \
45 (pgd_t *)phys_to_virt(pg); \
46})
47
48#endif /* __ASSEMBLY__ */
49#endif /* __KERNEL__ */
50#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
new file mode 100644
index 000000000000..39a208a392f7
--- /dev/null
+++ b/arch/arm64/include/asm/processor.h
@@ -0,0 +1,175 @@
1/*
2 * Based on arch/arm/include/asm/processor.h
3 *
4 * Copyright (C) 1995-1999 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_PROCESSOR_H
20#define __ASM_PROCESSOR_H
21
22/*
23 * Default implementation of macro that returns current
24 * instruction pointer ("program counter").
25 */
26#define current_text_addr() ({ __label__ _l; _l: &&_l;})
27
28#ifdef __KERNEL__
29
30#include <linux/string.h>
31
32#include <asm/fpsimd.h>
33#include <asm/hw_breakpoint.h>
34#include <asm/ptrace.h>
35#include <asm/types.h>
36
37#ifdef __KERNEL__
38#define STACK_TOP_MAX TASK_SIZE_64
39#ifdef CONFIG_COMPAT
40#define AARCH32_VECTORS_BASE 0xffff0000
41#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
42 AARCH32_VECTORS_BASE : STACK_TOP_MAX)
43#else
44#define STACK_TOP STACK_TOP_MAX
45#endif /* CONFIG_COMPAT */
46#endif /* __KERNEL__ */
47
48struct debug_info {
49 /* Have we suspended stepping by a debugger? */
50 int suspended_step;
51 /* Allow breakpoints and watchpoints to be disabled for this thread. */
52 int bps_disabled;
53 int wps_disabled;
54 /* Hardware breakpoints pinned to this task. */
55 struct perf_event *hbp_break[ARM_MAX_BRP];
56 struct perf_event *hbp_watch[ARM_MAX_WRP];
57};
58
59struct cpu_context {
60 unsigned long x19;
61 unsigned long x20;
62 unsigned long x21;
63 unsigned long x22;
64 unsigned long x23;
65 unsigned long x24;
66 unsigned long x25;
67 unsigned long x26;
68 unsigned long x27;
69 unsigned long x28;
70 unsigned long fp;
71 unsigned long sp;
72 unsigned long pc;
73};
74
75struct thread_struct {
76 struct cpu_context cpu_context; /* cpu context */
77 unsigned long tp_value;
78 struct fpsimd_state fpsimd_state;
79 unsigned long fault_address; /* fault info */
80 struct debug_info debug; /* debugging */
81};
82
83#define INIT_THREAD { }
84
85static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
86{
87 memset(regs, 0, sizeof(*regs));
88 regs->syscallno = ~0UL;
89 regs->pc = pc;
90}
91
92static inline void start_thread(struct pt_regs *regs, unsigned long pc,
93 unsigned long sp)
94{
95 unsigned long *stack = (unsigned long *)sp;
96
97 start_thread_common(regs, pc);
98 regs->pstate = PSR_MODE_EL0t;
99 regs->sp = sp;
100 regs->regs[2] = stack[2]; /* x2 (envp) */
101 regs->regs[1] = stack[1]; /* x1 (argv) */
102 regs->regs[0] = stack[0]; /* x0 (argc) */
103}
104
105#ifdef CONFIG_COMPAT
106static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
107 unsigned long sp)
108{
109 unsigned int *stack = (unsigned int *)sp;
110
111 start_thread_common(regs, pc);
112 regs->pstate = COMPAT_PSR_MODE_USR;
113 if (pc & 1)
114 regs->pstate |= COMPAT_PSR_T_BIT;
115 regs->compat_sp = sp;
116 regs->regs[2] = stack[2]; /* x2 (envp) */
117 regs->regs[1] = stack[1]; /* x1 (argv) */
118 regs->regs[0] = stack[0]; /* x0 (argc) */
119}
120#endif
121
122/* Forward declaration, a strange C thing */
123struct task_struct;
124
125/* Free all resources held by a thread. */
126extern void release_thread(struct task_struct *);
127
128/* Prepare to copy thread state - unlazy all lazy status */
129#define prepare_to_copy(tsk) do { } while (0)
130
131unsigned long get_wchan(struct task_struct *p);
132
133#define cpu_relax() barrier()
134
135/* Thread switching */
136extern struct task_struct *cpu_switch_to(struct task_struct *prev,
137 struct task_struct *next);
138
139/*
140 * Create a new kernel thread
141 */
142extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
143
144#define task_pt_regs(p) \
145 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
146
147#define KSTK_EIP(tsk) task_pt_regs(tsk)->pc
148#define KSTK_ESP(tsk) task_pt_regs(tsk)->sp
149
150/*
151 * Prefetching support
152 */
153#define ARCH_HAS_PREFETCH
154static inline void prefetch(const void *ptr)
155{
156 asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
157}
158
159#define ARCH_HAS_PREFETCHW
160static inline void prefetchw(const void *ptr)
161{
162 asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
163}
164
165#define ARCH_HAS_SPINLOCK_PREFETCH
166static inline void spin_lock_prefetch(const void *x)
167{
168 prefetchw(x);
169}
170
171#define HAVE_ARCH_PICK_MMAP_LAYOUT
172
173#endif
174
175#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/prom.h b/arch/arm64/include/asm/prom.h
new file mode 100644
index 000000000000..68b90e682957
--- /dev/null
+++ b/arch/arm64/include/asm/prom.h
@@ -0,0 +1 @@
/* Empty for now */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
new file mode 100644
index 000000000000..0fa5d6c9ef76
--- /dev/null
+++ b/arch/arm64/include/asm/ptrace.h
@@ -0,0 +1,207 @@
1/*
2 * Based on arch/arm/include/asm/ptrace.h
3 *
4 * Copyright (C) 1996-2003 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_PTRACE_H
20#define __ASM_PTRACE_H
21
22#include <linux/types.h>
23
24#include <asm/hwcap.h>
25
26/* AArch32-specific ptrace requests */
27#define COMPAT_PTRACE_GETREGS 12
28#define COMPAT_PTRACE_SETREGS 13
29#define COMPAT_PTRACE_GET_THREAD_AREA 22
30#define COMPAT_PTRACE_SET_SYSCALL 23
31#define COMPAT_PTRACE_GETVFPREGS 27
32#define COMPAT_PTRACE_SETVFPREGS 28
33#define COMPAT_PTRACE_GETHBPREGS 29
34#define COMPAT_PTRACE_SETHBPREGS 30
35
36/*
37 * PSR bits
38 */
39#define PSR_MODE_EL0t 0x00000000
40#define PSR_MODE_EL1t 0x00000004
41#define PSR_MODE_EL1h 0x00000005
42#define PSR_MODE_EL2t 0x00000008
43#define PSR_MODE_EL2h 0x00000009
44#define PSR_MODE_EL3t 0x0000000c
45#define PSR_MODE_EL3h 0x0000000d
46#define PSR_MODE_MASK 0x0000000f
47
48/* AArch32 CPSR bits */
49#define PSR_MODE32_BIT 0x00000010
50#define COMPAT_PSR_MODE_USR 0x00000010
51#define COMPAT_PSR_T_BIT 0x00000020
52#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
53
54/* AArch64 SPSR bits */
55#define PSR_F_BIT 0x00000040
56#define PSR_I_BIT 0x00000080
57#define PSR_A_BIT 0x00000100
58#define PSR_D_BIT 0x00000200
59#define PSR_Q_BIT 0x08000000
60#define PSR_V_BIT 0x10000000
61#define PSR_C_BIT 0x20000000
62#define PSR_Z_BIT 0x40000000
63#define PSR_N_BIT 0x80000000
64
65/*
66 * Groups of PSR bits
67 */
68#define PSR_f 0xff000000 /* Flags */
69#define PSR_s 0x00ff0000 /* Status */
70#define PSR_x 0x0000ff00 /* Extension */
71#define PSR_c 0x000000ff /* Control */
72
73/*
74 * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
75 * process is located in memory.
76 */
77#define PT_TEXT_ADDR 0x10000
78#define PT_DATA_ADDR 0x10004
79#define PT_TEXT_END_ADDR 0x10008
80
81#ifndef __ASSEMBLY__
82
83/*
84 * User structures for general purpose, floating point and debug registers.
85 */
86struct user_pt_regs {
87 __u64 regs[31];
88 __u64 sp;
89 __u64 pc;
90 __u64 pstate;
91};
92
93struct user_fpsimd_state {
94 __uint128_t vregs[32];
95 __u32 fpsr;
96 __u32 fpcr;
97};
98
99struct user_hwdebug_state {
100 __u32 dbg_info;
101 struct {
102 __u64 addr;
103 __u32 ctrl;
104 } dbg_regs[16];
105};
106
107#ifdef __KERNEL__
108
109/* sizeof(struct user) for AArch32 */
110#define COMPAT_USER_SZ 296
111/* AArch32 uses x13 as the stack pointer... */
112#define compat_sp regs[13]
113/* ... and x14 as the link register. */
114#define compat_lr regs[14]
115
116/*
117 * This struct defines the way the registers are stored on the stack during an
118 * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
119 * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs.
120 */
121struct pt_regs {
122 union {
123 struct user_pt_regs user_regs;
124 struct {
125 u64 regs[31];
126 u64 sp;
127 u64 pc;
128 u64 pstate;
129 };
130 };
131 u64 orig_x0;
132 u64 syscallno;
133};
134
135#define arch_has_single_step() (1)
136
137#ifdef CONFIG_COMPAT
138#define compat_thumb_mode(regs) \
139 (((regs)->pstate & COMPAT_PSR_T_BIT))
140#else
141#define compat_thumb_mode(regs) (0)
142#endif
143
144#define user_mode(regs) \
145 (((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t)
146
147#define compat_user_mode(regs) \
148 (((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \
149 (PSR_MODE32_BIT | PSR_MODE_EL0t))
150
151#define processor_mode(regs) \
152 ((regs)->pstate & PSR_MODE_MASK)
153
154#define interrupts_enabled(regs) \
155 (!((regs)->pstate & PSR_I_BIT))
156
157#define fast_interrupts_enabled(regs) \
158 (!((regs)->pstate & PSR_F_BIT))
159
160#define user_stack_pointer(regs) \
161 ((regs)->sp)
162
163/*
164 * Are the current registers suitable for user mode? (used to maintain
165 * security in signal handlers)
166 */
167static inline int valid_user_regs(struct user_pt_regs *regs)
168{
169 if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) {
170 regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT);
171
172 /* The T bit is reserved for AArch64 */
173 if (!(regs->pstate & PSR_MODE32_BIT))
174 regs->pstate &= ~COMPAT_PSR_T_BIT;
175
176 return 1;
177 }
178
179 /*
180 * Force PSR to something logical...
181 */
182 regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \
183 COMPAT_PSR_T_BIT | PSR_MODE32_BIT;
184
185 if (!(regs->pstate & PSR_MODE32_BIT)) {
186 regs->pstate &= ~COMPAT_PSR_T_BIT;
187 regs->pstate |= PSR_MODE_EL0t;
188 }
189
190 return 0;
191}
192
193#define instruction_pointer(regs) (regs)->pc
194
195#ifdef CONFIG_SMP
196extern unsigned long profile_pc(struct pt_regs *regs);
197#else
198#define profile_pc(regs) instruction_pointer(regs)
199#endif
200
201extern int aarch32_break_trap(struct pt_regs *regs);
202
203#endif /* __KERNEL__ */
204
205#endif /* __ASSEMBLY__ */
206
207#endif
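Illustrative usage sketch, not part of this patch: classifying an exception frame with the pstate helpers defined above. It assumes this header plus a populated struct pt_regs; the function name and the returned strings are hypothetical.

static inline const char *model_classify_frame(const struct pt_regs *regs)
{
	/* AArch32 (compat) user task: PSR_MODE32_BIT set and mode == EL0t */
	if (compat_user_mode(regs))
		return compat_thumb_mode(regs) ? "AArch32 user (Thumb)"
					       : "AArch32 user (ARM)";
	/* Native 64-bit user task: mode field is EL0t */
	if (user_mode(regs))
		return "AArch64 user";
	/* Anything else was taken from the kernel (EL1) */
	return "kernel";
}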
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
new file mode 100644
index 000000000000..9cf2e46fbbdf
--- /dev/null
+++ b/arch/arm64/include/asm/setup.h
@@ -0,0 +1,26 @@
1/*
2 * Based on arch/arm/include/asm/setup.h
3 *
4 * Copyright (C) 1997-1999 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_SETUP_H
20#define __ASM_SETUP_H
21
22#include <linux/types.h>
23
24#define COMMAND_LINE_SIZE 2048
25
26#endif
diff --git a/arch/arm64/include/asm/shmparam.h b/arch/arm64/include/asm/shmparam.h
new file mode 100644
index 000000000000..4df608a8459e
--- /dev/null
+++ b/arch/arm64/include/asm/shmparam.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SHMPARAM_H
17#define __ASM_SHMPARAM_H
18
19/*
20 * For IPC syscalls from compat tasks, we need to use the legacy 16k
21 * alignment value. Since we don't have aliasing D-caches, the rest of
22 * the time we can safely use PAGE_SIZE.
23 */
24#define COMPAT_SHMLBA 0x4000
25
26#include <asm-generic/shmparam.h>
27
28#endif /* __ASM_SHMPARAM_H */
diff --git a/arch/arm64/include/asm/sigcontext.h b/arch/arm64/include/asm/sigcontext.h
new file mode 100644
index 000000000000..573cec778819
--- /dev/null
+++ b/arch/arm64/include/asm/sigcontext.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SIGCONTEXT_H
17#define __ASM_SIGCONTEXT_H
18
19#include <linux/types.h>
20
21/*
22 * Signal context structure - contains all info to do with the state
23 * before the signal handler was invoked.
24 */
25struct sigcontext {
26 __u64 fault_address;
27 /* AArch64 registers */
28 __u64 regs[31];
29 __u64 sp;
30 __u64 pc;
31 __u64 pstate;
32 /* 4K reserved for FP/SIMD state and future expansion */
33 __u8 __reserved[4096] __attribute__((__aligned__(16)));
34};
35
36/*
37 * Header to be used at the beginning of structures extending the user
38 * context. Such structures must be placed after the rt_sigframe on the stack
39 * and be 16-byte aligned. The last structure must be a dummy one with the
40 * magic and size set to 0.
41 */
42struct _aarch64_ctx {
43 __u32 magic;
44 __u32 size;
45};
46
47#define FPSIMD_MAGIC 0x46508001
48
49struct fpsimd_context {
50 struct _aarch64_ctx head;
51 __u32 fpsr;
52 __u32 fpcr;
53 __uint128_t vregs[32];
54};
55
56#ifdef __KERNEL__
57/*
58 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
59 * user space as it will change with the addition of new context. User space
60 * should check the magic/size information.
61 */
62struct aux_context {
63 struct fpsimd_context fpsimd;
64 /* additional context to be added before "end" */
65 struct _aarch64_ctx end;
66};
67#endif
68
69#endif
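Illustrative sketch, not part of this patch: how user space might walk the sigcontext.__reserved area using the magic/size record convention described above (records laid out back to back, terminated by a dummy record with magic and size set to 0). The minimal structure definitions mirror this header; the function name is hypothetical.

#include <stddef.h>
#include <stdint.h>

struct _aarch64_ctx { uint32_t magic; uint32_t size; };

#define FPSIMD_MAGIC	0x46508001

struct fpsimd_context {
	struct _aarch64_ctx head;
	uint32_t fpsr;
	uint32_t fpcr;
	__uint128_t vregs[32];
};

/* Find the FP/SIMD record inside the 4K __reserved block of a sigcontext. */
static struct fpsimd_context *model_find_fpsimd(unsigned char *reserved, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct _aarch64_ctx) <= len) {
		struct _aarch64_ctx *head = (struct _aarch64_ctx *)(reserved + off);

		if (head->magic == 0 && head->size == 0)
			break;			/* terminating dummy record */
		if (head->magic == FPSIMD_MAGIC)
			return (struct fpsimd_context *)head;
		if (head->size < sizeof(*head) || off + head->size > len)
			break;			/* malformed record */
		off += head->size;		/* skip an unknown record */
	}
	return NULL;
}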
diff --git a/arch/arm64/include/asm/siginfo.h b/arch/arm64/include/asm/siginfo.h
new file mode 100644
index 000000000000..5a74a0853db0
--- /dev/null
+++ b/arch/arm64/include/asm/siginfo.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SIGINFO_H
17#define __ASM_SIGINFO_H
18
19#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
20
21#include <asm-generic/siginfo.h>
22
23#endif
diff --git a/arch/arm64/include/asm/signal.h b/arch/arm64/include/asm/signal.h
new file mode 100644
index 000000000000..8d1e7236431b
--- /dev/null
+++ b/arch/arm64/include/asm/signal.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SIGNAL_H
17#define __ASM_SIGNAL_H
18
19/* Required for AArch32 compatibility. */
20#define SA_RESTORER 0x04000000
21
22#include <asm-generic/signal.h>
23
24#endif
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
new file mode 100644
index 000000000000..7c275e3b640f
--- /dev/null
+++ b/arch/arm64/include/asm/signal32.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SIGNAL32_H
17#define __ASM_SIGNAL32_H
18
19#ifdef __KERNEL__
20#ifdef CONFIG_COMPAT
21#include <linux/compat.h>
22
23#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
24
25extern const compat_ulong_t aarch32_sigret_code[6];
26
27int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
28 struct pt_regs *regs);
29int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
30 sigset_t *set, struct pt_regs *regs);
31
32void compat_setup_restart_syscall(struct pt_regs *regs);
33#else
34
35static inline int compat_setup_frame(int usig, struct k_sigaction *ka,

36 sigset_t *set, struct pt_regs *regs)
37{
38 return -ENOSYS;
39}
40
41static inline int compat_setup_rt_frame(int usig, struct k_sigaction *ka,
42 siginfo_t *info, sigset_t *set,
43 struct pt_regs *regs)
44{
45 return -ENOSYS;
46}
47
48static inline void compat_setup_restart_syscall(struct pt_regs *regs)
49{
50}
51#endif /* CONFIG_COMPAT */
52#endif /* __KERNEL__ */
53#endif /* __ASM_SIGNAL32_H */
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
new file mode 100644
index 000000000000..7e34295f78e3
--- /dev/null
+++ b/arch/arm64/include/asm/smp.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SMP_H
17#define __ASM_SMP_H
18
19#include <linux/threads.h>
20#include <linux/cpumask.h>
21#include <linux/thread_info.h>
22
23#ifndef CONFIG_SMP
24# error "<asm/smp.h> included in non-SMP build"
25#endif
26
27#define raw_smp_processor_id() (current_thread_info()->cpu)
28
29struct seq_file;
30
31/*
32 * generate IPI list text
33 */
34extern void show_ipi_list(struct seq_file *p, int prec);
35
36/*
37 * Called from C code, this handles an IPI.
38 */
39extern void handle_IPI(int ipinr, struct pt_regs *regs);
40
41/*
42 * Setup the set of possible CPUs (via set_cpu_possible)
43 */
44extern void smp_init_cpus(void);
45
46/*
47 * Provide a function to raise an IPI cross call on CPUs in callmap.
48 */
49extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
50
51/*
52 * Called from the secondary holding pen, this is the secondary CPU entry point.
53 */
54asmlinkage void secondary_start_kernel(void);
55
56/*
57 * Initial data for bringing up a secondary CPU.
58 */
59struct secondary_data {
60 void *stack;
61};
62extern struct secondary_data secondary_data;
63extern void secondary_holding_pen(void);
64extern volatile unsigned long secondary_holding_pen_release;
65
66extern void arch_send_call_function_single_ipi(int cpu);
67extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
68
69#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
new file mode 100644
index 000000000000..1be62bcb9d47
--- /dev/null
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SPARSEMEM_H
17#define __ASM_SPARSEMEM_H
18
19#ifdef CONFIG_SPARSEMEM
20#define MAX_PHYSMEM_BITS 40
21#define SECTION_SIZE_BITS 30
22#endif
23
24#endif
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
new file mode 100644
index 000000000000..41112fe2f8b1
--- /dev/null
+++ b/arch/arm64/include/asm/spinlock.h
@@ -0,0 +1,202 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SPINLOCK_H
17#define __ASM_SPINLOCK_H
18
19#include <asm/spinlock_types.h>
20#include <asm/processor.h>
21
22/*
23 * Spinlock implementation.
24 *
25 * The old value is read exclusively and the new one, if unlocked, is written
26 * exclusively. In case of failure, the loop is restarted.
27 *
28 * The memory barriers are implicit with the load-acquire and store-release
29 * instructions.
30 *
31 * Unlocked value: 0
32 * Locked value: 1
33 */
34
35#define arch_spin_is_locked(x) ((x)->lock != 0)
36#define arch_spin_unlock_wait(lock) \
37 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
38
39#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
40
41static inline void arch_spin_lock(arch_spinlock_t *lock)
42{
43 unsigned int tmp;
44
45 asm volatile(
46 " sevl\n"
47 "1: wfe\n"
48 "2: ldaxr %w0, [%1]\n"
49 " cbnz %w0, 1b\n"
50 " stxr %w0, %w2, [%1]\n"
51 " cbnz %w0, 2b\n"
52 : "=&r" (tmp)
53 : "r" (&lock->lock), "r" (1)
54 : "memory");
55}
56
57static inline int arch_spin_trylock(arch_spinlock_t *lock)
58{
59 unsigned int tmp;
60
61 asm volatile(
62 " ldaxr %w0, [%1]\n"
63 " cbnz %w0, 1f\n"
64 " stxr %w0, %w2, [%1]\n"
65 "1:\n"
66 : "=&r" (tmp)
67 : "r" (&lock->lock), "r" (1)
68 : "memory");
69
70 return !tmp;
71}
72
73static inline void arch_spin_unlock(arch_spinlock_t *lock)
74{
75 asm volatile(
76 " stlr %w1, [%0]\n"
77 : : "r" (&lock->lock), "r" (0) : "memory");
78}
79
80/*
81 * Write lock implementation.
82 *
83 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
84 * exclusively held.
85 *
86 * The memory barriers are implicit with the load-acquire and store-release
87 * instructions.
88 */
89
90static inline void arch_write_lock(arch_rwlock_t *rw)
91{
92 unsigned int tmp;
93
94 asm volatile(
95 " sevl\n"
96 "1: wfe\n"
97 "2: ldaxr %w0, [%1]\n"
98 " cbnz %w0, 1b\n"
99 " stxr %w0, %w2, [%1]\n"
100 " cbnz %w0, 2b\n"
101 : "=&r" (tmp)
102 : "r" (&rw->lock), "r" (0x80000000)
103 : "memory");
104}
105
106static inline int arch_write_trylock(arch_rwlock_t *rw)
107{
108 unsigned int tmp;
109
110 asm volatile(
111 " ldaxr %w0, [%1]\n"
112 " cbnz %w0, 1f\n"
113 " stxr %w0, %w2, [%1]\n"
114 "1:\n"
115 : "=&r" (tmp)
116 : "r" (&rw->lock), "r" (0x80000000)
117 : "memory");
118
119 return !tmp;
120}
121
122static inline void arch_write_unlock(arch_rwlock_t *rw)
123{
124 asm volatile(
125 " stlr %w1, [%0]\n"
126 : : "r" (&rw->lock), "r" (0) : "memory");
127}
128
129/* write_can_lock - would write_trylock() succeed? */
130#define arch_write_can_lock(x) ((x)->lock == 0)
131
132/*
133 * Read lock implementation.
134 *
135 * It exclusively loads the lock value, increments it and stores the new value
136 * back if positive and the CPU still exclusively owns the location. If the
137 * value is negative, the lock is already held.
138 *
139 * During unlocking there may be multiple active read locks but no write lock.
140 *
141 * The memory barriers are implicit with the load-acquire and store-release
142 * instructions.
143 */
144static inline void arch_read_lock(arch_rwlock_t *rw)
145{
146 unsigned int tmp, tmp2;
147
148 asm volatile(
149 " sevl\n"
150 "1: wfe\n"
151 "2: ldaxr %w0, [%2]\n"
152 " add %w0, %w0, #1\n"
153 " tbnz %w0, #31, 1b\n"
154 " stxr %w1, %w0, [%2]\n"
155 " cbnz %w1, 2b\n"
156 : "=&r" (tmp), "=&r" (tmp2)
157 : "r" (&rw->lock)
158 : "memory");
159}
160
161static inline void arch_read_unlock(arch_rwlock_t *rw)
162{
163 unsigned int tmp, tmp2;
164
165 asm volatile(
166 "1: ldxr %w0, [%2]\n"
167 " sub %w0, %w0, #1\n"
168 " stlxr %w1, %w0, [%2]\n"
169 " cbnz %w1, 1b\n"
170 : "=&r" (tmp), "=&r" (tmp2)
171 : "r" (&rw->lock)
172 : "memory");
173}
174
175static inline int arch_read_trylock(arch_rwlock_t *rw)
176{
177 unsigned int tmp, tmp2 = 1;
178
179 asm volatile(
180 " ldaxr %w0, [%2]\n"
181 " add %w0, %w0, #1\n"
182 " tbnz %w0, #31, 1f\n"
183 " stxr %w1, %w0, [%2]\n"
184 "1:\n"
185 : "=&r" (tmp), "+r" (tmp2)
186 : "r" (&rw->lock)
187 : "memory");
188
189 return !tmp2;
190}
191
192/* read_can_lock - would read_trylock() succeed? */
193#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
194
195#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
196#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
197
198#define arch_spin_relax(lock) cpu_relax()
199#define arch_read_relax(lock) cpu_relax()
200#define arch_write_relax(lock) cpu_relax()
201
202#endif /* __ASM_SPINLOCK_H */
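Illustrative sketch, not part of this patch: the spinlock logic above modelled with C11 atomics. It mirrors the 0 = unlocked / 1 = locked convention and the acquire/release ordering of the LDAXR/STLR sequences, but not the WFE/SEV wait mechanism or the exclusive monitors; all names are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_uint lock; } model_spinlock_t;	/* 0 = unlocked, 1 = locked */

static void model_spin_lock(model_spinlock_t *l)
{
	unsigned int expected;

	do {
		expected = 0;
		/* retry until the lock reads 0 and we claim it atomically */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected, 1,
							memory_order_acquire,
							memory_order_relaxed));
}

static bool model_spin_trylock(model_spinlock_t *l)
{
	unsigned int expected = 0;

	return atomic_compare_exchange_strong_explicit(&l->lock, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void model_spin_unlock(model_spinlock_t *l)
{
	/* release store, as the STLR instruction does in arch_spin_unlock() */
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}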
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..9a494346efed
--- /dev/null
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SPINLOCK_TYPES_H
17#define __ASM_SPINLOCK_TYPES_H
18
19#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
20# error "please don't include this file directly"
21#endif
22
23/* We only require natural alignment for exclusive accesses. */
24#define __lock_aligned
25
26typedef struct {
27 volatile unsigned int lock;
28} arch_spinlock_t;
29
30#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
31
32typedef struct {
33 volatile unsigned int lock;
34} arch_rwlock_t;
35
36#define __ARCH_RW_LOCK_UNLOCKED { 0 }
37
38#endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
new file mode 100644
index 000000000000..7318f6d54aa9
--- /dev/null
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_STACKTRACE_H
17#define __ASM_STACKTRACE_H
18
19struct stackframe {
20 unsigned long fp;
21 unsigned long sp;
22 unsigned long pc;
23};
24
25extern int unwind_frame(struct stackframe *frame);
26extern void walk_stackframe(struct stackframe *frame,
27 int (*fn)(struct stackframe *, void *), void *data);
28
29#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/stat.h b/arch/arm64/include/asm/stat.h
new file mode 100644
index 000000000000..d87225cbead8
--- /dev/null
+++ b/arch/arm64/include/asm/stat.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_STAT_H
17#define __ASM_STAT_H
18
19#include <asm-generic/stat.h>
20
21#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
22
23#include <asm/compat.h>
24
25/*
26 * struct stat64 is needed for compat tasks only. Its definition is different
27 * from the generic struct stat64.
28 */
29struct stat64 {
30 compat_u64 st_dev;
31 unsigned char __pad0[4];
32
33#define STAT64_HAS_BROKEN_ST_INO 1
34 compat_ulong_t __st_ino;
35 compat_uint_t st_mode;
36 compat_uint_t st_nlink;
37
38 compat_ulong_t st_uid;
39 compat_ulong_t st_gid;
40
41 compat_u64 st_rdev;
42 unsigned char __pad3[4];
43
44 compat_s64 st_size;
45 compat_ulong_t st_blksize;
46 compat_u64 st_blocks; /* Number of 512-byte blocks allocated. */
47
48 compat_ulong_t st_atime;
49 compat_ulong_t st_atime_nsec;
50
51 compat_ulong_t st_mtime;
52 compat_ulong_t st_mtime_nsec;
53
54 compat_ulong_t st_ctime;
55 compat_ulong_t st_ctime_nsec;
56
57 compat_u64 st_ino;
58};
59
60#endif
61
62#endif
diff --git a/arch/arm64/include/asm/statfs.h b/arch/arm64/include/asm/statfs.h
new file mode 100644
index 000000000000..6f6219050978
--- /dev/null
+++ b/arch/arm64/include/asm/statfs.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_STATFS_H
17#define __ASM_STATFS_H
18
19#define ARCH_PACK_COMPAT_STATFS64 __attribute__((packed,aligned(4)))
20
21#include <asm-generic/statfs.h>
22
23#endif
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
new file mode 100644
index 000000000000..89c047f9a971
--- /dev/null
+++ b/arch/arm64/include/asm/syscall.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SYSCALL_H
17#define __ASM_SYSCALL_H
18
19#include <linux/err.h>
20
21
22static inline int syscall_get_nr(struct task_struct *task,
23 struct pt_regs *regs)
24{
25 return regs->syscallno;
26}
27
28static inline void syscall_rollback(struct task_struct *task,
29 struct pt_regs *regs)
30{
31 regs->regs[0] = regs->orig_x0;
32}
33
34
35static inline long syscall_get_error(struct task_struct *task,
36 struct pt_regs *regs)
37{
38 unsigned long error = regs->regs[0];
39 return IS_ERR_VALUE(error) ? error : 0;
40}
41
42static inline long syscall_get_return_value(struct task_struct *task,
43 struct pt_regs *regs)
44{
45 return regs->regs[0];
46}
47
48static inline void syscall_set_return_value(struct task_struct *task,
49 struct pt_regs *regs,
50 int error, long val)
51{
52 regs->regs[0] = (long) error ? error : val;
53}
54
55#define SYSCALL_MAX_ARGS 6
56
57static inline void syscall_get_arguments(struct task_struct *task,
58 struct pt_regs *regs,
59 unsigned int i, unsigned int n,
60 unsigned long *args)
61{
62 if (i + n > SYSCALL_MAX_ARGS) {
63 unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
64 unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
65 pr_warning("%s called with max args %d, handling only %d\n",
66 __func__, i + n, SYSCALL_MAX_ARGS);
67 memset(args_bad, 0, n_bad * sizeof(args[0]));
68 }
69
70 if (i == 0) {
71 args[0] = regs->orig_x0;
72 args++;
73 i++;
74 n--;
75 }
76
77 memcpy(args, &regs->regs[i], n * sizeof(args[0]));
78}
79
80static inline void syscall_set_arguments(struct task_struct *task,
81 struct pt_regs *regs,
82 unsigned int i, unsigned int n,
83 const unsigned long *args)
84{
85 if (i + n > SYSCALL_MAX_ARGS) {
86 pr_warning("%s called with max args %d, handling only %d\n",
87 __func__, i + n, SYSCALL_MAX_ARGS);
88 n = SYSCALL_MAX_ARGS - i;
89 }
90
91 if (i == 0) {
92 regs->orig_x0 = args[0];
93 args++;
94 i++;
95 n--;
96 }
97
98 memcpy(&regs->regs[i], args, n * sizeof(args[0]));
99}
100
101#endif /* __ASM_SYSCALL_H */
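Illustrative usage sketch, not part of this patch: fetching all six syscall arguments from a tracing hook, assuming this header and asm/ptrace.h above. Argument 0 is taken from orig_x0 because regs->regs[0] is overwritten by the return value; the function name is hypothetical.

static void model_dump_syscall_args(struct task_struct *task,
				    struct pt_regs *regs)
{
	unsigned long args[SYSCALL_MAX_ARGS];

	/* args[0] comes from orig_x0, args[1..5] from regs->regs[1..5] */
	syscall_get_arguments(task, regs, 0, SYSCALL_MAX_ARGS, args);
}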
diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h
new file mode 100644
index 000000000000..09ff33572aab
--- /dev/null
+++ b/arch/arm64/include/asm/syscalls.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_SYSCALLS_H
17#define __ASM_SYSCALLS_H
18
19#include <linux/linkage.h>
20#include <linux/compiler.h>
21#include <linux/signal.h>
22
23/*
24 * System call wrappers implemented in kernel/entry.S.
25 */
26asmlinkage long sys_execve_wrapper(const char __user *filename,
27 const char __user *const __user *argv,
28 const char __user *const __user *envp);
29asmlinkage long sys_clone_wrapper(unsigned long clone_flags,
30 unsigned long newsp,
31 void __user *parent_tid,
32 unsigned long tls_val,
33 void __user *child_tid);
34asmlinkage long sys_rt_sigreturn_wrapper(void);
35asmlinkage long sys_sigaltstack_wrapper(const stack_t __user *uss,
36 stack_t __user *uoss);
37
38#include <asm-generic/syscalls.h>
39
40#endif /* __ASM_SYSCALLS_H */
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
new file mode 100644
index 000000000000..95e407255347
--- /dev/null
+++ b/arch/arm64/include/asm/system_misc.h
@@ -0,0 +1,54 @@
1/*
2 * Based on arch/arm/include/asm/system_misc.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_SYSTEM_MISC_H
19#define __ASM_SYSTEM_MISC_H
20
21#ifndef __ASSEMBLY__
22
23#include <linux/compiler.h>
24#include <linux/linkage.h>
25#include <linux/irqflags.h>
26
27struct pt_regs;
28
29void die(const char *msg, struct pt_regs *regs, int err);
30
31struct siginfo;
32void arm64_notify_die(const char *str, struct pt_regs *regs,
33 struct siginfo *info, int err);
34
35void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
36 struct pt_regs *),
37 int sig, int code, const char *name);
38
39struct mm_struct;
40extern void show_pte(struct mm_struct *mm, unsigned long addr);
41extern void __show_regs(struct pt_regs *);
42
43void soft_restart(unsigned long);
44extern void (*pm_restart)(const char *cmd);
45
46#define UDBG_UNDEFINED (1 << 0)
47#define UDBG_SYSCALL (1 << 1)
48#define UDBG_BADABORT (1 << 2)
49#define UDBG_SEGV (1 << 3)
50#define UDBG_BUS (1 << 4)
51
52#endif /* __ASSEMBLY__ */
53
54#endif /* __ASM_SYSTEM_MISC_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
new file mode 100644
index 000000000000..3659e460071d
--- /dev/null
+++ b/arch/arm64/include/asm/thread_info.h
@@ -0,0 +1,127 @@
1/*
2 * Based on arch/arm/include/asm/thread_info.h
3 *
4 * Copyright (C) 2002 Russell King.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_THREAD_INFO_H
20#define __ASM_THREAD_INFO_H
21
22#ifdef __KERNEL__
23
24#include <linux/compiler.h>
25
26#ifndef CONFIG_ARM64_64K_PAGES
27#define THREAD_SIZE_ORDER 1
28#endif
29
30#define THREAD_SIZE 8192
31#define THREAD_START_SP (THREAD_SIZE - 16)
32
33#ifndef __ASSEMBLY__
34
35struct task_struct;
36struct exec_domain;
37
38#include <asm/types.h>
39
40typedef unsigned long mm_segment_t;
41
42/*
43 * low level task data that entry.S needs immediate access to.
44 * The cpu_context saved by __switch_to() lives in thread_struct, not here.
45 */
46struct thread_info {
47 unsigned long flags; /* low level flags */
48 mm_segment_t addr_limit; /* address limit */
49 struct task_struct *task; /* main task structure */
50 struct exec_domain *exec_domain; /* execution domain */
51 struct restart_block restart_block;
52 int preempt_count; /* 0 => preemptable, <0 => bug */
53 int cpu; /* cpu */
54};
55
56#define INIT_THREAD_INFO(tsk) \
57{ \
58 .task = &tsk, \
59 .exec_domain = &default_exec_domain, \
60 .flags = 0, \
61 .preempt_count = INIT_PREEMPT_COUNT, \
62 .addr_limit = KERNEL_DS, \
63 .restart_block = { \
64 .fn = do_no_restart_syscall, \
65 }, \
66}
67
68#define init_thread_info (init_thread_union.thread_info)
69#define init_stack (init_thread_union.stack)
70
71/*
72 * how to get the thread information struct from C
73 */
74static inline struct thread_info *current_thread_info(void) __attribute_const__;
75
76static inline struct thread_info *current_thread_info(void)
77{
78 register unsigned long sp asm ("sp");
79 return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
80}
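Illustrative sketch, not part of this patch: the same stack-pointer masking written out in plain C with a worked value (the constant and names are for illustration only).

static inline unsigned long model_thread_info_base(unsigned long sp)
{
	/*
	 * The kernel stack is THREAD_SIZE (8192 bytes) aligned and
	 * struct thread_info sits at its base, so clearing the low 13 bits
	 * of sp recovers it, e.g. 0xffff00000123f9c0 -> 0xffff00000123e000.
	 */
	return sp & ~(8192UL - 1);
}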
81
82#define thread_saved_pc(tsk) \
83 ((unsigned long)(tsk->thread.cpu_context.pc))
84#define thread_saved_sp(tsk) \
85 ((unsigned long)(tsk->thread.cpu_context.sp))
86#define thread_saved_fp(tsk) \
87 ((unsigned long)(tsk->thread.cpu_context.fp))
88
89#endif
90
91/*
92 * We use bit 30 of the preempt_count to indicate that kernel
93 * preemption is occurring. See <asm/hardirq.h>.
94 */
95#define PREEMPT_ACTIVE 0x40000000
96
97/*
98 * thread information flags:
99 * TIF_SYSCALL_TRACE - syscall trace active
100 * TIF_SIGPENDING - signal pending
101 * TIF_NEED_RESCHED - rescheduling necessary
102 * TIF_NOTIFY_RESUME - callback before returning to user
103 * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
104 * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
105 */
106#define TIF_SIGPENDING 0
107#define TIF_NEED_RESCHED 1
108#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
109#define TIF_SYSCALL_TRACE 8
110#define TIF_POLLING_NRFLAG 16
111#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
112#define TIF_FREEZE 19
113#define TIF_RESTORE_SIGMASK 20
114#define TIF_SINGLESTEP 21
115#define TIF_32BIT 22 /* 32bit process */
116#define TIF_SWITCH_MM 23 /* deferred switch_mm */
117
118#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
119#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
120#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
121#define _TIF_32BIT (1 << TIF_32BIT)
122
123#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
124 _TIF_NOTIFY_RESUME)
125
126#endif /* __KERNEL__ */
127#endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h
new file mode 100644
index 000000000000..b24a31a7e2c9
--- /dev/null
+++ b/arch/arm64/include/asm/timex.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_TIMEX_H
17#define __ASM_TIMEX_H
18
19/*
20 * Use the current timer as a cycle counter since this is what we use for
21 * the delay loop.
22 */
23#define get_cycles() ({ cycles_t c; read_current_timer(&c); c; })
24
25#include <asm-generic/timex.h>
26
27#define ARCH_HAS_READ_CURRENT_TIMER
28
29#endif
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
new file mode 100644
index 000000000000..654f0968030b
--- /dev/null
+++ b/arch/arm64/include/asm/tlb.h
@@ -0,0 +1,190 @@
1/*
2 * Based on arch/arm/include/asm/tlb.h
3 *
4 * Copyright (C) 2002 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_TLB_H
20#define __ASM_TLB_H
21
22#include <linux/pagemap.h>
23#include <linux/swap.h>
24
25#include <asm/pgalloc.h>
26#include <asm/tlbflush.h>
27
28#define MMU_GATHER_BUNDLE 8
29
30/*
31 * TLB handling. This allows us to remove pages from the page
32 * tables, and efficiently handle the TLB issues.
33 */
34struct mmu_gather {
35 struct mm_struct *mm;
36 unsigned int fullmm;
37 struct vm_area_struct *vma;
38 unsigned long range_start;
39 unsigned long range_end;
40 unsigned int nr;
41 unsigned int max;
42 struct page **pages;
43 struct page *local[MMU_GATHER_BUNDLE];
44};
45
46/*
47 * This is unnecessarily complex. There are three ways the TLB shootdown
48 * code is used:
49 * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
50 * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
51 * tlb->vma will be non-NULL.
52 * 2. Unmapping all vmas. See exit_mmap().
53 * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
54 * tlb->vma will be non-NULL. Additionally, page tables will be freed.
55 * 3. Unmapping argument pages. See shift_arg_pages().
56 * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
57 * tlb->vma will be NULL.
58 */
59static inline void tlb_flush(struct mmu_gather *tlb)
60{
61 if (tlb->fullmm || !tlb->vma)
62 flush_tlb_mm(tlb->mm);
63 else if (tlb->range_end > 0) {
64 flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
65 tlb->range_start = TASK_SIZE;
66 tlb->range_end = 0;
67 }
68}
69
70static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
71{
72 if (!tlb->fullmm) {
73 if (addr < tlb->range_start)
74 tlb->range_start = addr;
75 if (addr + PAGE_SIZE > tlb->range_end)
76 tlb->range_end = addr + PAGE_SIZE;
77 }
78}
79
80static inline void __tlb_alloc_page(struct mmu_gather *tlb)
81{
82 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
83
84 if (addr) {
85 tlb->pages = (void *)addr;
86 tlb->max = PAGE_SIZE / sizeof(struct page *);
87 }
88}
89
90static inline void tlb_flush_mmu(struct mmu_gather *tlb)
91{
92 tlb_flush(tlb);
93 free_pages_and_swap_cache(tlb->pages, tlb->nr);
94 tlb->nr = 0;
95 if (tlb->pages == tlb->local)
96 __tlb_alloc_page(tlb);
97}
98
99static inline void
100tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
101{
102 tlb->mm = mm;
103 tlb->fullmm = fullmm;
104 tlb->vma = NULL;
105 tlb->max = ARRAY_SIZE(tlb->local);
106 tlb->pages = tlb->local;
107 tlb->nr = 0;
108 __tlb_alloc_page(tlb);
109}
110
111static inline void
112tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
113{
114 tlb_flush_mmu(tlb);
115
116 /* keep the page table cache within bounds */
117 check_pgt_cache();
118
119 if (tlb->pages != tlb->local)
120 free_pages((unsigned long)tlb->pages, 0);
121}
122
123/*
124 * Memorize the range for the TLB flush.
125 */
126static inline void
127tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
128{
129 tlb_add_flush(tlb, addr);
130}
131
132/*
133 * In the case of tlb vma handling, we can optimise these away in the
134 * case where we're doing a full MM flush. When we're doing a munmap,
135 * the vmas are adjusted to only cover the region to be torn down.
136 */
137static inline void
138tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
139{
140 if (!tlb->fullmm) {
141 tlb->vma = vma;
142 tlb->range_start = TASK_SIZE;
143 tlb->range_end = 0;
144 }
145}
146
147static inline void
148tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
149{
150 if (!tlb->fullmm)
151 tlb_flush(tlb);
152}
153
154static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
155{
156 tlb->pages[tlb->nr++] = page;
157 VM_BUG_ON(tlb->nr > tlb->max);
158 return tlb->max - tlb->nr;
159}
160
161static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
162{
163 if (!__tlb_remove_page(tlb, page))
164 tlb_flush_mmu(tlb);
165}
166
167static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
168 unsigned long addr)
169{
170 pgtable_page_dtor(pte);
171 tlb_add_flush(tlb, addr);
172 tlb_remove_page(tlb, pte);
173}
174
175#ifndef CONFIG_ARM64_64K_PAGES
176static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
177 unsigned long addr)
178{
179 tlb_add_flush(tlb, addr);
180 tlb_remove_page(tlb, virt_to_page(pmdp));
181}
182#endif
183
184#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
185#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
186#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
187
188#define tlb_migrate_finish(mm) do { } while (0)
189
190#endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
new file mode 100644
index 000000000000..122d6320f745
--- /dev/null
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -0,0 +1,122 @@
1/*
2 * Based on arch/arm/include/asm/tlbflush.h
3 *
4 * Copyright (C) 1999-2003 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_TLBFLUSH_H
20#define __ASM_TLBFLUSH_H
21
22#ifndef __ASSEMBLY__
23
24#include <linux/sched.h>
25#include <asm/cputype.h>
26
27extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
28extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
29
30extern struct cpu_tlb_fns cpu_tlb;
31
32/*
33 * TLB Management
34 * ==============
35 *
36 * The arch/arm64/mm/tlb.S file implements these methods.
37 *
38 * The TLB specific code is expected to perform whatever tests it needs
39 * to determine if it should invalidate the TLB for each call. Start
40 * addresses are inclusive and end addresses are exclusive; it is safe to
41 * round these addresses down.
42 *
43 * flush_tlb_all()
44 *
45 * Invalidate the entire TLB.
46 *
47 * flush_tlb_mm(mm)
48 *
49 * Invalidate all TLB entries in a particular address space.
50 * - mm - mm_struct describing address space
51 *
52 * flush_tlb_range(mm,start,end)
53 *
54 * Invalidate a range of TLB entries in the specified address
55 * space.
56 * - mm - mm_struct describing address space
57 * - start - start address (may not be aligned)
58 * - end - end address (exclusive, may not be aligned)
59 *
60 * flush_tlb_page(vaddr,vma)
61 *
62 * Invalidate the specified page in the specified address range.
63 * - vaddr - virtual address (may not be aligned)
64 * - vma - vma_struct describing address range
65 *
66 * flush_kern_tlb_page(kaddr)
67 *
68 * Invalidate the TLB entry for the specified page. The address
69 * will be in the kernel's virtual memory space. Current uses
70 * only require the D-TLB to be invalidated.
71 * - kaddr - Kernel virtual memory address
72 */
73static inline void flush_tlb_all(void)
74{
75 dsb();
76 asm("tlbi vmalle1is");
77 dsb();
78 isb();
79}
80
81static inline void flush_tlb_mm(struct mm_struct *mm)
82{
83 unsigned long asid = (unsigned long)ASID(mm) << 48;
84
85 dsb();
86 asm("tlbi aside1is, %0" : : "r" (asid));
87 dsb();
88}
89
90static inline void flush_tlb_page(struct vm_area_struct *vma,
91 unsigned long uaddr)
92{
93 unsigned long addr = uaddr >> 12 |
94 ((unsigned long)ASID(vma->vm_mm) << 48);
95
96 dsb();
97 asm("tlbi vae1is, %0" : : "r" (addr));
98 dsb();
99}
100
101/*
102 * Convert calls to our calling convention.
103 */
104#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
105#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
106
107/*
108 * On AArch64, the cache coherency is handled via the set_pte_at() function.
109 */
110static inline void update_mmu_cache(struct vm_area_struct *vma,
111 unsigned long addr, pte_t *ptep)
112{
113 /*
114 * set_pte() does not have a DSB, so make sure that the page table
115 * write is visible.
116 */
117 dsb();
118}
119
120#endif
121
122#endif
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
new file mode 100644
index 000000000000..10ca8ff93cc2
--- /dev/null
+++ b/arch/arm64/include/asm/traps.h
@@ -0,0 +1,30 @@
1/*
2 * Based on arch/arm/include/asm/traps.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_TRAP_H
19#define __ASM_TRAP_H
20
21static inline int in_exception_text(unsigned long ptr)
22{
23 extern char __exception_text_start[];
24 extern char __exception_text_end[];
25
26 return ptr >= (unsigned long)&__exception_text_start &&
27 ptr < (unsigned long)&__exception_text_end;
28}
29
30#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
new file mode 100644
index 000000000000..008f8481da65
--- /dev/null
+++ b/arch/arm64/include/asm/uaccess.h
@@ -0,0 +1,297 @@
1/*
2 * Based on arch/arm/include/asm/uaccess.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_UACCESS_H
19#define __ASM_UACCESS_H
20
21/*
22 * User space memory access functions
23 */
24#include <linux/string.h>
25#include <linux/thread_info.h>
26
27#include <asm/ptrace.h>
28#include <asm/errno.h>
29#include <asm/memory.h>
30#include <asm/compiler.h>
31
32#define VERIFY_READ 0
33#define VERIFY_WRITE 1
34
35/*
36 * The exception table consists of pairs of addresses: the first is the
37 * address of an instruction that is allowed to fault, and the second is
38 * the address at which the program should continue. No registers are
39 * modified, so it is entirely up to the continuation code to figure out
40 * what to do.
41 *
42 * All the routines below use bits of fixup code that are out of line
43 * with the main instruction path. This means when everything is well,
44 * we don't even have to jump over them. Further, they do not intrude
45 * on our cache or tlb entries.
46 */
47
48struct exception_table_entry
49{
50 unsigned long insn, fixup;
51};
52
53extern int fixup_exception(struct pt_regs *regs);
54
55#define KERNEL_DS (-1UL)
56#define get_ds() (KERNEL_DS)
57
58#define USER_DS TASK_SIZE_64
59#define get_fs() (current_thread_info()->addr_limit)
60
61static inline void set_fs(mm_segment_t fs)
62{
63 current_thread_info()->addr_limit = fs;
64}
65
66#define segment_eq(a,b) ((a) == (b))
67
68/*
69 * Return 1 if addr < current->addr_limit, 0 otherwise.
70 */
71#define __addr_ok(addr) \
72({ \
73 unsigned long flag; \
74 asm("cmp %1, %0; cset %0, lo" \
75 : "=&r" (flag) \
76 : "r" (addr), "0" (current_thread_info()->addr_limit) \
77 : "cc"); \
78 flag; \
79})
80
81/*
82 * Test whether a block of memory is a valid user space address.
83 * Returns 1 if the range is valid, 0 otherwise.
84 *
85 * This is equivalent to the following test:
86 * (u65)addr + (u65)size < (u65)current->addr_limit
87 *
88 * This needs 65-bit arithmetic.
89 */
90#define __range_ok(addr, size) \
91({ \
92 unsigned long flag, roksum; \
93 __chk_user_ptr(addr); \
94 asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc" \
95 : "=&r" (flag), "=&r" (roksum) \
96 : "1" (addr), "Ir" (size), \
97 "r" (current_thread_info()->addr_limit) \
98 : "cc"); \
99 flag; \
100})
101
102#define access_ok(type, addr, size) __range_ok(addr, size)
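Illustrative sketch, not part of this patch: the 65-bit range test described above written with 128-bit arithmetic, so that neither the sum nor the comparison can wrap. Here "limit" stands in for current_thread_info()->addr_limit and the function name is hypothetical.

static inline int model_range_ok(unsigned long addr, unsigned long size,
				 unsigned long limit)
{
	/* equivalent to: (u65)addr + (u65)size < (u65)limit */
	return (unsigned __int128)addr + size < (unsigned __int128)limit;
}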
103
104/*
105 * The "__xxx" versions of the user access functions do not verify the address
106 * space - it must have been done previously with a separate "access_ok()"
107 * call.
108 *
109 * The "__xxx_error" versions set the third argument to -EFAULT if an error
110 * occurs, and leave it unchanged on success.
111 */
112#define __get_user_asm(instr, reg, x, addr, err) \
113 asm volatile( \
114 "1: " instr " " reg "1, [%2]\n" \
115 "2:\n" \
116 " .section .fixup, \"ax\"\n" \
117 " .align 2\n" \
118 "3: mov %w0, %3\n" \
119 " mov %1, #0\n" \
120 " b 2b\n" \
121 " .previous\n" \
122 " .section __ex_table,\"a\"\n" \
123 " .align 3\n" \
124 " .quad 1b, 3b\n" \
125 " .previous" \
126 : "+r" (err), "=&r" (x) \
127 : "r" (addr), "i" (-EFAULT))
128
129#define __get_user_err(x, ptr, err) \
130do { \
131 unsigned long __gu_val; \
132 __chk_user_ptr(ptr); \
133 switch (sizeof(*(ptr))) { \
134 case 1: \
135 __get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \
136 break; \
137 case 2: \
138 __get_user_asm("ldrh", "%w", __gu_val, (ptr), (err)); \
139 break; \
140 case 4: \
141 __get_user_asm("ldr", "%w", __gu_val, (ptr), (err)); \
142 break; \
143 case 8: \
144 __get_user_asm("ldr", "%", __gu_val, (ptr), (err)); \
145 break; \
146 default: \
147 BUILD_BUG(); \
148 } \
149 (x) = (__typeof__(*(ptr)))__gu_val; \
150} while (0)
151
152#define __get_user(x, ptr) \
153({ \
154 int __gu_err = 0; \
155 __get_user_err((x), (ptr), __gu_err); \
156 __gu_err; \
157})
158
159#define __get_user_error(x, ptr, err) \
160({ \
161 __get_user_err((x), (ptr), (err)); \
162 (void)0; \
163})
164
165#define __get_user_unaligned __get_user
166
167#define get_user(x, ptr) \
168({ \
169 might_sleep(); \
170 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \
171 __get_user((x), (ptr)) : \
172 ((x) = 0, -EFAULT); \
173})
174
175#define __put_user_asm(instr, reg, x, addr, err) \
176 asm volatile( \
177 "1: " instr " " reg "1, [%2]\n" \
178 "2:\n" \
179 " .section .fixup,\"ax\"\n" \
180 " .align 2\n" \
181 "3: mov %w0, %3\n" \
182 " b 2b\n" \
183 " .previous\n" \
184 " .section __ex_table,\"a\"\n" \
185 " .align 3\n" \
186 " .quad 1b, 3b\n" \
187 " .previous" \
188 : "+r" (err) \
189 : "r" (x), "r" (addr), "i" (-EFAULT))
190
191#define __put_user_err(x, ptr, err) \
192do { \
193 __typeof__(*(ptr)) __pu_val = (x); \
194 __chk_user_ptr(ptr); \
195 switch (sizeof(*(ptr))) { \
196 case 1: \
197 __put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \
198 break; \
199 case 2: \
200 __put_user_asm("strh", "%w", __pu_val, (ptr), (err)); \
201 break; \
202 case 4: \
203 __put_user_asm("str", "%w", __pu_val, (ptr), (err)); \
204 break; \
205 case 8: \
206 __put_user_asm("str", "%", __pu_val, (ptr), (err)); \
207 break; \
208 default: \
209 BUILD_BUG(); \
210 } \
211} while (0)
212
213#define __put_user(x, ptr) \
214({ \
215 int __pu_err = 0; \
216 __put_user_err((x), (ptr), __pu_err); \
217 __pu_err; \
218})
219
220#define __put_user_error(x, ptr, err) \
221({ \
222 __put_user_err((x), (ptr), (err)); \
223 (void)0; \
224})
225
226#define __put_user_unaligned __put_user
227
228#define put_user(x, ptr) \
229({ \
230 might_sleep(); \
231 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
232 __put_user((x), (ptr)) : \
233 -EFAULT; \
234})
235
236extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
237extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
238extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
239extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
240
241extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
242extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
243
244static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
245{
246 if (access_ok(VERIFY_READ, from, n))
247 n = __copy_from_user(to, from, n);
248 else /* security hole - plug it */
249 memset(to, 0, n);
250 return n;
251}
252
253static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
254{
255 if (access_ok(VERIFY_WRITE, to, n))
256 n = __copy_to_user(to, from, n);
257 return n;
258}
259
260static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
261{
262 if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
263 n = __copy_in_user(to, from, n);
264 return n;
265}
266
267#define __copy_to_user_inatomic __copy_to_user
268#define __copy_from_user_inatomic __copy_from_user
269
270static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
271{
272 if (access_ok(VERIFY_WRITE, to, n))
273 n = __clear_user(to, n);
274 return n;
275}
276
277static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
278{
279 long res = -EFAULT;
280 if (access_ok(VERIFY_READ, src, 1))
281 res = __strncpy_from_user(dst, src, count);
282 return res;
283}
284
285#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
286
287static inline long __must_check strnlen_user(const char __user *s, long n)
288{
289 unsigned long res = 0;
290
291 if (__addr_ok(s))
292 res = __strnlen_user(s, n);
293
294 return res;
295}
296
297#endif /* __ASM_UACCESS_H */
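
For illustration only, a typical caller of the checked accessors just propagates their non-zero return as -EFAULT; the handler below is hypothetical and not part of the patch.

	/* Hypothetical user of get_user()/put_user(). */
	static long example_handler(unsigned int __user *uptr)
	{
		unsigned int val;

		if (get_user(val, uptr))	/* performs the access_ok() check */
			return -EFAULT;
		val |= 0x1;
		if (put_user(val, uptr))
			return -EFAULT;
		return 0;
	}
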
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h
new file mode 100644
index 000000000000..bde960720892
--- /dev/null
+++ b/arch/arm64/include/asm/ucontext.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_UCONTEXT_H
17#define __ASM_UCONTEXT_H
18
19struct ucontext {
20 unsigned long uc_flags;
21 struct ucontext *uc_link;
22 stack_t uc_stack;
23 sigset_t uc_sigmask;
24 /* glibc uses a 1024-bit sigset_t */
 25 __u8 __unused[1024 / 8 - sizeof(sigset_t)];
26 /* last for future expansion */
27 struct sigcontext uc_mcontext;
28};
29
30#endif /* __ASM_UCONTEXT_H */
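
The padding arithmetic: glibc reserves 1024 bits (128 bytes) for the signal mask, and the kernel's sigset_t covers 64 signals (8 bytes), so __unused works out to 128 - 8 = 120 bytes and uc_mcontext lands at the fixed offset the C library expects.
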
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
new file mode 100644
index 000000000000..fe18a683274f
--- /dev/null
+++ b/arch/arm64/include/asm/unistd.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#if !defined(__ASM_UNISTD_H) || defined(__SYSCALL)
17#define __ASM_UNISTD_H
18
19#ifndef __SYSCALL_COMPAT
20#include <asm-generic/unistd.h>
21#endif
22
23#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
24#include <asm/unistd32.h>
25#endif
26
27#endif /* __ASM_UNISTD_H */
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
new file mode 100644
index 000000000000..a50405f5ee42
--- /dev/null
+++ b/arch/arm64/include/asm/unistd32.h
@@ -0,0 +1,758 @@
1/*
2 * Based on arch/arm/include/asm/unistd.h
3 *
4 * Copyright (C) 2001-2005 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#if !defined(__ASM_UNISTD32_H) || defined(__SYSCALL)
20#define __ASM_UNISTD32_H
21
22#ifndef __SYSCALL
23#define __SYSCALL(x, y)
24#endif
25
26/*
27 * This file contains the system call numbers.
28 */
29
30#ifdef __SYSCALL_COMPAT
31
32#define __NR_restart_syscall 0
33__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
34#define __NR_exit 1
35__SYSCALL(__NR_exit, sys_exit)
36#define __NR_fork 2
37__SYSCALL(__NR_fork, sys_fork)
38#define __NR_read 3
39__SYSCALL(__NR_read, sys_read)
40#define __NR_write 4
41__SYSCALL(__NR_write, sys_write)
42#define __NR_open 5
43__SYSCALL(__NR_open, sys_open)
44#define __NR_close 6
45__SYSCALL(__NR_close, sys_close)
46__SYSCALL(7, sys_ni_syscall) /* 7 was sys_waitpid */
47#define __NR_creat 8
48__SYSCALL(__NR_creat, sys_creat)
49#define __NR_link 9
50__SYSCALL(__NR_link, sys_link)
51#define __NR_unlink 10
52__SYSCALL(__NR_unlink, sys_unlink)
53#define __NR_execve 11
54__SYSCALL(__NR_execve, sys_execve)
55#define __NR_chdir 12
56__SYSCALL(__NR_chdir, sys_chdir)
57__SYSCALL(13, sys_ni_syscall) /* 13 was sys_time */
58#define __NR_mknod 14
59__SYSCALL(__NR_mknod, sys_mknod)
60#define __NR_chmod 15
61__SYSCALL(__NR_chmod, sys_chmod)
62#define __NR_lchown 16
63__SYSCALL(__NR_lchown, sys_lchown16)
64__SYSCALL(17, sys_ni_syscall) /* 17 was sys_break */
65__SYSCALL(18, sys_ni_syscall) /* 18 was sys_stat */
66#define __NR_lseek 19
67__SYSCALL(__NR_lseek, sys_lseek)
68#define __NR_getpid 20
69__SYSCALL(__NR_getpid, sys_getpid)
70#define __NR_mount 21
71__SYSCALL(__NR_mount, sys_mount)
72__SYSCALL(22, sys_ni_syscall) /* 22 was sys_umount */
73#define __NR_setuid 23
74__SYSCALL(__NR_setuid, sys_setuid16)
75#define __NR_getuid 24
76__SYSCALL(__NR_getuid, sys_getuid16)
77__SYSCALL(25, sys_ni_syscall) /* 25 was sys_stime */
78#define __NR_ptrace 26
79__SYSCALL(__NR_ptrace, sys_ptrace)
80__SYSCALL(27, sys_ni_syscall) /* 27 was sys_alarm */
81__SYSCALL(28, sys_ni_syscall) /* 28 was sys_fstat */
82#define __NR_pause 29
83__SYSCALL(__NR_pause, sys_pause)
84__SYSCALL(30, sys_ni_syscall) /* 30 was sys_utime */
85__SYSCALL(31, sys_ni_syscall) /* 31 was sys_stty */
86__SYSCALL(32, sys_ni_syscall) /* 32 was sys_gtty */
87#define __NR_access 33
88__SYSCALL(__NR_access, sys_access)
89#define __NR_nice 34
90__SYSCALL(__NR_nice, sys_nice)
91__SYSCALL(35, sys_ni_syscall) /* 35 was sys_ftime */
92#define __NR_sync 36
93__SYSCALL(__NR_sync, sys_sync)
94#define __NR_kill 37
95__SYSCALL(__NR_kill, sys_kill)
96#define __NR_rename 38
97__SYSCALL(__NR_rename, sys_rename)
98#define __NR_mkdir 39
99__SYSCALL(__NR_mkdir, sys_mkdir)
100#define __NR_rmdir 40
101__SYSCALL(__NR_rmdir, sys_rmdir)
102#define __NR_dup 41
103__SYSCALL(__NR_dup, sys_dup)
104#define __NR_pipe 42
105__SYSCALL(__NR_pipe, sys_pipe)
106#define __NR_times 43
107__SYSCALL(__NR_times, sys_times)
108__SYSCALL(44, sys_ni_syscall) /* 44 was sys_prof */
109#define __NR_brk 45
110__SYSCALL(__NR_brk, sys_brk)
111#define __NR_setgid 46
112__SYSCALL(__NR_setgid, sys_setgid16)
113#define __NR_getgid 47
114__SYSCALL(__NR_getgid, sys_getgid16)
115__SYSCALL(48, sys_ni_syscall) /* 48 was sys_signal */
116#define __NR_geteuid 49
117__SYSCALL(__NR_geteuid, sys_geteuid16)
118#define __NR_getegid 50
119__SYSCALL(__NR_getegid, sys_getegid16)
120#define __NR_acct 51
121__SYSCALL(__NR_acct, sys_acct)
122#define __NR_umount2 52
123__SYSCALL(__NR_umount2, sys_umount)
124__SYSCALL(53, sys_ni_syscall) /* 53 was sys_lock */
125#define __NR_ioctl 54
126__SYSCALL(__NR_ioctl, sys_ioctl)
127#define __NR_fcntl 55
128__SYSCALL(__NR_fcntl, sys_fcntl)
129__SYSCALL(56, sys_ni_syscall) /* 56 was sys_mpx */
130#define __NR_setpgid 57
131__SYSCALL(__NR_setpgid, sys_setpgid)
132__SYSCALL(58, sys_ni_syscall) /* 58 was sys_ulimit */
133__SYSCALL(59, sys_ni_syscall) /* 59 was sys_olduname */
134#define __NR_umask 60
135__SYSCALL(__NR_umask, sys_umask)
136#define __NR_chroot 61
137__SYSCALL(__NR_chroot, sys_chroot)
138#define __NR_ustat 62
139__SYSCALL(__NR_ustat, sys_ustat)
140#define __NR_dup2 63
141__SYSCALL(__NR_dup2, sys_dup2)
142#define __NR_getppid 64
143__SYSCALL(__NR_getppid, sys_getppid)
144#define __NR_getpgrp 65
145__SYSCALL(__NR_getpgrp, sys_getpgrp)
146#define __NR_setsid 66
147__SYSCALL(__NR_setsid, sys_setsid)
148#define __NR_sigaction 67
149__SYSCALL(__NR_sigaction, sys_sigaction)
150__SYSCALL(68, sys_ni_syscall) /* 68 was sys_sgetmask */
151__SYSCALL(69, sys_ni_syscall) /* 69 was sys_ssetmask */
152#define __NR_setreuid 70
153__SYSCALL(__NR_setreuid, sys_setreuid16)
154#define __NR_setregid 71
155__SYSCALL(__NR_setregid, sys_setregid16)
156#define __NR_sigsuspend 72
157__SYSCALL(__NR_sigsuspend, sys_sigsuspend)
158#define __NR_sigpending 73
159__SYSCALL(__NR_sigpending, sys_sigpending)
160#define __NR_sethostname 74
161__SYSCALL(__NR_sethostname, sys_sethostname)
162#define __NR_setrlimit 75
163__SYSCALL(__NR_setrlimit, sys_setrlimit)
164__SYSCALL(76, sys_ni_syscall) /* 76 was sys_getrlimit */
165#define __NR_getrusage 77
166__SYSCALL(__NR_getrusage, sys_getrusage)
167#define __NR_gettimeofday 78
168__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
169#define __NR_settimeofday 79
170__SYSCALL(__NR_settimeofday, sys_settimeofday)
171#define __NR_getgroups 80
172__SYSCALL(__NR_getgroups, sys_getgroups16)
173#define __NR_setgroups 81
174__SYSCALL(__NR_setgroups, sys_setgroups16)
175__SYSCALL(82, sys_ni_syscall) /* 82 was sys_select */
176#define __NR_symlink 83
177__SYSCALL(__NR_symlink, sys_symlink)
178__SYSCALL(84, sys_ni_syscall) /* 84 was sys_lstat */
179#define __NR_readlink 85
180__SYSCALL(__NR_readlink, sys_readlink)
181#define __NR_uselib 86
182__SYSCALL(__NR_uselib, sys_uselib)
183#define __NR_swapon 87
184__SYSCALL(__NR_swapon, sys_swapon)
185#define __NR_reboot 88
186__SYSCALL(__NR_reboot, sys_reboot)
187__SYSCALL(89, sys_ni_syscall) /* 89 was sys_readdir */
188__SYSCALL(90, sys_ni_syscall) /* 90 was sys_mmap */
189#define __NR_munmap 91
190__SYSCALL(__NR_munmap, sys_munmap)
191#define __NR_truncate 92
192__SYSCALL(__NR_truncate, sys_truncate)
193#define __NR_ftruncate 93
194__SYSCALL(__NR_ftruncate, sys_ftruncate)
195#define __NR_fchmod 94
196__SYSCALL(__NR_fchmod, sys_fchmod)
197#define __NR_fchown 95
198__SYSCALL(__NR_fchown, sys_fchown16)
199#define __NR_getpriority 96
200__SYSCALL(__NR_getpriority, sys_getpriority)
201#define __NR_setpriority 97
202__SYSCALL(__NR_setpriority, sys_setpriority)
203__SYSCALL(98, sys_ni_syscall) /* 98 was sys_profil */
204#define __NR_statfs 99
205__SYSCALL(__NR_statfs, sys_statfs)
206#define __NR_fstatfs 100
207__SYSCALL(__NR_fstatfs, sys_fstatfs)
208__SYSCALL(101, sys_ni_syscall) /* 101 was sys_ioperm */
209__SYSCALL(102, sys_ni_syscall) /* 102 was sys_socketcall */
210#define __NR_syslog 103
211__SYSCALL(__NR_syslog, sys_syslog)
212#define __NR_setitimer 104
213__SYSCALL(__NR_setitimer, sys_setitimer)
214#define __NR_getitimer 105
215__SYSCALL(__NR_getitimer, sys_getitimer)
216#define __NR_stat 106
217__SYSCALL(__NR_stat, sys_newstat)
218#define __NR_lstat 107
219__SYSCALL(__NR_lstat, sys_newlstat)
220#define __NR_fstat 108
221__SYSCALL(__NR_fstat, sys_newfstat)
222__SYSCALL(109, sys_ni_syscall) /* 109 was sys_uname */
223__SYSCALL(110, sys_ni_syscall) /* 110 was sys_iopl */
224#define __NR_vhangup 111
225__SYSCALL(__NR_vhangup, sys_vhangup)
226__SYSCALL(112, sys_ni_syscall) /* 112 was sys_idle */
227__SYSCALL(113, sys_ni_syscall) /* 113 was sys_syscall */
228#define __NR_wait4 114
229__SYSCALL(__NR_wait4, sys_wait4)
230#define __NR_swapoff 115
231__SYSCALL(__NR_swapoff, sys_swapoff)
232#define __NR_sysinfo 116
233__SYSCALL(__NR_sysinfo, sys_sysinfo)
234__SYSCALL(117, sys_ni_syscall) /* 117 was sys_ipc */
235#define __NR_fsync 118
236__SYSCALL(__NR_fsync, sys_fsync)
237#define __NR_sigreturn 119
238__SYSCALL(__NR_sigreturn, sys_sigreturn)
239#define __NR_clone 120
240__SYSCALL(__NR_clone, sys_clone)
241#define __NR_setdomainname 121
242__SYSCALL(__NR_setdomainname, sys_setdomainname)
243#define __NR_uname 122
244__SYSCALL(__NR_uname, sys_newuname)
245__SYSCALL(123, sys_ni_syscall) /* 123 was sys_modify_ldt */
246#define __NR_adjtimex 124
247__SYSCALL(__NR_adjtimex, sys_adjtimex)
248#define __NR_mprotect 125
249__SYSCALL(__NR_mprotect, sys_mprotect)
250#define __NR_sigprocmask 126
251__SYSCALL(__NR_sigprocmask, sys_sigprocmask)
252__SYSCALL(127, sys_ni_syscall) /* 127 was sys_create_module */
253#define __NR_init_module 128
254__SYSCALL(__NR_init_module, sys_init_module)
255#define __NR_delete_module 129
256__SYSCALL(__NR_delete_module, sys_delete_module)
257__SYSCALL(130, sys_ni_syscall) /* 130 was sys_get_kernel_syms */
258#define __NR_quotactl 131
259__SYSCALL(__NR_quotactl, sys_quotactl)
260#define __NR_getpgid 132
261__SYSCALL(__NR_getpgid, sys_getpgid)
262#define __NR_fchdir 133
263__SYSCALL(__NR_fchdir, sys_fchdir)
264#define __NR_bdflush 134
265__SYSCALL(__NR_bdflush, sys_bdflush)
266#define __NR_sysfs 135
267__SYSCALL(__NR_sysfs, sys_sysfs)
268#define __NR_personality 136
269__SYSCALL(__NR_personality, sys_personality)
270__SYSCALL(137, sys_ni_syscall) /* 137 was sys_afs_syscall */
271#define __NR_setfsuid 138
272__SYSCALL(__NR_setfsuid, sys_setfsuid16)
273#define __NR_setfsgid 139
274__SYSCALL(__NR_setfsgid, sys_setfsgid16)
275#define __NR__llseek 140
276__SYSCALL(__NR__llseek, sys_llseek)
277#define __NR_getdents 141
278__SYSCALL(__NR_getdents, sys_getdents)
279#define __NR__newselect 142
280__SYSCALL(__NR__newselect, sys_select)
281#define __NR_flock 143
282__SYSCALL(__NR_flock, sys_flock)
283#define __NR_msync 144
284__SYSCALL(__NR_msync, sys_msync)
285#define __NR_readv 145
286__SYSCALL(__NR_readv, sys_readv)
287#define __NR_writev 146
288__SYSCALL(__NR_writev, sys_writev)
289#define __NR_getsid 147
290__SYSCALL(__NR_getsid, sys_getsid)
291#define __NR_fdatasync 148
292__SYSCALL(__NR_fdatasync, sys_fdatasync)
293#define __NR__sysctl 149
294__SYSCALL(__NR__sysctl, sys_sysctl)
295#define __NR_mlock 150
296__SYSCALL(__NR_mlock, sys_mlock)
297#define __NR_munlock 151
298__SYSCALL(__NR_munlock, sys_munlock)
299#define __NR_mlockall 152
300__SYSCALL(__NR_mlockall, sys_mlockall)
301#define __NR_munlockall 153
302__SYSCALL(__NR_munlockall, sys_munlockall)
303#define __NR_sched_setparam 154
304__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
305#define __NR_sched_getparam 155
306__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
307#define __NR_sched_setscheduler 156
308__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
309#define __NR_sched_getscheduler 157
310__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
311#define __NR_sched_yield 158
312__SYSCALL(__NR_sched_yield, sys_sched_yield)
313#define __NR_sched_get_priority_max 159
314__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
315#define __NR_sched_get_priority_min 160
316__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
317#define __NR_sched_rr_get_interval 161
318__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
319#define __NR_nanosleep 162
320__SYSCALL(__NR_nanosleep, sys_nanosleep)
321#define __NR_mremap 163
322__SYSCALL(__NR_mremap, sys_mremap)
323#define __NR_setresuid 164
324__SYSCALL(__NR_setresuid, sys_setresuid16)
325#define __NR_getresuid 165
326__SYSCALL(__NR_getresuid, sys_getresuid16)
327__SYSCALL(166, sys_ni_syscall) /* 166 was sys_vm86 */
328__SYSCALL(167, sys_ni_syscall) /* 167 was sys_query_module */
329#define __NR_poll 168
330__SYSCALL(__NR_poll, sys_poll)
331#define __NR_nfsservctl 169
332__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
333#define __NR_setresgid 170
334__SYSCALL(__NR_setresgid, sys_setresgid16)
335#define __NR_getresgid 171
336__SYSCALL(__NR_getresgid, sys_getresgid16)
337#define __NR_prctl 172
338__SYSCALL(__NR_prctl, sys_prctl)
339#define __NR_rt_sigreturn 173
340__SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn)
341#define __NR_rt_sigaction 174
342__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction)
343#define __NR_rt_sigprocmask 175
344__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
345#define __NR_rt_sigpending 176
346__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
347#define __NR_rt_sigtimedwait 177
348__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
349#define __NR_rt_sigqueueinfo 178
350__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
351#define __NR_rt_sigsuspend 179
352__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend)
353#define __NR_pread64 180
354__SYSCALL(__NR_pread64, sys_pread64)
355#define __NR_pwrite64 181
356__SYSCALL(__NR_pwrite64, sys_pwrite64)
357#define __NR_chown 182
358__SYSCALL(__NR_chown, sys_chown16)
359#define __NR_getcwd 183
360__SYSCALL(__NR_getcwd, sys_getcwd)
361#define __NR_capget 184
362__SYSCALL(__NR_capget, sys_capget)
363#define __NR_capset 185
364__SYSCALL(__NR_capset, sys_capset)
365#define __NR_sigaltstack 186
366__SYSCALL(__NR_sigaltstack, sys_sigaltstack)
367#define __NR_sendfile 187
368__SYSCALL(__NR_sendfile, sys_sendfile)
369__SYSCALL(188, sys_ni_syscall) /* 188 reserved */
370__SYSCALL(189, sys_ni_syscall) /* 189 reserved */
371#define __NR_vfork 190
372__SYSCALL(__NR_vfork, sys_vfork)
373#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
374__SYSCALL(__NR_ugetrlimit, sys_getrlimit)
375#define __NR_mmap2 192
376__SYSCALL(__NR_mmap2, sys_mmap2)
377#define __NR_truncate64 193
378__SYSCALL(__NR_truncate64, sys_truncate64)
379#define __NR_ftruncate64 194
380__SYSCALL(__NR_ftruncate64, sys_ftruncate64)
381#define __NR_stat64 195
382__SYSCALL(__NR_stat64, sys_stat64)
383#define __NR_lstat64 196
384__SYSCALL(__NR_lstat64, sys_lstat64)
385#define __NR_fstat64 197
386__SYSCALL(__NR_fstat64, sys_fstat64)
387#define __NR_lchown32 198
388__SYSCALL(__NR_lchown32, sys_lchown)
389#define __NR_getuid32 199
390__SYSCALL(__NR_getuid32, sys_getuid)
391#define __NR_getgid32 200
392__SYSCALL(__NR_getgid32, sys_getgid)
393#define __NR_geteuid32 201
394__SYSCALL(__NR_geteuid32, sys_geteuid)
395#define __NR_getegid32 202
396__SYSCALL(__NR_getegid32, sys_getegid)
397#define __NR_setreuid32 203
398__SYSCALL(__NR_setreuid32, sys_setreuid)
399#define __NR_setregid32 204
400__SYSCALL(__NR_setregid32, sys_setregid)
401#define __NR_getgroups32 205
402__SYSCALL(__NR_getgroups32, sys_getgroups)
403#define __NR_setgroups32 206
404__SYSCALL(__NR_setgroups32, sys_setgroups)
405#define __NR_fchown32 207
406__SYSCALL(__NR_fchown32, sys_fchown)
407#define __NR_setresuid32 208
408__SYSCALL(__NR_setresuid32, sys_setresuid)
409#define __NR_getresuid32 209
410__SYSCALL(__NR_getresuid32, sys_getresuid)
411#define __NR_setresgid32 210
412__SYSCALL(__NR_setresgid32, sys_setresgid)
413#define __NR_getresgid32 211
414__SYSCALL(__NR_getresgid32, sys_getresgid)
415#define __NR_chown32 212
416__SYSCALL(__NR_chown32, sys_chown)
417#define __NR_setuid32 213
418__SYSCALL(__NR_setuid32, sys_setuid)
419#define __NR_setgid32 214
420__SYSCALL(__NR_setgid32, sys_setgid)
421#define __NR_setfsuid32 215
422__SYSCALL(__NR_setfsuid32, sys_setfsuid)
423#define __NR_setfsgid32 216
424__SYSCALL(__NR_setfsgid32, sys_setfsgid)
425#define __NR_getdents64 217
426__SYSCALL(__NR_getdents64, sys_getdents64)
427#define __NR_pivot_root 218
428__SYSCALL(__NR_pivot_root, sys_pivot_root)
429#define __NR_mincore 219
430__SYSCALL(__NR_mincore, sys_mincore)
431#define __NR_madvise 220
432__SYSCALL(__NR_madvise, sys_madvise)
433#define __NR_fcntl64 221
434__SYSCALL(__NR_fcntl64, sys_fcntl64)
435__SYSCALL(222, sys_ni_syscall) /* 222 for tux */
436__SYSCALL(223, sys_ni_syscall) /* 223 is unused */
437#define __NR_gettid 224
438__SYSCALL(__NR_gettid, sys_gettid)
439#define __NR_readahead 225
440__SYSCALL(__NR_readahead, sys_readahead)
441#define __NR_setxattr 226
442__SYSCALL(__NR_setxattr, sys_setxattr)
443#define __NR_lsetxattr 227
444__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
445#define __NR_fsetxattr 228
446__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
447#define __NR_getxattr 229
448__SYSCALL(__NR_getxattr, sys_getxattr)
449#define __NR_lgetxattr 230
450__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
451#define __NR_fgetxattr 231
452__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
453#define __NR_listxattr 232
454__SYSCALL(__NR_listxattr, sys_listxattr)
455#define __NR_llistxattr 233
456__SYSCALL(__NR_llistxattr, sys_llistxattr)
457#define __NR_flistxattr 234
458__SYSCALL(__NR_flistxattr, sys_flistxattr)
459#define __NR_removexattr 235
460__SYSCALL(__NR_removexattr, sys_removexattr)
461#define __NR_lremovexattr 236
462__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
463#define __NR_fremovexattr 237
464__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
465#define __NR_tkill 238
466__SYSCALL(__NR_tkill, sys_tkill)
467#define __NR_sendfile64 239
468__SYSCALL(__NR_sendfile64, sys_sendfile64)
469#define __NR_futex 240
470__SYSCALL(__NR_futex, sys_futex)
471#define __NR_sched_setaffinity 241
472__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
473#define __NR_sched_getaffinity 242
474__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
475#define __NR_io_setup 243
476__SYSCALL(__NR_io_setup, sys_io_setup)
477#define __NR_io_destroy 244
478__SYSCALL(__NR_io_destroy, sys_io_destroy)
479#define __NR_io_getevents 245
480__SYSCALL(__NR_io_getevents, sys_io_getevents)
481#define __NR_io_submit 246
482__SYSCALL(__NR_io_submit, sys_io_submit)
483#define __NR_io_cancel 247
484__SYSCALL(__NR_io_cancel, sys_io_cancel)
485#define __NR_exit_group 248
486__SYSCALL(__NR_exit_group, sys_exit_group)
487#define __NR_lookup_dcookie 249
488__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
489#define __NR_epoll_create 250
490__SYSCALL(__NR_epoll_create, sys_epoll_create)
491#define __NR_epoll_ctl 251
492__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
493#define __NR_epoll_wait 252
494__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
495#define __NR_remap_file_pages 253
496__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
497__SYSCALL(254, sys_ni_syscall) /* 254 for set_thread_area */
498__SYSCALL(255, sys_ni_syscall) /* 255 for get_thread_area */
499#define __NR_set_tid_address 256
500__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
501#define __NR_timer_create 257
502__SYSCALL(__NR_timer_create, sys_timer_create)
503#define __NR_timer_settime 258
504__SYSCALL(__NR_timer_settime, sys_timer_settime)
505#define __NR_timer_gettime 259
506__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
507#define __NR_timer_getoverrun 260
508__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
509#define __NR_timer_delete 261
510__SYSCALL(__NR_timer_delete, sys_timer_delete)
511#define __NR_clock_settime 262
512__SYSCALL(__NR_clock_settime, sys_clock_settime)
513#define __NR_clock_gettime 263
514__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
515#define __NR_clock_getres 264
516__SYSCALL(__NR_clock_getres, sys_clock_getres)
517#define __NR_clock_nanosleep 265
518__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
519#define __NR_statfs64 266
520__SYSCALL(__NR_statfs64, sys_statfs64)
521#define __NR_fstatfs64 267
522__SYSCALL(__NR_fstatfs64, sys_fstatfs64)
523#define __NR_tgkill 268
524__SYSCALL(__NR_tgkill, sys_tgkill)
525#define __NR_utimes 269
526__SYSCALL(__NR_utimes, sys_utimes)
527#define __NR_fadvise64 270
528__SYSCALL(__NR_fadvise64, sys_fadvise64_64)
529#define __NR_pciconfig_iobase 271
530__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
531#define __NR_pciconfig_read 272
532__SYSCALL(__NR_pciconfig_read, sys_pciconfig_read)
533#define __NR_pciconfig_write 273
534__SYSCALL(__NR_pciconfig_write, sys_pciconfig_write)
535#define __NR_mq_open 274
536__SYSCALL(__NR_mq_open, sys_mq_open)
537#define __NR_mq_unlink 275
538__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
539#define __NR_mq_timedsend 276
540__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
541#define __NR_mq_timedreceive 277
542__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
543#define __NR_mq_notify 278
544__SYSCALL(__NR_mq_notify, sys_mq_notify)
545#define __NR_mq_getsetattr 279
546__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
547#define __NR_waitid 280
548__SYSCALL(__NR_waitid, sys_waitid)
549#define __NR_socket 281
550__SYSCALL(__NR_socket, sys_socket)
551#define __NR_bind 282
552__SYSCALL(__NR_bind, sys_bind)
553#define __NR_connect 283
554__SYSCALL(__NR_connect, sys_connect)
555#define __NR_listen 284
556__SYSCALL(__NR_listen, sys_listen)
557#define __NR_accept 285
558__SYSCALL(__NR_accept, sys_accept)
559#define __NR_getsockname 286
560__SYSCALL(__NR_getsockname, sys_getsockname)
561#define __NR_getpeername 287
562__SYSCALL(__NR_getpeername, sys_getpeername)
563#define __NR_socketpair 288
564__SYSCALL(__NR_socketpair, sys_socketpair)
565#define __NR_send 289
566__SYSCALL(__NR_send, sys_send)
567#define __NR_sendto 290
568__SYSCALL(__NR_sendto, sys_sendto)
569#define __NR_recv 291
570__SYSCALL(__NR_recv, sys_recv)
571#define __NR_recvfrom 292
572__SYSCALL(__NR_recvfrom, sys_recvfrom)
573#define __NR_shutdown 293
574__SYSCALL(__NR_shutdown, sys_shutdown)
575#define __NR_setsockopt 294
576__SYSCALL(__NR_setsockopt, sys_setsockopt)
577#define __NR_getsockopt 295
578__SYSCALL(__NR_getsockopt, sys_getsockopt)
579#define __NR_sendmsg 296
580__SYSCALL(__NR_sendmsg, sys_sendmsg)
581#define __NR_recvmsg 297
582__SYSCALL(__NR_recvmsg, sys_recvmsg)
583#define __NR_semop 298
584__SYSCALL(__NR_semop, sys_semop)
585#define __NR_semget 299
586__SYSCALL(__NR_semget, sys_semget)
587#define __NR_semctl 300
588__SYSCALL(__NR_semctl, sys_semctl)
589#define __NR_msgsnd 301
590__SYSCALL(__NR_msgsnd, sys_msgsnd)
591#define __NR_msgrcv 302
592__SYSCALL(__NR_msgrcv, sys_msgrcv)
593#define __NR_msgget 303
594__SYSCALL(__NR_msgget, sys_msgget)
595#define __NR_msgctl 304
596__SYSCALL(__NR_msgctl, sys_msgctl)
597#define __NR_shmat 305
598__SYSCALL(__NR_shmat, sys_shmat)
599#define __NR_shmdt 306
600__SYSCALL(__NR_shmdt, sys_shmdt)
601#define __NR_shmget 307
602__SYSCALL(__NR_shmget, sys_shmget)
603#define __NR_shmctl 308
604__SYSCALL(__NR_shmctl, sys_shmctl)
605#define __NR_add_key 309
606__SYSCALL(__NR_add_key, sys_add_key)
607#define __NR_request_key 310
608__SYSCALL(__NR_request_key, sys_request_key)
609#define __NR_keyctl 311
610__SYSCALL(__NR_keyctl, sys_keyctl)
611#define __NR_semtimedop 312
612__SYSCALL(__NR_semtimedop, sys_semtimedop)
613#define __NR_vserver 313
614__SYSCALL(__NR_vserver, sys_ni_syscall)
615#define __NR_ioprio_set 314
616__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
617#define __NR_ioprio_get 315
618__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
619#define __NR_inotify_init 316
620__SYSCALL(__NR_inotify_init, sys_inotify_init)
621#define __NR_inotify_add_watch 317
622__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
623#define __NR_inotify_rm_watch 318
624__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
625#define __NR_mbind 319
626__SYSCALL(__NR_mbind, sys_mbind)
627#define __NR_get_mempolicy 320
628__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
629#define __NR_set_mempolicy 321
630__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
631#define __NR_openat 322
632__SYSCALL(__NR_openat, sys_openat)
633#define __NR_mkdirat 323
634__SYSCALL(__NR_mkdirat, sys_mkdirat)
635#define __NR_mknodat 324
636__SYSCALL(__NR_mknodat, sys_mknodat)
637#define __NR_fchownat 325
638__SYSCALL(__NR_fchownat, sys_fchownat)
639#define __NR_futimesat 326
640__SYSCALL(__NR_futimesat, sys_futimesat)
641#define __NR_fstatat64 327
642__SYSCALL(__NR_fstatat64, sys_fstatat64)
643#define __NR_unlinkat 328
644__SYSCALL(__NR_unlinkat, sys_unlinkat)
645#define __NR_renameat 329
646__SYSCALL(__NR_renameat, sys_renameat)
647#define __NR_linkat 330
648__SYSCALL(__NR_linkat, sys_linkat)
649#define __NR_symlinkat 331
650__SYSCALL(__NR_symlinkat, sys_symlinkat)
651#define __NR_readlinkat 332
652__SYSCALL(__NR_readlinkat, sys_readlinkat)
653#define __NR_fchmodat 333
654__SYSCALL(__NR_fchmodat, sys_fchmodat)
655#define __NR_faccessat 334
656__SYSCALL(__NR_faccessat, sys_faccessat)
657#define __NR_pselect6 335
658__SYSCALL(__NR_pselect6, sys_pselect6)
659#define __NR_ppoll 336
660__SYSCALL(__NR_ppoll, sys_ppoll)
661#define __NR_unshare 337
662__SYSCALL(__NR_unshare, sys_unshare)
663#define __NR_set_robust_list 338
664__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
665#define __NR_get_robust_list 339
666__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
667#define __NR_splice 340
668__SYSCALL(__NR_splice, sys_splice)
669#define __NR_sync_file_range2 341
670__SYSCALL(__NR_sync_file_range2, sys_sync_file_range2)
671#define __NR_tee 342
672__SYSCALL(__NR_tee, sys_tee)
673#define __NR_vmsplice 343
674__SYSCALL(__NR_vmsplice, sys_vmsplice)
675#define __NR_move_pages 344
676__SYSCALL(__NR_move_pages, sys_move_pages)
677#define __NR_getcpu 345
678__SYSCALL(__NR_getcpu, sys_getcpu)
679#define __NR_epoll_pwait 346
680__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
681#define __NR_kexec_load 347
682__SYSCALL(__NR_kexec_load, sys_kexec_load)
683#define __NR_utimensat 348
684__SYSCALL(__NR_utimensat, sys_utimensat)
685#define __NR_signalfd 349
686__SYSCALL(__NR_signalfd, sys_signalfd)
687#define __NR_timerfd_create 350
688__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
689#define __NR_eventfd 351
690__SYSCALL(__NR_eventfd, sys_eventfd)
691#define __NR_fallocate 352
692__SYSCALL(__NR_fallocate, sys_fallocate)
693#define __NR_timerfd_settime 353
694__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
695#define __NR_timerfd_gettime 354
696__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
697#define __NR_signalfd4 355
698__SYSCALL(__NR_signalfd4, sys_signalfd4)
699#define __NR_eventfd2 356
700__SYSCALL(__NR_eventfd2, sys_eventfd2)
701#define __NR_epoll_create1 357
702__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
703#define __NR_dup3 358
704__SYSCALL(__NR_dup3, sys_dup3)
705#define __NR_pipe2 359
706__SYSCALL(__NR_pipe2, sys_pipe2)
707#define __NR_inotify_init1 360
708__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
709#define __NR_preadv 361
710__SYSCALL(__NR_preadv, sys_preadv)
711#define __NR_pwritev 362
712__SYSCALL(__NR_pwritev, sys_pwritev)
713#define __NR_rt_tgsigqueueinfo 363
714__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
715#define __NR_perf_event_open 364
716__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
717#define __NR_recvmmsg 365
718__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
719#define __NR_accept4 366
720__SYSCALL(__NR_accept4, sys_accept4)
721#define __NR_fanotify_init 367
722__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
723#define __NR_fanotify_mark 368
724__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
725#define __NR_prlimit64 369
726__SYSCALL(__NR_prlimit64, sys_prlimit64)
727#define __NR_name_to_handle_at 370
728__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
729#define __NR_open_by_handle_at 371
730__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
731#define __NR_clock_adjtime 372
732__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
733#define __NR_syncfs 373
734__SYSCALL(__NR_syncfs, sys_syncfs)
735
736/*
737 * The following SVCs are ARM private.
738 */
739#define __ARM_NR_COMPAT_BASE 0x0f0000
740#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
741#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
742
743#endif /* __SYSCALL_COMPAT */
744
745#define __NR_compat_syscalls 374
746
747#define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION
748#define __ARCH_WANT_COMPAT_STAT64
749#define __ARCH_WANT_SYS_GETHOSTNAME
750#define __ARCH_WANT_SYS_PAUSE
751#define __ARCH_WANT_SYS_GETPGRP
752#define __ARCH_WANT_SYS_LLSEEK
753#define __ARCH_WANT_SYS_NICE
754#define __ARCH_WANT_SYS_SIGPENDING
755#define __ARCH_WANT_SYS_SIGPROCMASK
756#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
757
758#endif /* __ASM_UNISTD32_H */
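
The header is laid out in the X-macro style: each entry both defines __NR_xxx and expands __SYSCALL(nr, entry), so a consumer can regenerate the entire table by redefining __SYSCALL and re-including the file. A sketch of the technique (illustrative; not the exact arm64 table source):

	/* Sketch: building a compat dispatch table from unistd32.h. */
	#define __SYSCALL_COMPAT
	#undef __SYSCALL
	#define __SYSCALL(nr, sym)	[nr] = sym,

	void *compat_sys_call_table[__NR_compat_syscalls] = {
		[0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
	#include <asm/unistd32.h>
	};
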
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
new file mode 100644
index 000000000000..839ce0031bd5
--- /dev/null
+++ b/arch/arm64/include/asm/vdso.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2012 ARM Limited
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_VDSO_H
17#define __ASM_VDSO_H
18
19#ifdef __KERNEL__
20
21/*
22 * Default link address for the vDSO.
23 * Since we randomise the VDSO mapping, there's little point in trying
24 * to prelink this.
25 */
26#define VDSO_LBASE 0x0
27
28#ifndef __ASSEMBLY__
29
30#include <generated/vdso-offsets.h>
31
32#define VDSO_SYMBOL(base, name) \
33({ \
34 (void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \
35})
36
37#endif /* !__ASSEMBLY__ */
38
39#endif /* __KERNEL__ */
40
41#endif /* __ASM_VDSO_H */
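
VDSO_SYMBOL() turns a link-time offset from the generated vdso-offsets.h into a run-time address once the vDSO pages have been mapped into a process. A hypothetical use, with an illustrative base variable and symbol name:

	/* Hypothetical: resolve a vDSO entry point after the pages are mapped. */
	void *sigtramp_addr = VDSO_SYMBOL(vdso_mapping_base, sigtramp);
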
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
new file mode 100644
index 000000000000..de66199673d7
--- /dev/null
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2012 ARM Limited
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_VDSO_DATAPAGE_H
17#define __ASM_VDSO_DATAPAGE_H
18
19#ifdef __KERNEL__
20
21#ifndef __ASSEMBLY__
22
23struct vdso_data {
24 __u64 cs_cycle_last; /* Timebase at clocksource init */
25 __u64 xtime_clock_sec; /* Kernel time */
26 __u64 xtime_clock_nsec;
27 __u64 xtime_coarse_sec; /* Coarse time */
28 __u64 xtime_coarse_nsec;
29 __u64 wtm_clock_sec; /* Wall to monotonic time */
30 __u64 wtm_clock_nsec;
31 __u32 tb_seq_count; /* Timebase sequence counter */
32 __u32 cs_mult; /* Clocksource multiplier */
33 __u32 cs_shift; /* Clocksource shift */
34 __u32 tz_minuteswest; /* Whacky timezone stuff */
35 __u32 tz_dsttime;
36 __u32 use_syscall;
37};
38
39#endif /* !__ASSEMBLY__ */
40
41#endif /* __KERNEL__ */
42
43#endif /* __ASM_VDSO_DATAPAGE_H */
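
tb_seq_count makes the page readable without locks: the kernel moves it to an odd value before updating the other fields and to a new even value afterwards, so a reader retries whenever the counter was odd or changed underneath it. A sketch of the reader side (illustrative; memory barriers and volatile accesses are omitted, and the real vDSO does this in assembly):

	/* Sketch: seqcount-style read of one field from the vDSO data page. */
	static unsigned long read_coarse_sec(const struct vdso_data *vd)
	{
		unsigned int seq;
		unsigned long sec;

		do {
			do {
				seq = vd->tb_seq_count;
			} while (seq & 1);		/* odd: update in progress */
			sec = vd->xtime_coarse_sec;
		} while (vd->tb_seq_count != seq);	/* changed: retry */

		return sec;
	}
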
diff --git a/arch/arm64/kernel/.gitignore b/arch/arm64/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/arm64/kernel/.gitignore
@@ -0,0 +1 @@
vmlinux.lds
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
new file mode 100644
index 000000000000..e2caff1b812a
--- /dev/null
+++ b/arch/arm64/kernel/Makefile
@@ -0,0 +1,27 @@
1#
2# Makefile for the linux kernel.
3#
4
5CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
6AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
7
8# Object file lists.
9arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
10 entry-fpsimd.o process.o ptrace.o setup.o signal.o \
11 sys.o stacktrace.o time.o traps.o io.o vdso.o
12
13arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
14 sys_compat.o
15arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
16arm64-obj-$(CONFIG_SMP) += smp.o
17arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
18arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
19
20obj-y += $(arm64-obj-y) vdso/
21obj-m += $(arm64-obj-m)
22head-y := head.o
23extra-y := $(head-y) vmlinux.lds
24
25# vDSO - this must be built first to generate the symbol offsets
26$(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h
27$(obj)/vdso/vdso-offsets.h: $(obj)/vdso
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
new file mode 100644
index 000000000000..cef3925eaf60
--- /dev/null
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -0,0 +1,46 @@
1/*
2 * Based on arch/arm/kernel/armksyms.c
3 *
4 * Copyright (C) 2000 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/export.h>
21#include <linux/sched.h>
22#include <linux/string.h>
23#include <linux/cryptohash.h>
24#include <linux/delay.h>
25#include <linux/in6.h>
26#include <linux/syscalls.h>
27#include <linux/uaccess.h>
28#include <linux/io.h>
29
30#include <asm/checksum.h>
31
32 /* user mem (segment) */
33EXPORT_SYMBOL(__strnlen_user);
34EXPORT_SYMBOL(__strncpy_from_user);
35
36EXPORT_SYMBOL(copy_page);
37
38EXPORT_SYMBOL(__copy_from_user);
39EXPORT_SYMBOL(__copy_to_user);
40EXPORT_SYMBOL(__clear_user);
41
42 /* bitops */
43EXPORT_SYMBOL(__atomic_hash);
44
45 /* physical memory */
46EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
new file mode 100644
index 000000000000..a2a4d810bea3
--- /dev/null
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -0,0 +1,108 @@
1/*
2 * Based on arch/arm/kernel/asm-offsets.c
3 *
4 * Copyright (C) 1995-2003 Russell King
5 * 2001-2002 Keith Owens
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/sched.h>
22#include <linux/mm.h>
23#include <linux/dma-mapping.h>
24#include <asm/thread_info.h>
25#include <asm/memory.h>
26#include <asm/cputable.h>
27#include <asm/vdso_datapage.h>
28#include <linux/kbuild.h>
29
30int main(void)
31{
32 DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
33 BLANK();
34 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
35 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
36 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
37 DEFINE(TI_TASK, offsetof(struct thread_info, task));
38 DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
39 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
40 BLANK();
41 DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
42 BLANK();
43 DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
44 DEFINE(S_X1, offsetof(struct pt_regs, regs[1]));
45 DEFINE(S_X2, offsetof(struct pt_regs, regs[2]));
46 DEFINE(S_X3, offsetof(struct pt_regs, regs[3]));
47 DEFINE(S_X4, offsetof(struct pt_regs, regs[4]));
48 DEFINE(S_X5, offsetof(struct pt_regs, regs[5]));
49 DEFINE(S_X6, offsetof(struct pt_regs, regs[6]));
50 DEFINE(S_X7, offsetof(struct pt_regs, regs[7]));
51 DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
52 DEFINE(S_SP, offsetof(struct pt_regs, sp));
53#ifdef CONFIG_COMPAT
54 DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp));
55#endif
56 DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
57 DEFINE(S_PC, offsetof(struct pt_regs, pc));
58 DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
59 DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
60 DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
61 BLANK();
62 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
63 BLANK();
64 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
65 DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
66 BLANK();
67 DEFINE(VM_EXEC, VM_EXEC);
68 BLANK();
69 DEFINE(PAGE_SZ, PAGE_SIZE);
70 BLANK();
71 DEFINE(CPU_INFO_SZ, sizeof(struct cpu_info));
72 DEFINE(CPU_INFO_SETUP, offsetof(struct cpu_info, cpu_setup));
73 BLANK();
74 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
75 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
76 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
77 BLANK();
78 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
79 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
80 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
81 DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
82 DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
83 DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
84 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
85 BLANK();
86 DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last));
87 DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec));
88 DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
89 DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
90 DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
91 DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec));
92 DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec));
93 DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count));
94 DEFINE(VDSO_CS_MULT, offsetof(struct vdso_data, cs_mult));
95 DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift));
96 DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest));
97 DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
98 DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall));
99 BLANK();
100 DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec));
101 DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec));
102 DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec));
103 DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec));
104 BLANK();
105 DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
106 DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
107 return 0;
108}
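
This file is never linked into the kernel: kbuild compiles it to assembly and extracts each DEFINE() into generated/asm-offsets.h, so assembly code can refer to C structure offsets symbolically. The kernel_entry macro in entry.S below, for example, stores the exception return state with "stp x22, x23, [sp, #S_PC]". The generated header boils down to plain defines; the values below are placeholders produced at build time, not the real offsets:

	/* Sketch of generated/asm-offsets.h output; numeric values are illustrative. */
	#define S_PC		256	/* offsetof(struct pt_regs, pc) */
	#define S_FRAME_SIZE	288	/* sizeof(struct pt_regs) */
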
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
new file mode 100644
index 000000000000..63cfc4a43f4e
--- /dev/null
+++ b/arch/arm64/kernel/cputable.c
@@ -0,0 +1,33 @@
1/*
2 * arch/arm64/kernel/cputable.c
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/init.h>
20
21#include <asm/cputable.h>
22
23extern unsigned long __cpu_setup(void);
24
25struct cpu_info __initdata cpu_table[] = {
26 {
27 .cpu_id_val = 0x000f0000,
28 .cpu_id_mask = 0x000f0000,
29 .cpu_name = "AArch64 Processor",
30 .cpu_setup = __cpu_setup,
31 },
32 { /* Empty */ },
33};
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
new file mode 100644
index 000000000000..0c3ba9f51376
--- /dev/null
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -0,0 +1,288 @@
1/*
2 * ARMv8 single-step debug support and mdscr context switching.
3 *
4 * Copyright (C) 2012 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#include <linux/cpu.h>
22#include <linux/debugfs.h>
23#include <linux/hardirq.h>
24#include <linux/init.h>
25#include <linux/ptrace.h>
26#include <linux/stat.h>
27
28#include <asm/debug-monitors.h>
29#include <asm/local.h>
30#include <asm/cputype.h>
31#include <asm/system_misc.h>
32
33/* Low-level stepping controls. */
34#define DBG_MDSCR_SS (1 << 0)
35#define DBG_SPSR_SS (1 << 21)
36
37/* MDSCR_EL1 enabling bits */
38#define DBG_MDSCR_KDE (1 << 13)
39#define DBG_MDSCR_MDE (1 << 15)
40#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
41
42/* Determine debug architecture. */
43u8 debug_monitors_arch(void)
44{
45 return read_cpuid(ID_AA64DFR0_EL1) & 0xf;
46}
47
48/*
49 * MDSCR access routines.
50 */
51static void mdscr_write(u32 mdscr)
52{
53 unsigned long flags;
54 local_dbg_save(flags);
55 asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
56 local_dbg_restore(flags);
57}
58
59static u32 mdscr_read(void)
60{
61 u32 mdscr;
62 asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
63 return mdscr;
64}
65
66/*
67 * Allow root to disable self-hosted debug from userspace.
68 * This is useful if you want to connect an external JTAG debugger.
69 */
70static u32 debug_enabled = 1;
71
72static int create_debug_debugfs_entry(void)
73{
74 debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
75 return 0;
76}
77fs_initcall(create_debug_debugfs_entry);
78
79static int __init early_debug_disable(char *buf)
80{
81 debug_enabled = 0;
82 return 0;
83}
84
85early_param("nodebugmon", early_debug_disable);
86
87/*
88 * Keep track of debug users on each core.
89 * The ref counts are per-cpu so we use a local_t type.
90 */
91static DEFINE_PER_CPU(local_t, mde_ref_count);
92static DEFINE_PER_CPU(local_t, kde_ref_count);
93
94void enable_debug_monitors(enum debug_el el)
95{
96 u32 mdscr, enable = 0;
97
98 WARN_ON(preemptible());
99
100 if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
101 enable = DBG_MDSCR_MDE;
102
103 if (el == DBG_ACTIVE_EL1 &&
104 local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
105 enable |= DBG_MDSCR_KDE;
106
107 if (enable && debug_enabled) {
108 mdscr = mdscr_read();
109 mdscr |= enable;
110 mdscr_write(mdscr);
111 }
112}
113
114void disable_debug_monitors(enum debug_el el)
115{
116 u32 mdscr, disable = 0;
117
118 WARN_ON(preemptible());
119
120 if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
121 disable = ~DBG_MDSCR_MDE;
122
123 if (el == DBG_ACTIVE_EL1 &&
124 local_dec_and_test(&__get_cpu_var(kde_ref_count)))
125 disable &= ~DBG_MDSCR_KDE;
126
127 if (disable) {
128 mdscr = mdscr_read();
129 mdscr &= disable;
130 mdscr_write(mdscr);
131 }
132}
133
134/*
135 * OS lock clearing.
136 */
137static void clear_os_lock(void *unused)
138{
139 asm volatile("msr mdscr_el1, %0" : : "r" (0));
140 isb();
141 asm volatile("msr oslar_el1, %0" : : "r" (0));
142 isb();
143}
144
145static int __cpuinit os_lock_notify(struct notifier_block *self,
146 unsigned long action, void *data)
147{
148 int cpu = (unsigned long)data;
149 if (action == CPU_ONLINE)
150 smp_call_function_single(cpu, clear_os_lock, NULL, 1);
151 return NOTIFY_OK;
152}
153
154static struct notifier_block __cpuinitdata os_lock_nb = {
155 .notifier_call = os_lock_notify,
156};
157
158static int __cpuinit debug_monitors_init(void)
159{
160 /* Clear the OS lock. */
161 smp_call_function(clear_os_lock, NULL, 1);
162 clear_os_lock(NULL);
163
164 /* Register hotplug handler. */
165 register_cpu_notifier(&os_lock_nb);
166 return 0;
167}
168postcore_initcall(debug_monitors_init);
169
170/*
171 * Single step API and exception handling.
172 */
173static void set_regs_spsr_ss(struct pt_regs *regs)
174{
175 unsigned long spsr;
176
177 spsr = regs->pstate;
178 spsr &= ~DBG_SPSR_SS;
179 spsr |= DBG_SPSR_SS;
180 regs->pstate = spsr;
181}
182
183static void clear_regs_spsr_ss(struct pt_regs *regs)
184{
185 unsigned long spsr;
186
187 spsr = regs->pstate;
188 spsr &= ~DBG_SPSR_SS;
189 regs->pstate = spsr;
190}
191
192static int single_step_handler(unsigned long addr, unsigned int esr,
193 struct pt_regs *regs)
194{
195 siginfo_t info;
196
197 /*
198 * If we are stepping a pending breakpoint, call the hw_breakpoint
199 * handler first.
200 */
201 if (!reinstall_suspended_bps(regs))
202 return 0;
203
204 if (user_mode(regs)) {
205 info.si_signo = SIGTRAP;
206 info.si_errno = 0;
207 info.si_code = TRAP_HWBKPT;
208 info.si_addr = (void __user *)instruction_pointer(regs);
209 force_sig_info(SIGTRAP, &info, current);
210
211 /*
212 * ptrace will disable single step unless explicitly
213 * asked to re-enable it. For other clients, it makes
214 * sense to leave it enabled (i.e. rewind the controls
215 * to the active-not-pending state).
216 */
217 user_rewind_single_step(current);
218 } else {
219 /* TODO: route to KGDB */
220 pr_warning("Unexpected kernel single-step exception at EL1\n");
221 /*
222 * Re-enable stepping since we know that we will be
223 * returning to regs.
224 */
225 set_regs_spsr_ss(regs);
226 }
227
228 return 0;
229}
230
231static int __init single_step_init(void)
232{
233 hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
234 TRAP_HWBKPT, "single-step handler");
235 return 0;
236}
237arch_initcall(single_step_init);
238
239/* Re-enable single step for syscall restarting. */
240void user_rewind_single_step(struct task_struct *task)
241{
242 /*
243 * If single step is active for this thread, then set SPSR.SS
244 * to 1 to avoid returning to the active-pending state.
245 */
246 if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
247 set_regs_spsr_ss(task_pt_regs(task));
248}
249
250void user_fastforward_single_step(struct task_struct *task)
251{
252 if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
253 clear_regs_spsr_ss(task_pt_regs(task));
254}
255
256/* Kernel API */
257void kernel_enable_single_step(struct pt_regs *regs)
258{
259 WARN_ON(!irqs_disabled());
260 set_regs_spsr_ss(regs);
261 mdscr_write(mdscr_read() | DBG_MDSCR_SS);
262 enable_debug_monitors(DBG_ACTIVE_EL1);
263}
264
265void kernel_disable_single_step(void)
266{
267 WARN_ON(!irqs_disabled());
268 mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
269 disable_debug_monitors(DBG_ACTIVE_EL1);
270}
271
272int kernel_active_single_step(void)
273{
274 WARN_ON(!irqs_disabled());
275 return mdscr_read() & DBG_MDSCR_SS;
276}
277
278/* ptrace API */
279void user_enable_single_step(struct task_struct *task)
280{
281 set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
282 set_regs_spsr_ss(task_pt_regs(task));
283}
284
285void user_disable_single_step(struct task_struct *task)
286{
287 clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
288}
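
The kernel_* functions form a small API for an in-kernel debugger: arm single step on the pt_regs about to be restored, take the resulting debug exception, then disarm. A hypothetical client is sketched below; nothing in this patch registers one yet, as the KGDB TODO in the handler above notes.

	/* Hypothetical in-kernel user of the single-step API. */
	static void step_from_exception(struct pt_regs *regs)
	{
		if (!kernel_active_single_step())
			kernel_enable_single_step(regs);	/* sets SPSR.SS and MDSCR_EL1.SS */
		/*
		 * Return from the exception; one instruction executes,
		 * single_step_handler() runs again, and the client then
		 * calls kernel_disable_single_step().
		 */
	}
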
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
new file mode 100644
index 000000000000..17988a6e7ea2
--- /dev/null
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -0,0 +1,80 @@
1/*
2 * FP/SIMD state saving and restoring
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/linkage.h>
21
22#include <asm/assembler.h>
23
24/*
25 * Save the FP registers.
26 *
27 * x0 - pointer to struct fpsimd_state
28 */
29ENTRY(fpsimd_save_state)
30 stp q0, q1, [x0, #16 * 0]
31 stp q2, q3, [x0, #16 * 2]
32 stp q4, q5, [x0, #16 * 4]
33 stp q6, q7, [x0, #16 * 6]
34 stp q8, q9, [x0, #16 * 8]
35 stp q10, q11, [x0, #16 * 10]
36 stp q12, q13, [x0, #16 * 12]
37 stp q14, q15, [x0, #16 * 14]
38 stp q16, q17, [x0, #16 * 16]
39 stp q18, q19, [x0, #16 * 18]
40 stp q20, q21, [x0, #16 * 20]
41 stp q22, q23, [x0, #16 * 22]
42 stp q24, q25, [x0, #16 * 24]
43 stp q26, q27, [x0, #16 * 26]
44 stp q28, q29, [x0, #16 * 28]
45 stp q30, q31, [x0, #16 * 30]!
46 mrs x8, fpsr
47 str w8, [x0, #16 * 2]
48 mrs x8, fpcr
49 str w8, [x0, #16 * 2 + 4]
50 ret
51ENDPROC(fpsimd_save_state)
52
53/*
54 * Load the FP registers.
55 *
56 * x0 - pointer to struct fpsimd_state
57 */
58ENTRY(fpsimd_load_state)
59 ldp q0, q1, [x0, #16 * 0]
60 ldp q2, q3, [x0, #16 * 2]
61 ldp q4, q5, [x0, #16 * 4]
62 ldp q6, q7, [x0, #16 * 6]
63 ldp q8, q9, [x0, #16 * 8]
64 ldp q10, q11, [x0, #16 * 10]
65 ldp q12, q13, [x0, #16 * 12]
66 ldp q14, q15, [x0, #16 * 14]
67 ldp q16, q17, [x0, #16 * 16]
68 ldp q18, q19, [x0, #16 * 18]
69 ldp q20, q21, [x0, #16 * 20]
70 ldp q22, q23, [x0, #16 * 22]
71 ldp q24, q25, [x0, #16 * 24]
72 ldp q26, q27, [x0, #16 * 26]
73 ldp q28, q29, [x0, #16 * 28]
74 ldp q30, q31, [x0, #16 * 30]!
75 ldr w8, [x0, #16 * 2]
76 ldr w9, [x0, #16 * 2 + 4]
77 msr fpsr, x8
78 msr fpcr, x9
79 ret
80ENDPROC(fpsimd_load_state)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
new file mode 100644
index 000000000000..38cf853a3667
--- /dev/null
+++ b/arch/arm64/kernel/entry.S
@@ -0,0 +1,695 @@
1/*
2 * Low-level exception handling code
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Authors: Catalin Marinas <catalin.marinas@arm.com>
6 * Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/init.h>
22#include <linux/linkage.h>
23
24#include <asm/assembler.h>
25#include <asm/asm-offsets.h>
26#include <asm/errno.h>
27#include <asm/thread_info.h>
28#include <asm/unistd.h>
29
30/*
31 * Bad Abort numbers
32 *-----------------
33 */
34#define BAD_SYNC 0
35#define BAD_IRQ 1
36#define BAD_FIQ 2
37#define BAD_ERROR 3
38
39 .macro kernel_entry, el, regsize = 64
40 sub sp, sp, #S_FRAME_SIZE - S_LR // room for LR, SP, SPSR, ELR
41 .if \regsize == 32
42 mov w0, w0 // zero upper 32 bits of x0
43 .endif
44 push x28, x29
45 push x26, x27
46 push x24, x25
47 push x22, x23
48 push x20, x21
49 push x18, x19
50 push x16, x17
51 push x14, x15
52 push x12, x13
53 push x10, x11
54 push x8, x9
55 push x6, x7
56 push x4, x5
57 push x2, x3
58 push x0, x1
59 .if \el == 0
60 mrs x21, sp_el0
61 .else
62 add x21, sp, #S_FRAME_SIZE
63 .endif
64 mrs x22, elr_el1
65 mrs x23, spsr_el1
66 stp lr, x21, [sp, #S_LR]
67 stp x22, x23, [sp, #S_PC]
68
69 /*
70 * Set syscallno to -1 by default (overridden later if real syscall).
71 */
72 .if \el == 0
73 mvn x21, xzr
74 str x21, [sp, #S_SYSCALLNO]
75 .endif
76
77 /*
78 * Registers that may be useful after this macro is invoked:
79 *
80 * x21 - aborted SP
81 * x22 - aborted PC
82 * x23 - aborted PSTATE
83 */
84 .endm
85
86 .macro kernel_exit, el, ret = 0
87 ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
88 .if \el == 0
89 ldr x23, [sp, #S_SP] // load return stack pointer
90 .endif
91 .if \ret
92 ldr x1, [sp, #S_X1] // preserve x0 (syscall return)
93 add sp, sp, S_X2
94 .else
95 pop x0, x1
96 .endif
97 pop x2, x3 // load the rest of the registers
98 pop x4, x5
99 pop x6, x7
100 pop x8, x9
101 msr elr_el1, x21 // set up the return data
102 msr spsr_el1, x22
103 .if \el == 0
104 msr sp_el0, x23
105 .endif
106 pop x10, x11
107 pop x12, x13
108 pop x14, x15
109 pop x16, x17
110 pop x18, x19
111 pop x20, x21
112 pop x22, x23
113 pop x24, x25
114 pop x26, x27
115 pop x28, x29
116 ldr lr, [sp], #S_FRAME_SIZE - S_LR // load LR and restore SP
117 eret // return to kernel
118 .endm
119
120 .macro get_thread_info, rd
121 mov \rd, sp
122 and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack
123 .endm
124
125/*
126 * These are the registers used in the syscall handler, and allow us to
127 * have in theory up to 7 arguments to a function - x0 to x6.
128 *
129 * x7 is reserved for the system call number in 32-bit mode.
130 */
131sc_nr .req x25 // number of system calls
132scno .req x26 // syscall number
133stbl .req x27 // syscall table pointer
134tsk .req x28 // current thread_info
135
136/*
137 * Interrupt handling.
138 */
139 .macro irq_handler
140 ldr x1, handle_arch_irq
141 mov x0, sp
142 blr x1
143 .endm
144
145 .text
146
147/*
148 * Exception vectors.
149 */
150 .macro ventry label
151 .align 7
152 b \label
153 .endm
154
155 .align 11
156ENTRY(vectors)
157 ventry el1_sync_invalid // Synchronous EL1t
158 ventry el1_irq_invalid // IRQ EL1t
159 ventry el1_fiq_invalid // FIQ EL1t
160 ventry el1_error_invalid // Error EL1t
161
162 ventry el1_sync // Synchronous EL1h
163 ventry el1_irq // IRQ EL1h
164 ventry el1_fiq_invalid // FIQ EL1h
165 ventry el1_error_invalid // Error EL1h
166
167 ventry el0_sync // Synchronous 64-bit EL0
168 ventry el0_irq // IRQ 64-bit EL0
169 ventry el0_fiq_invalid // FIQ 64-bit EL0
170 ventry el0_error_invalid // Error 64-bit EL0
171
172#ifdef CONFIG_COMPAT
173 ventry el0_sync_compat // Synchronous 32-bit EL0
174 ventry el0_irq_compat // IRQ 32-bit EL0
175 ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
176 ventry el0_error_invalid_compat // Error 32-bit EL0
177#else
178 ventry el0_sync_invalid // Synchronous 32-bit EL0
179 ventry el0_irq_invalid // IRQ 32-bit EL0
180 ventry el0_fiq_invalid // FIQ 32-bit EL0
181 ventry el0_error_invalid // Error 32-bit EL0
182#endif
183END(vectors)
184
185/*
186 * Invalid mode handlers
187 */
188 .macro inv_entry, el, reason, regsize = 64
189 kernel_entry el, \regsize
190 mov x0, sp
191 mov x1, #\reason
192 mrs x2, esr_el1
193 b bad_mode
194 .endm
195
196el0_sync_invalid:
197 inv_entry 0, BAD_SYNC
198ENDPROC(el0_sync_invalid)
199
200el0_irq_invalid:
201 inv_entry 0, BAD_IRQ
202ENDPROC(el0_irq_invalid)
203
204el0_fiq_invalid:
205 inv_entry 0, BAD_FIQ
206ENDPROC(el0_fiq_invalid)
207
208el0_error_invalid:
209 inv_entry 0, BAD_ERROR
210ENDPROC(el0_error_invalid)
211
212#ifdef CONFIG_COMPAT
213el0_fiq_invalid_compat:
214 inv_entry 0, BAD_FIQ, 32
215ENDPROC(el0_fiq_invalid_compat)
216
217el0_error_invalid_compat:
218 inv_entry 0, BAD_ERROR, 32
219ENDPROC(el0_error_invalid_compat)
220#endif
221
222el1_sync_invalid:
223 inv_entry 1, BAD_SYNC
224ENDPROC(el1_sync_invalid)
225
226el1_irq_invalid:
227 inv_entry 1, BAD_IRQ
228ENDPROC(el1_irq_invalid)
229
230el1_fiq_invalid:
231 inv_entry 1, BAD_FIQ
232ENDPROC(el1_fiq_invalid)
233
234el1_error_invalid:
235 inv_entry 1, BAD_ERROR
236ENDPROC(el1_error_invalid)
237
238/*
239 * EL1 mode handlers.
240 */
241 .align 6
242el1_sync:
243 kernel_entry 1
244 mrs x1, esr_el1 // read the syndrome register
245 lsr x24, x1, #26 // exception class
246 cmp x24, #0x25 // data abort in EL1
247 b.eq el1_da
248 cmp x24, #0x18 // configurable trap
249 b.eq el1_undef
250 cmp x24, #0x26 // stack alignment exception
251 b.eq el1_sp_pc
252 cmp x24, #0x22 // pc alignment exception
253 b.eq el1_sp_pc
254 cmp x24, #0x00 // unknown exception in EL1
255 b.eq el1_undef
256 cmp x24, #0x30 // debug exception in EL1
257 b.ge el1_dbg
258 b el1_inv
259el1_da:
260 /*
261 * Data abort handling
262 */
263 mrs x0, far_el1
264 enable_dbg_if_not_stepping x2
265 // re-enable interrupts if they were enabled in the aborted context
266 tbnz x23, #7, 1f // PSR_I_BIT
267 enable_irq
2681:
269 mov x2, sp // struct pt_regs
270 bl do_mem_abort
271
272 // disable interrupts before pulling preserved data off the stack
273 disable_irq
274 kernel_exit 1
275el1_sp_pc:
276 /*
277 * Stack or PC alignment exception handling
278 */
279 mrs x0, far_el1
280 mov x1, x25
281 mov x2, sp
282 b do_sp_pc_abort
283el1_undef:
284 /*
285 * Undefined instruction
286 */
287 mov x0, sp
288 b do_undefinstr
289el1_dbg:
290 /*
291 * Debug exception handling
292 */
293 tbz x24, #0, el1_inv // EL1 only
294 mrs x0, far_el1
295 mov x2, sp // struct pt_regs
296 bl do_debug_exception
297
298 kernel_exit 1
299el1_inv:
300 // TODO: add support for undefined instructions in kernel mode
301 mov x0, sp
302 mov x1, #BAD_SYNC
303 mrs x2, esr_el1
304 b bad_mode
305ENDPROC(el1_sync)
306
307 .align 6
308el1_irq:
309 kernel_entry 1
310 enable_dbg_if_not_stepping x0
311#ifdef CONFIG_TRACE_IRQFLAGS
312 bl trace_hardirqs_off
313#endif
314#ifdef CONFIG_PREEMPT
315 get_thread_info tsk
316 ldr x24, [tsk, #TI_PREEMPT] // get preempt count
317 add x0, x24, #1 // increment it
318 str x0, [tsk, #TI_PREEMPT]
319#endif
320 irq_handler
321#ifdef CONFIG_PREEMPT
322 str x24, [tsk, #TI_PREEMPT] // restore preempt count
323 cbnz x24, 1f // preempt count != 0
324 ldr x0, [tsk, #TI_FLAGS] // get flags
325 tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
326 bl el1_preempt
3271:
328#endif
329#ifdef CONFIG_TRACE_IRQFLAGS
330 bl trace_hardirqs_on
331#endif
332 kernel_exit 1
333ENDPROC(el1_irq)
334
335#ifdef CONFIG_PREEMPT
336el1_preempt:
337 mov x24, lr
3381: enable_dbg
339 bl preempt_schedule_irq // irq en/disable is done inside
340 ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
341 tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
342 ret x24
343#endif
344
345/*
346 * EL0 mode handlers.
347 */
348 .align 6
349el0_sync:
350 kernel_entry 0
351 mrs x25, esr_el1 // read the syndrome register
352 lsr x24, x25, #26 // exception class
353 cmp x24, #0x15 // SVC in 64-bit state
354 b.eq el0_svc
355 adr lr, ret_from_exception
356 cmp x24, #0x24 // data abort in EL0
357 b.eq el0_da
358 cmp x24, #0x20 // instruction abort in EL0
359 b.eq el0_ia
360 cmp x24, #0x07 // FP/ASIMD access
361 b.eq el0_fpsimd_acc
362 cmp x24, #0x2c // FP/ASIMD exception
363 b.eq el0_fpsimd_exc
364 cmp x24, #0x18 // configurable trap
365 b.eq el0_undef
366 cmp x24, #0x26 // stack alignment exception
367 b.eq el0_sp_pc
368 cmp x24, #0x22 // pc alignment exception
369 b.eq el0_sp_pc
370 cmp x24, #0x00 // unknown exception in EL0
371 b.eq el0_undef
372 cmp x24, #0x30 // debug exception in EL0
373 b.ge el0_dbg
374 b el0_inv
375
376#ifdef CONFIG_COMPAT
377 .align 6
378el0_sync_compat:
379 kernel_entry 0, 32
380 mrs x25, esr_el1 // read the syndrome register
381 lsr x24, x25, #26 // exception class
382 cmp x24, #0x11 // SVC in 32-bit state
383 b.eq el0_svc_compat
384 adr lr, ret_from_exception
385 cmp x24, #0x24 // data abort in EL0
386 b.eq el0_da
387 cmp x24, #0x20 // instruction abort in EL0
388 b.eq el0_ia
389 cmp x24, #0x07 // FP/ASIMD access
390 b.eq el0_fpsimd_acc
391 cmp x24, #0x28 // FP/ASIMD exception
392 b.eq el0_fpsimd_exc
393 cmp x24, #0x00 // unknown exception in EL0
394 b.eq el0_undef
395 cmp x24, #0x30 // debug exception in EL0
396 b.ge el0_dbg
397 b el0_inv
398el0_svc_compat:
399 /*
400 * AArch32 syscall handling
401 */
402 adr stbl, compat_sys_call_table // load compat syscall table pointer
403 uxtw scno, w7 // syscall number in w7 (r7)
404 mov sc_nr, #__NR_compat_syscalls
405 b el0_svc_naked
406
407 .align 6
408el0_irq_compat:
409 kernel_entry 0, 32
410 b el0_irq_naked
411#endif
412
413el0_da:
414 /*
415 * Data abort handling
416 */
417 mrs x0, far_el1
418 disable_step x1
419 isb
420 enable_dbg
421 // enable interrupts before calling the main handler
422 enable_irq
423 mov x1, x25
424 mov x2, sp
425 b do_mem_abort
426el0_ia:
427 /*
428 * Instruction abort handling
429 */
430 mrs x0, far_el1
431 disable_step x1
432 isb
433 enable_dbg
434 // enable interrupts before calling the main handler
435 enable_irq
436 orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts
437 mov x2, sp
438 b do_mem_abort
439el0_fpsimd_acc:
440 /*
441 * Floating Point or Advanced SIMD access
442 */
443 mov x0, x25
444 mov x1, sp
445 b do_fpsimd_acc
446el0_fpsimd_exc:
447 /*
448 * Floating Point or Advanced SIMD exception
449 */
450 mov x0, x25
451 mov x1, sp
452 b do_fpsimd_exc
453el0_sp_pc:
454 /*
455 * Stack or PC alignment exception handling
456 */
457 mrs x0, far_el1
458 disable_step x1
459 isb
460 enable_dbg
461 // enable interrupts before calling the main handler
462 enable_irq
463 mov x1, x25
464 mov x2, sp
465 b do_sp_pc_abort
466el0_undef:
467 /*
468 * Undefined instruction
469 */
470 mov x0, sp
471 b do_undefinstr
472el0_dbg:
473 /*
474 * Debug exception handling
475 */
476 tbnz x24, #0, el0_inv // EL0 only
477 mrs x0, far_el1
478 disable_step x1
479 mov x1, x25
480 mov x2, sp
481 b do_debug_exception
482el0_inv:
483 mov x0, sp
484 mov x1, #BAD_SYNC
485 mrs x2, esr_el1
486 b bad_mode
487ENDPROC(el0_sync)
488
489 .align 6
490el0_irq:
491 kernel_entry 0
492el0_irq_naked:
493 disable_step x1
494 isb
495 enable_dbg
496#ifdef CONFIG_TRACE_IRQFLAGS
497 bl trace_hardirqs_off
498#endif
499 get_thread_info tsk
500#ifdef CONFIG_PREEMPT
501 ldr x24, [tsk, #TI_PREEMPT] // get preempt count
502 add x23, x24, #1 // increment it
503 str x23, [tsk, #TI_PREEMPT]
504#endif
505 irq_handler
506#ifdef CONFIG_PREEMPT
507 ldr x0, [tsk, #TI_PREEMPT]
508 str x24, [tsk, #TI_PREEMPT]
509 cmp x0, x23
510 b.eq 1f
511 mov x1, #0
512 str x1, [x1] // BUG
5131:
514#endif
515#ifdef CONFIG_TRACE_IRQFLAGS
516 bl trace_hardirqs_on
517#endif
518 b ret_to_user
519ENDPROC(el0_irq)
520
521/*
522 * This is the return path to user mode for abort handlers
523 */
524ret_from_exception:
525 get_thread_info tsk
526 b ret_to_user
527ENDPROC(ret_from_exception)
528
529/*
530 * Register switch for AArch64. The callee-saved registers need to be saved
531 * and restored. On entry:
532 * x0 = previous task_struct (must be preserved across the switch)
533 * x1 = next task_struct
534 * Previous and next are guaranteed not to be the same.
535 *
536 */
537ENTRY(cpu_switch_to)
538 add x8, x0, #THREAD_CPU_CONTEXT
539 mov x9, sp
540 stp x19, x20, [x8], #16 // store callee-saved registers
541 stp x21, x22, [x8], #16
542 stp x23, x24, [x8], #16
543 stp x25, x26, [x8], #16
544 stp x27, x28, [x8], #16
545 stp x29, x9, [x8], #16
546 str lr, [x8]
547 add x8, x1, #THREAD_CPU_CONTEXT
548 ldp x19, x20, [x8], #16 // restore callee-saved registers
549 ldp x21, x22, [x8], #16
550 ldp x23, x24, [x8], #16
551 ldp x25, x26, [x8], #16
552 ldp x27, x28, [x8], #16
553 ldp x29, x9, [x8], #16
554 ldr lr, [x8]
555 mov sp, x9
556 ret
557ENDPROC(cpu_switch_to)
558
559/*
560 * This is the fast syscall return path. We do as little as possible here,
561 * and we avoid saving x0 back to the kernel stack unless extra work is pending.
562 */
563ret_fast_syscall:
564 disable_irq // disable interrupts
565 ldr x1, [tsk, #TI_FLAGS]
566 and x2, x1, #_TIF_WORK_MASK
567 cbnz x2, fast_work_pending
568 tbz x1, #TIF_SINGLESTEP, fast_exit
569 disable_dbg
570 enable_step x2
571fast_exit:
572 kernel_exit 0, ret = 1
573
574/*
575 * Ok, we need to do extra processing, enter the slow path.
576 */
577fast_work_pending:
578 str x0, [sp, #S_X0] // returned x0
579work_pending:
580 tbnz x1, #TIF_NEED_RESCHED, work_resched
581 /* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
582 ldr x2, [sp, #S_PSTATE]
583 mov x0, sp // 'regs'
584 tst x2, #PSR_MODE_MASK // user mode regs?
585 b.ne no_work_pending // returning to kernel
586 bl do_notify_resume
587 b ret_to_user
588work_resched:
589 enable_dbg
590 bl schedule
591
592/*
593 * "slow" syscall return path.
594 */
595ENTRY(ret_to_user)
596 disable_irq // disable interrupts
597 ldr x1, [tsk, #TI_FLAGS]
598 and x2, x1, #_TIF_WORK_MASK
599 cbnz x2, work_pending
600 tbz x1, #TIF_SINGLESTEP, no_work_pending
601 disable_dbg
602 enable_step x2
603no_work_pending:
604 kernel_exit 0, ret = 0
605ENDPROC(ret_to_user)
606
607/*
608 * This is how we return from a fork.
609 */
610ENTRY(ret_from_fork)
611 bl schedule_tail
612 get_thread_info tsk
613 b ret_to_user
614ENDPROC(ret_from_fork)
615
616/*
617 * SVC handler.
618 */
619 .align 6
620el0_svc:
621 adrp stbl, sys_call_table // load syscall table pointer
622 uxtw scno, w8 // syscall number in w8
623 mov sc_nr, #__NR_syscalls
624el0_svc_naked: // compat entry point
625 stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
626 disable_step x16
627 isb
628 enable_dbg
629 enable_irq
630
631 get_thread_info tsk
632 ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing
633 tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
634 adr lr, ret_fast_syscall // return address
635 cmp scno, sc_nr // check upper syscall limit
636 b.hs ni_sys
637 ldr x16, [stbl, scno, lsl #3] // address in the syscall table
638 br x16 // call sys_* routine
639ni_sys:
640 mov x0, sp
641 b do_ni_syscall
642ENDPROC(el0_svc)
643
644 /*
645 * This is the really slow path. We're going to be doing context
646 * switches, and waiting for our parent to respond.
647 */
648__sys_trace:
649 mov x1, sp
650 mov w0, #0 // trace entry
651 bl syscall_trace
652 adr lr, __sys_trace_return // return address
653 uxtw scno, w0 // syscall number (possibly new)
654 mov x1, sp // pointer to regs
655 cmp scno, sc_nr // check upper syscall limit
656 b.hs ni_sys
657 ldp x0, x1, [sp] // restore the syscall args
658 ldp x2, x3, [sp, #S_X2]
659 ldp x4, x5, [sp, #S_X4]
660 ldp x6, x7, [sp, #S_X6]
661 ldr x16, [stbl, scno, lsl #3] // address in the syscall table
662 br x16 // call sys_* routine
663
664__sys_trace_return:
665 str x0, [sp] // save returned x0
666 mov x1, sp
667 mov w0, #1 // trace exit
668 bl syscall_trace
669 b ret_to_user
670
671/*
672 * Special system call wrappers.
673 */
674ENTRY(sys_execve_wrapper)
675 mov x3, sp
676 b sys_execve
677ENDPROC(sys_execve_wrapper)
678
679ENTRY(sys_clone_wrapper)
680 mov x5, sp
681 b sys_clone
682ENDPROC(sys_clone_wrapper)
683
684ENTRY(sys_rt_sigreturn_wrapper)
685 mov x0, sp
686 b sys_rt_sigreturn
687ENDPROC(sys_rt_sigreturn_wrapper)
688
689ENTRY(sys_sigaltstack_wrapper)
690 ldr x2, [sp, #S_SP]
691 b sys_sigaltstack
692ENDPROC(sys_sigaltstack_wrapper)
693
694ENTRY(handle_arch_irq)
695 .quad 0
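
The el1_sync and el0_sync dispatchers above classify each trap by shifting ESR_EL1 right by 26 bits to obtain the exception class (EC) and comparing it against architectural values. The same decode, written out in C purely for readability (the constants mirror the immediates used in the assembly; this is not code from the patch):

	/* Illustrative C equivalent of the EC dispatch in el0_sync. */
	static const char *esr_class_name(unsigned int esr)
	{
		switch (esr >> 26) {		/* ESR_EL1[31:26]: exception class */
		case 0x15: return "SVC from AArch64";
		case 0x24: return "data abort from EL0";
		case 0x20: return "instruction abort from EL0";
		case 0x07: return "FP/ASIMD access trap";
		case 0x2c: return "FP/ASIMD exception";
		case 0x00: return "unknown exception";
		default:   return (esr >> 26) >= 0x30 ? "debug exception" : "other";
		}
	}
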
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
new file mode 100644
index 000000000000..e8b8357aedb4
--- /dev/null
+++ b/arch/arm64/kernel/fpsimd.c
@@ -0,0 +1,106 @@
1/*
2 * FP/SIMD context switching and fault handling
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/sched.h>
23#include <linux/signal.h>
24
25#include <asm/fpsimd.h>
26#include <asm/cputype.h>
27
28#define FPEXC_IOF (1 << 0)
29#define FPEXC_DZF (1 << 1)
30#define FPEXC_OFF (1 << 2)
31#define FPEXC_UFF (1 << 3)
32#define FPEXC_IXF (1 << 4)
33#define FPEXC_IDF (1 << 7)
34
35/*
36 * Trapped FP/ASIMD access.
37 */
38void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
39{
40 /* TODO: implement lazy context saving/restoring */
41 WARN_ON(1);
42}
43
44/*
45 * Raise a SIGFPE for the current process.
46 */
47void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
48{
49 siginfo_t info;
50 unsigned int si_code = 0;
51
52 if (esr & FPEXC_IOF)
53 si_code = FPE_FLTINV;
54 else if (esr & FPEXC_DZF)
55 si_code = FPE_FLTDIV;
56 else if (esr & FPEXC_OFF)
57 si_code = FPE_FLTOVF;
58 else if (esr & FPEXC_UFF)
59 si_code = FPE_FLTUND;
60 else if (esr & FPEXC_IXF)
61 si_code = FPE_FLTRES;
62
63 memset(&info, 0, sizeof(info));
64 info.si_signo = SIGFPE;
65 info.si_code = si_code;
66 info.si_addr = (void __user *)instruction_pointer(regs);
67
68 send_sig_info(SIGFPE, &info, current);
69}
70
71void fpsimd_thread_switch(struct task_struct *next)
72{
73	/* kernel threads (no mm) have no FP/SIMD state of their own */
74 if (current->mm)
75 fpsimd_save_state(&current->thread.fpsimd_state);
76 if (next->mm)
77 fpsimd_load_state(&next->thread.fpsimd_state);
78}
79
80void fpsimd_flush_thread(void)
81{
82 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
83 fpsimd_load_state(&current->thread.fpsimd_state);
84}
85
86/*
87 * FP/SIMD support code initialisation.
88 */
89static int __init fpsimd_init(void)
90{
91 u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
92
93 if (pfr & (0xf << 16)) {
94 pr_notice("Floating-point is not implemented\n");
95 return 0;
96 }
97 elf_hwcap |= HWCAP_FP;
98
99 if (pfr & (0xf << 20))
100 pr_notice("Advanced SIMD is not implemented\n");
101 else
102 elf_hwcap |= HWCAP_ASIMD;
103
104 return 0;
105}
106late_initcall(fpsimd_init);
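
fpsimd_init() advertises FP and Advanced SIMD support to userspace through elf_hwcap, which is exported via the ELF auxiliary vector. A small userspace sketch of how a program would query those bits (illustrative; it assumes glibc's getauxval() and the HWCAP_FP/HWCAP_ASIMD definitions from the exported asm/hwcap.h):

	/* Userspace sketch: query the hwcaps published by fpsimd_init(). */
	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap.h>

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		printf("FP:    %s\n", (hwcap & HWCAP_FP)    ? "yes" : "no");
		printf("ASIMD: %s\n", (hwcap & HWCAP_ASIMD) ? "yes" : "no");
		return 0;
	}
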
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
new file mode 100644
index 000000000000..a2f02b63eae9
--- /dev/null
+++ b/arch/arm64/kernel/head.S
@@ -0,0 +1,510 @@
1/*
2 * Low-level CPU initialisation
3 * Based on arch/arm/kernel/head.S
4 *
5 * Copyright (C) 1994-2002 Russell King
6 * Copyright (C) 2003-2012 ARM Ltd.
7 * Authors: Catalin Marinas <catalin.marinas@arm.com>
8 * Will Deacon <will.deacon@arm.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#include <linux/linkage.h>
24#include <linux/init.h>
25
26#include <asm/assembler.h>
27#include <asm/ptrace.h>
28#include <asm/asm-offsets.h>
29#include <asm/memory.h>
30#include <asm/thread_info.h>
31#include <asm/pgtable-hwdef.h>
32#include <asm/pgtable.h>
33#include <asm/page.h>
34
35/*
36 * swapper_pg_dir is the virtual address of the initial page table. We place
37 * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
38 * 2 pages and is placed below swapper_pg_dir.
39 */
40#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
41
42#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
43#error KERNEL_RAM_VADDR must start at 0xXXX80000
44#endif
45
46#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
47#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
48
49 .globl swapper_pg_dir
50 .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
51
52 .globl idmap_pg_dir
53 .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
54
55 .macro pgtbl, ttb0, ttb1, phys
56 add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
57 sub \ttb0, \ttb1, #IDMAP_DIR_SIZE
58 .endm
59
60#ifdef CONFIG_ARM64_64K_PAGES
61#define BLOCK_SHIFT PAGE_SHIFT
62#define BLOCK_SIZE PAGE_SIZE
63#else
64#define BLOCK_SHIFT SECTION_SHIFT
65#define BLOCK_SIZE SECTION_SIZE
66#endif
67
68#define KERNEL_START KERNEL_RAM_VADDR
69#define KERNEL_END _end
70
71/*
72 * Initial memory map attributes.
73 */
74#ifndef CONFIG_SMP
75#define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF
76#define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF
77#else
78#define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
79#define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
80#endif
81
82#ifdef CONFIG_ARM64_64K_PAGES
83#define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
84#define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS
85#else
86#define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
87#define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS
88#endif
89
90/*
91 * Kernel startup entry point.
92 * ---------------------------
93 *
94 * The requirements are:
95 * MMU = off, D-cache = off, I-cache = on or off,
96 * x0 = physical address to the FDT blob.
97 *
 98 * This code is mostly position independent, so it must be called at
99 * __pa(PAGE_OFFSET + TEXT_OFFSET).
100 *
101 * Note that the callee-saved registers are used for storing variables
102 * that are useful before the MMU is enabled. The allocations are described
103 * in the entry routines.
104 */
105 __HEAD
106
107 /*
108 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
109 */
110 b stext // branch to kernel start, magic
111 .long 0 // reserved
112 .quad TEXT_OFFSET // Image load offset from start of RAM
113 .quad 0 // reserved
114 .quad 0 // reserved
115
116ENTRY(stext)
117 mov x21, x0 // x21=FDT
118 bl el2_setup // Drop to EL1
119 mrs x22, midr_el1 // x22=cpuid
120 mov x0, x22
121 bl lookup_processor_type
122 mov x23, x0 // x23=current cpu_table
123 cbz x23, __error_p // invalid processor (x23=0)?
124 bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
125 bl __vet_fdt
126 bl __create_page_tables // x25=TTBR0, x26=TTBR1
127 /*
128 * The following calls CPU specific code in a position independent
129 * manner. See arch/arm64/mm/proc.S for details. x23 = base of
130 * cpu_info structure selected by lookup_processor_type above.
131 * On return, the CPU will be ready for the MMU to be turned on and
132 * the TCR will have been set.
133 */
134 ldr x27, __switch_data // address to jump to after
135 // MMU has been enabled
136 adr lr, __enable_mmu // return (PIC) address
137 ldr x12, [x23, #CPU_INFO_SETUP]
138 add x12, x12, x28 // __virt_to_phys
139 br x12 // initialise processor
140ENDPROC(stext)
141
142/*
143 * If we're fortunate enough to boot at EL2, ensure that the world is
144 * sane before dropping to EL1.
145 */
146ENTRY(el2_setup)
147 mrs x0, CurrentEL
148 cmp x0, #PSR_MODE_EL2t
149 ccmp x0, #PSR_MODE_EL2h, #0x4, ne
150 b.eq 1f
151 ret
152
153 /* Hyp configuration. */
1541: mov x0, #(1 << 31) // 64-bit EL1
155 msr hcr_el2, x0
156
157 /* Generic timers. */
158 mrs x0, cnthctl_el2
159 orr x0, x0, #3 // Enable EL1 physical timers
160 msr cnthctl_el2, x0
161
162 /* Populate ID registers. */
163 mrs x0, midr_el1
164 mrs x1, mpidr_el1
165 msr vpidr_el2, x0
166 msr vmpidr_el2, x1
167
168 /* sctlr_el1 */
169 mov x0, #0x0800 // Set/clear RES{1,0} bits
170 movk x0, #0x30d0, lsl #16
171 msr sctlr_el1, x0
172
173 /* Coprocessor traps. */
174 mov x0, #0x33ff
175 msr cptr_el2, x0 // Disable copro. traps to EL2
176
177#ifdef CONFIG_COMPAT
178 msr hstr_el2, xzr // Disable CP15 traps to EL2
179#endif
180
181 /* spsr */
182 mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
183 PSR_MODE_EL1h)
184 msr spsr_el2, x0
185 msr elr_el2, lr
186 eret
187ENDPROC(el2_setup)
188
189 .align 3
1902: .quad .
191 .quad PAGE_OFFSET
192
193#ifdef CONFIG_SMP
194 .pushsection .smp.pen.text, "ax"
195 .align 3
1961: .quad .
197 .quad secondary_holding_pen_release
198
199 /*
200 * This provides a "holding pen" where all secondary cores are held
201 * until we're ready for them to initialise.
202 */
203ENTRY(secondary_holding_pen)
204 bl el2_setup // Drop to EL1
205 mrs x0, mpidr_el1
206 and x0, x0, #15 // CPU number
207 adr x1, 1b
208 ldp x2, x3, [x1]
209 sub x1, x1, x2
210 add x3, x3, x1
211pen: ldr x4, [x3]
212 cmp x4, x0
213 b.eq secondary_startup
214 wfe
215 b pen
216ENDPROC(secondary_holding_pen)
217 .popsection
218
219ENTRY(secondary_startup)
220 /*
221 * Common entry point for secondary CPUs.
222 */
223 mrs x22, midr_el1 // x22=cpuid
224 mov x0, x22
225 bl lookup_processor_type
226 mov x23, x0 // x23=current cpu_table
227 cbz x23, __error_p // invalid processor (x23=0)?
228
229 bl __calc_phys_offset // x24=phys offset
230 pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1
231 ldr x12, [x23, #CPU_INFO_SETUP]
232 add x12, x12, x28 // __virt_to_phys
233 blr x12 // initialise processor
234
235 ldr x21, =secondary_data
236 ldr x27, =__secondary_switched // address to jump to after enabling the MMU
237 b __enable_mmu
238ENDPROC(secondary_startup)
239
240ENTRY(__secondary_switched)
241 ldr x0, [x21] // get secondary_data.stack
242 mov sp, x0
243 mov x29, #0
244 b secondary_start_kernel
245ENDPROC(__secondary_switched)
246#endif /* CONFIG_SMP */
247
248/*
249 * Setup common bits before finally enabling the MMU. Essentially this is just
250 * loading the page table pointer and vector base registers.
251 *
252 * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
253 * the MMU.
254 */
255__enable_mmu:
256 ldr x5, =vectors
257 msr vbar_el1, x5
258 msr ttbr0_el1, x25 // load TTBR0
259 msr ttbr1_el1, x26 // load TTBR1
260 isb
261 b __turn_mmu_on
262ENDPROC(__enable_mmu)
263
264/*
265 * Enable the MMU. This completely changes the structure of the visible memory
266 * space. You will not be able to trace execution through this.
267 *
268 * x0 = system control register
269 * x27 = *virtual* address to jump to upon completion
270 *
271 * other registers depend on the function called upon completion
272 */
273 .align 6
274__turn_mmu_on:
275 msr sctlr_el1, x0
276 isb
277 br x27
278ENDPROC(__turn_mmu_on)
279
280/*
281 * Calculate the start of physical memory.
282 */
283__calc_phys_offset:
284 adr x0, 1f
285 ldp x1, x2, [x0]
286 sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET
287 add x24, x2, x28 // x24 = PHYS_OFFSET
288 ret
289ENDPROC(__calc_phys_offset)
290
291 .align 3
2921: .quad .
293 .quad PAGE_OFFSET
294
295/*
296 * Macro to populate the PGD for the corresponding block entry in the next
297 * level (tbl) for the given virtual address.
298 *
299 * Preserves: pgd, tbl, virt
300 * Corrupts: tmp1, tmp2
301 */
302 .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
303 lsr \tmp1, \virt, #PGDIR_SHIFT
304 and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index
305 orr \tmp2, \tbl, #3 // PGD entry table type
306 str \tmp2, [\pgd, \tmp1, lsl #3]
307 .endm
308
309/*
310 * Macro to populate block entries in the page table for the start..end
311 * virtual range (inclusive).
312 *
313 * Preserves: tbl, flags
314 * Corrupts: phys, start, end, pstate
315 */
316 .macro create_block_map, tbl, flags, phys, start, end, idmap=0
317 lsr \phys, \phys, #BLOCK_SHIFT
318 .if \idmap
319 and \start, \phys, #PTRS_PER_PTE - 1 // table index
320 .else
321 lsr \start, \start, #BLOCK_SHIFT
322 and \start, \start, #PTRS_PER_PTE - 1 // table index
323 .endif
324 orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
325 .ifnc \start,\end
326 lsr \end, \end, #BLOCK_SHIFT
327 and \end, \end, #PTRS_PER_PTE - 1 // table end index
328 .endif
3299999: str \phys, [\tbl, \start, lsl #3] // store the entry
330 .ifnc \start,\end
331 add \start, \start, #1 // next entry
332 add \phys, \phys, #BLOCK_SIZE // next block
333 cmp \start, \end
334 b.ls 9999b
335 .endif
336 .endm
337
338/*
339 * Set up the initial page tables. We only map the bare minimum needed to
340 * get the kernel running. The following sections are required:
341 * - identity mapping to enable the MMU (low address, TTBR0)
342 * - first few MB of the kernel linear mapping to jump to once the MMU has
343 * been enabled, including the FDT blob (TTBR1)
344 */
345__create_page_tables:
346 pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
347
348 /*
349 * Clear the idmap and swapper page tables.
350 */
351 mov x0, x25
352 add x6, x26, #SWAPPER_DIR_SIZE
3531: stp xzr, xzr, [x0], #16
354 stp xzr, xzr, [x0], #16
355 stp xzr, xzr, [x0], #16
356 stp xzr, xzr, [x0], #16
357 cmp x0, x6
358 b.lo 1b
359
360 ldr x7, =MM_MMUFLAGS
361
362 /*
363 * Create the identity mapping.
364 */
365 add x0, x25, #PAGE_SIZE // section table address
366 adr x3, __turn_mmu_on // virtual/physical address
367 create_pgd_entry x25, x0, x3, x5, x6
368 create_block_map x0, x7, x3, x5, x5, idmap=1
369
370 /*
371 * Map the kernel image (starting with PHYS_OFFSET).
372 */
373 add x0, x26, #PAGE_SIZE // section table address
374 mov x5, #PAGE_OFFSET
375 create_pgd_entry x26, x0, x5, x3, x6
376 ldr x6, =KERNEL_END - 1
377 mov x3, x24 // phys offset
378 create_block_map x0, x7, x3, x5, x6
379
380 /*
381 * Map the FDT blob (maximum 2MB; must be within 512MB of
382 * PHYS_OFFSET).
383 */
384 mov x3, x21 // FDT phys address
385 and x3, x3, #~((1 << 21) - 1) // 2MB aligned
386 mov x6, #PAGE_OFFSET
387 sub x5, x3, x24 // subtract PHYS_OFFSET
388 tst x5, #~((1 << 29) - 1) // within 512MB?
389 csel x21, xzr, x21, ne // zero the FDT pointer
390 b.ne 1f
391 add x5, x5, x6 // __va(FDT blob)
392 add x6, x5, #1 << 21 // 2MB for the FDT blob
393 sub x6, x6, #1 // inclusive range
394 create_block_map x0, x7, x3, x5, x6
3951:
396 ret
397ENDPROC(__create_page_tables)
398 .ltorg
399
400 .align 3
401 .type __switch_data, %object
402__switch_data:
403 .quad __mmap_switched
404 .quad __data_loc // x4
405 .quad _data // x5
406 .quad __bss_start // x6
407 .quad _end // x7
408 .quad processor_id // x4
409 .quad __fdt_pointer // x5
410 .quad memstart_addr // x6
411 .quad init_thread_union + THREAD_START_SP // sp
412
413/*
414 * The following fragment of code is executed with the MMU enabled, and
415 * uses absolute addresses; this is not position independent.
416 */
417__mmap_switched:
418 adr x3, __switch_data + 8
419
420 ldp x4, x5, [x3], #16
421 ldp x6, x7, [x3], #16
422 cmp x4, x5 // Copy data segment if needed
4231: ccmp x5, x6, #4, ne
424 b.eq 2f
425 ldr x16, [x4], #8
426 str x16, [x5], #8
427 b 1b
4282:
4291: cmp x6, x7
430 b.hs 2f
431 str xzr, [x6], #8 // Clear BSS
432 b 1b
4332:
434 ldp x4, x5, [x3], #16
435 ldr x6, [x3], #8
436 ldr x16, [x3]
437 mov sp, x16
438 str x22, [x4] // Save processor ID
439 str x21, [x5] // Save FDT pointer
440 str x24, [x6] // Save PHYS_OFFSET
441 mov x29, #0
442 b start_kernel
443ENDPROC(__mmap_switched)
444
445/*
446 * Exception handling. Something went wrong and we can't proceed. We ought to
447 * tell the user, but since we don't have any guarantee that we're even
448 * running on the right architecture, we do virtually nothing.
449 */
450__error_p:
451ENDPROC(__error_p)
452
453__error:
4541: nop
455 b 1b
456ENDPROC(__error)
457
458/*
459 * This function gets the processor ID in w0 and searches the cpu_table[] for
460 * a match. It returns a pointer to the struct cpu_info it found. The
461 * cpu_table[] must end with an empty (all zeros) structure.
462 *
463 * This routine can be called from C code and it needs to work with the MMU
464 * both disabled and enabled (the offset is calculated automatically).
465 */
466ENTRY(lookup_processor_type)
467 adr x1, __lookup_processor_type_data
468 ldp x2, x3, [x1]
469 sub x1, x1, x2 // get offset between VA and PA
470 add x3, x3, x1 // convert VA to PA
4711:
472 ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask
473 cbz w5, 2f // end of list?
474 and w6, w6, w0
475 cmp w5, w6
476 b.eq 3f
477 add x3, x3, #CPU_INFO_SZ
478 b 1b
4792:
480 mov x3, #0 // unknown processor
4813:
482 mov x0, x3
483 ret
484ENDPROC(lookup_processor_type)
485
486 .align 3
487 .type __lookup_processor_type_data, %object
488__lookup_processor_type_data:
489 .quad .
490 .quad cpu_table
491 .size __lookup_processor_type_data, . - __lookup_processor_type_data
492
493/*
494 * Determine validity of the x21 FDT pointer.
495 * The dtb must be 8-byte aligned and live in the first 512M of memory.
496 */
497__vet_fdt:
498 tst x21, #0x7
499 b.ne 1f
500 cmp x21, x24
501 b.lt 1f
502 mov x0, #(1 << 29)
503 add x0, x0, x24
504 cmp x21, x0
505 b.ge 1f
506 ret
5071:
508 mov x21, #0
509 ret
510ENDPROC(__vet_fdt)
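
The create_pgd_entry and create_block_map macros above build the boot page tables directly: a PGD slot is pointed at the next-level table using a table descriptor (low bits 0b11), and that table is then filled with block descriptors covering the requested virtual range. Roughly, in C (a sketch only; the constants are the same ones head.S pulls in from asm/pgtable-hwdef.h):

	/* Rough C rendering of create_pgd_entry (illustrative only). */
	static void create_pgd_entry_c(u64 *pgd, u64 next_table_phys, u64 virt)
	{
		unsigned int idx = (virt >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);

		pgd[idx] = next_table_phys | 3;		/* valid table descriptor */
	}

	/* ...and of create_block_map: fill entries [start, end] inclusive. */
	static void create_block_map_c(u64 *tbl, u64 flags, u64 phys,
				       unsigned int start, unsigned int end)
	{
		phys &= ~(u64)(BLOCK_SIZE - 1);		/* align down to a block */
		for (; start <= end; start++, phys += BLOCK_SIZE)
			tbl[start] = phys | flags;
	}
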
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..5ab825c59db9
--- /dev/null
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -0,0 +1,880 @@
1/*
2 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
3 * using the CPU's debug registers.
4 *
5 * Copyright (C) 2012 ARM Limited
6 * Author: Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#define pr_fmt(fmt) "hw-breakpoint: " fmt
22
23#include <linux/errno.h>
24#include <linux/hw_breakpoint.h>
25#include <linux/perf_event.h>
26#include <linux/ptrace.h>
27#include <linux/smp.h>
28
29#include <asm/compat.h>
30#include <asm/current.h>
31#include <asm/debug-monitors.h>
32#include <asm/hw_breakpoint.h>
33#include <asm/kdebug.h>
34#include <asm/traps.h>
35#include <asm/cputype.h>
36#include <asm/system_misc.h>
37
38/* Breakpoint currently in use for each BRP. */
39static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
40
41/* Watchpoint currently in use for each WRP. */
42static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
43
44/* Currently stepping a per-CPU kernel breakpoint. */
45static DEFINE_PER_CPU(int, stepping_kernel_bp);
46
47/* Number of BRP/WRP registers on this CPU. */
48static int core_num_brps;
49static int core_num_wrps;
50
51/* Determine number of BRP registers available. */
52static int get_num_brps(void)
53{
54 return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
55}
56
57/* Determine number of WRP registers available. */
58static int get_num_wrps(void)
59{
60 return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
61}
62
63int hw_breakpoint_slots(int type)
64{
65 /*
66 * We can be called early, so don't rely on
67 * our static variables being initialised.
68 */
69 switch (type) {
70 case TYPE_INST:
71 return get_num_brps();
72 case TYPE_DATA:
73 return get_num_wrps();
74 default:
75 pr_warning("unknown slot type: %d\n", type);
76 return 0;
77 }
78}
79
80#define READ_WB_REG_CASE(OFF, N, REG, VAL) \
81 case (OFF + N): \
82 AARCH64_DBG_READ(N, REG, VAL); \
83 break
84
85#define WRITE_WB_REG_CASE(OFF, N, REG, VAL) \
86 case (OFF + N): \
87 AARCH64_DBG_WRITE(N, REG, VAL); \
88 break
89
90#define GEN_READ_WB_REG_CASES(OFF, REG, VAL) \
91 READ_WB_REG_CASE(OFF, 0, REG, VAL); \
92 READ_WB_REG_CASE(OFF, 1, REG, VAL); \
93 READ_WB_REG_CASE(OFF, 2, REG, VAL); \
94 READ_WB_REG_CASE(OFF, 3, REG, VAL); \
95 READ_WB_REG_CASE(OFF, 4, REG, VAL); \
96 READ_WB_REG_CASE(OFF, 5, REG, VAL); \
97 READ_WB_REG_CASE(OFF, 6, REG, VAL); \
98 READ_WB_REG_CASE(OFF, 7, REG, VAL); \
99 READ_WB_REG_CASE(OFF, 8, REG, VAL); \
100 READ_WB_REG_CASE(OFF, 9, REG, VAL); \
101 READ_WB_REG_CASE(OFF, 10, REG, VAL); \
102 READ_WB_REG_CASE(OFF, 11, REG, VAL); \
103 READ_WB_REG_CASE(OFF, 12, REG, VAL); \
104 READ_WB_REG_CASE(OFF, 13, REG, VAL); \
105 READ_WB_REG_CASE(OFF, 14, REG, VAL); \
106 READ_WB_REG_CASE(OFF, 15, REG, VAL)
107
108#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL) \
109 WRITE_WB_REG_CASE(OFF, 0, REG, VAL); \
110 WRITE_WB_REG_CASE(OFF, 1, REG, VAL); \
111 WRITE_WB_REG_CASE(OFF, 2, REG, VAL); \
112 WRITE_WB_REG_CASE(OFF, 3, REG, VAL); \
113 WRITE_WB_REG_CASE(OFF, 4, REG, VAL); \
114 WRITE_WB_REG_CASE(OFF, 5, REG, VAL); \
115 WRITE_WB_REG_CASE(OFF, 6, REG, VAL); \
116 WRITE_WB_REG_CASE(OFF, 7, REG, VAL); \
117 WRITE_WB_REG_CASE(OFF, 8, REG, VAL); \
118 WRITE_WB_REG_CASE(OFF, 9, REG, VAL); \
119 WRITE_WB_REG_CASE(OFF, 10, REG, VAL); \
120 WRITE_WB_REG_CASE(OFF, 11, REG, VAL); \
121 WRITE_WB_REG_CASE(OFF, 12, REG, VAL); \
122 WRITE_WB_REG_CASE(OFF, 13, REG, VAL); \
123 WRITE_WB_REG_CASE(OFF, 14, REG, VAL); \
124 WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
125
126static u64 read_wb_reg(int reg, int n)
127{
128 u64 val = 0;
129
130 switch (reg + n) {
131 GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
132 GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
133 GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
134 GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
135 default:
136 pr_warning("attempt to read from unknown breakpoint register %d\n", n);
137 }
138
139 return val;
140}
141
142static void write_wb_reg(int reg, int n, u64 val)
143{
144 switch (reg + n) {
145 GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
146 GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
147 GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
148 GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
149 default:
150 pr_warning("attempt to write to unknown breakpoint register %d\n", n);
151 }
152 isb();
153}
154
155/*
156 * Convert a breakpoint privilege level to the corresponding exception
157 * level.
158 */
159static enum debug_el debug_exception_level(int privilege)
160{
161 switch (privilege) {
162 case AARCH64_BREAKPOINT_EL0:
163 return DBG_ACTIVE_EL0;
164 case AARCH64_BREAKPOINT_EL1:
165 return DBG_ACTIVE_EL1;
166 default:
167 pr_warning("invalid breakpoint privilege level %d\n", privilege);
168 return -EINVAL;
169 }
170}
171
172/*
173 * Install a perf counter breakpoint.
174 */
175int arch_install_hw_breakpoint(struct perf_event *bp)
176{
177 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
178 struct perf_event **slot, **slots;
179 struct debug_info *debug_info = &current->thread.debug;
180 int i, max_slots, ctrl_reg, val_reg, reg_enable;
181 u32 ctrl;
182
183 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
184 /* Breakpoint */
185 ctrl_reg = AARCH64_DBG_REG_BCR;
186 val_reg = AARCH64_DBG_REG_BVR;
187 slots = __get_cpu_var(bp_on_reg);
188 max_slots = core_num_brps;
189 reg_enable = !debug_info->bps_disabled;
190 } else {
191 /* Watchpoint */
192 ctrl_reg = AARCH64_DBG_REG_WCR;
193 val_reg = AARCH64_DBG_REG_WVR;
194 slots = __get_cpu_var(wp_on_reg);
195 max_slots = core_num_wrps;
196 reg_enable = !debug_info->wps_disabled;
197 }
198
199 for (i = 0; i < max_slots; ++i) {
200 slot = &slots[i];
201
202 if (!*slot) {
203 *slot = bp;
204 break;
205 }
206 }
207
208 if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
209 return -ENOSPC;
210
211 /* Ensure debug monitors are enabled at the correct exception level. */
212 enable_debug_monitors(debug_exception_level(info->ctrl.privilege));
213
214 /* Setup the address register. */
215 write_wb_reg(val_reg, i, info->address);
216
217 /* Setup the control register. */
218 ctrl = encode_ctrl_reg(info->ctrl);
219 write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
220
221 return 0;
222}
223
224void arch_uninstall_hw_breakpoint(struct perf_event *bp)
225{
226 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
227 struct perf_event **slot, **slots;
228 int i, max_slots, base;
229
230 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
231 /* Breakpoint */
232 base = AARCH64_DBG_REG_BCR;
233 slots = __get_cpu_var(bp_on_reg);
234 max_slots = core_num_brps;
235 } else {
236 /* Watchpoint */
237 base = AARCH64_DBG_REG_WCR;
238 slots = __get_cpu_var(wp_on_reg);
239 max_slots = core_num_wrps;
240 }
241
242 /* Remove the breakpoint. */
243 for (i = 0; i < max_slots; ++i) {
244 slot = &slots[i];
245
246 if (*slot == bp) {
247 *slot = NULL;
248 break;
249 }
250 }
251
252 if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
253 return;
254
255 /* Reset the control register. */
256 write_wb_reg(base, i, 0);
257
258 /* Release the debug monitors for the correct exception level. */
259 disable_debug_monitors(debug_exception_level(info->ctrl.privilege));
260}
261
262static int get_hbp_len(u8 hbp_len)
263{
264 unsigned int len_in_bytes = 0;
265
266 switch (hbp_len) {
267 case ARM_BREAKPOINT_LEN_1:
268 len_in_bytes = 1;
269 break;
270 case ARM_BREAKPOINT_LEN_2:
271 len_in_bytes = 2;
272 break;
273 case ARM_BREAKPOINT_LEN_4:
274 len_in_bytes = 4;
275 break;
276 case ARM_BREAKPOINT_LEN_8:
277 len_in_bytes = 8;
278 break;
279 }
280
281 return len_in_bytes;
282}
283
284/*
285 * Check whether bp virtual address is in kernel space.
286 */
287int arch_check_bp_in_kernelspace(struct perf_event *bp)
288{
289 unsigned int len;
290 unsigned long va;
291 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
292
293 va = info->address;
294 len = get_hbp_len(info->ctrl.len);
295
296 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
297}
298
299/*
300 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
301 * Hopefully this will disappear when ptrace can bypass the conversion
302 * to generic breakpoint descriptions.
303 */
304int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
305 int *gen_len, int *gen_type)
306{
307 /* Type */
308 switch (ctrl.type) {
309 case ARM_BREAKPOINT_EXECUTE:
310 *gen_type = HW_BREAKPOINT_X;
311 break;
312 case ARM_BREAKPOINT_LOAD:
313 *gen_type = HW_BREAKPOINT_R;
314 break;
315 case ARM_BREAKPOINT_STORE:
316 *gen_type = HW_BREAKPOINT_W;
317 break;
318 case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
319 *gen_type = HW_BREAKPOINT_RW;
320 break;
321 default:
322 return -EINVAL;
323 }
324
325 /* Len */
326 switch (ctrl.len) {
327 case ARM_BREAKPOINT_LEN_1:
328 *gen_len = HW_BREAKPOINT_LEN_1;
329 break;
330 case ARM_BREAKPOINT_LEN_2:
331 *gen_len = HW_BREAKPOINT_LEN_2;
332 break;
333 case ARM_BREAKPOINT_LEN_4:
334 *gen_len = HW_BREAKPOINT_LEN_4;
335 break;
336 case ARM_BREAKPOINT_LEN_8:
337 *gen_len = HW_BREAKPOINT_LEN_8;
338 break;
339 default:
340 return -EINVAL;
341 }
342
343 return 0;
344}
345
346/*
347 * Construct an arch_hw_breakpoint from a perf_event.
348 */
349static int arch_build_bp_info(struct perf_event *bp)
350{
351 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
352
353 /* Type */
354 switch (bp->attr.bp_type) {
355 case HW_BREAKPOINT_X:
356 info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
357 break;
358 case HW_BREAKPOINT_R:
359 info->ctrl.type = ARM_BREAKPOINT_LOAD;
360 break;
361 case HW_BREAKPOINT_W:
362 info->ctrl.type = ARM_BREAKPOINT_STORE;
363 break;
364 case HW_BREAKPOINT_RW:
365 info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
366 break;
367 default:
368 return -EINVAL;
369 }
370
371 /* Len */
372 switch (bp->attr.bp_len) {
373 case HW_BREAKPOINT_LEN_1:
374 info->ctrl.len = ARM_BREAKPOINT_LEN_1;
375 break;
376 case HW_BREAKPOINT_LEN_2:
377 info->ctrl.len = ARM_BREAKPOINT_LEN_2;
378 break;
379 case HW_BREAKPOINT_LEN_4:
380 info->ctrl.len = ARM_BREAKPOINT_LEN_4;
381 break;
382 case HW_BREAKPOINT_LEN_8:
383 info->ctrl.len = ARM_BREAKPOINT_LEN_8;
384 break;
385 default:
386 return -EINVAL;
387 }
388
389 /*
390 * On AArch64, we only permit breakpoints of length 4, whereas
391 * AArch32 also requires breakpoints of length 2 for Thumb.
392 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
393 */
394 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
395 if (is_compat_task()) {
396 if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
397 info->ctrl.len != ARM_BREAKPOINT_LEN_4)
398 return -EINVAL;
399 } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
400 /*
401 * FIXME: Some tools (I'm looking at you perf) assume
402 * that breakpoints should be sizeof(long). This
403 * is nonsense. For now, we fix up the parameter
404 * but we should probably return -EINVAL instead.
405 */
406 info->ctrl.len = ARM_BREAKPOINT_LEN_4;
407 }
408 }
409
410 /* Address */
411 info->address = bp->attr.bp_addr;
412
413 /*
414 * Privilege
415 * Note that we disallow combined EL0/EL1 breakpoints because
416 * that would complicate the stepping code.
417 */
418 if (arch_check_bp_in_kernelspace(bp))
419 info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
420 else
421 info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
422
423 /* Enabled? */
424 info->ctrl.enabled = !bp->attr.disabled;
425
426 return 0;
427}
428
429/*
430 * Validate the arch-specific HW Breakpoint register settings.
431 */
432int arch_validate_hwbkpt_settings(struct perf_event *bp)
433{
434 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
435 int ret;
436 u64 alignment_mask, offset;
437
438 /* Build the arch_hw_breakpoint. */
439 ret = arch_build_bp_info(bp);
440 if (ret)
441 return ret;
442
443 /*
444 * Check address alignment.
445 * We don't do any clever alignment correction for watchpoints
446 * because using 64-bit unaligned addresses is deprecated for
447 * AArch64.
448 *
449 * AArch32 tasks expect some simple alignment fixups, so emulate
450 * that here.
451 */
452 if (is_compat_task()) {
453 if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
454 alignment_mask = 0x7;
455 else
456 alignment_mask = 0x3;
457 offset = info->address & alignment_mask;
458 switch (offset) {
459 case 0:
460 /* Aligned */
461 break;
462 case 1:
463 /* Allow single byte watchpoint. */
464 if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
465 break;
466 case 2:
467 /* Allow halfword watchpoints and breakpoints. */
468 if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
469 break;
470 default:
471 return -EINVAL;
472 }
473
474 info->address &= ~alignment_mask;
475 info->ctrl.len <<= offset;
476 } else {
477 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
478 alignment_mask = 0x3;
479 else
480 alignment_mask = 0x7;
481 if (info->address & alignment_mask)
482 return -EINVAL;
483 }
484
485 /*
486 * Disallow per-task kernel breakpoints since these would
487 * complicate the stepping code.
488 */
489 if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
490 return -EINVAL;
491
492 return 0;
493}
494
495/*
496 * Enable/disable all of the breakpoints active at the specified
497 * exception level at the register level.
498 * This is used when single-stepping after a breakpoint exception.
499 */
500static void toggle_bp_registers(int reg, enum debug_el el, int enable)
501{
502 int i, max_slots, privilege;
503 u32 ctrl;
504 struct perf_event **slots;
505
506 switch (reg) {
507 case AARCH64_DBG_REG_BCR:
508 slots = __get_cpu_var(bp_on_reg);
509 max_slots = core_num_brps;
510 break;
511 case AARCH64_DBG_REG_WCR:
512 slots = __get_cpu_var(wp_on_reg);
513 max_slots = core_num_wrps;
514 break;
515 default:
516 return;
517 }
518
519 for (i = 0; i < max_slots; ++i) {
520 if (!slots[i])
521 continue;
522
523 privilege = counter_arch_bp(slots[i])->ctrl.privilege;
524 if (debug_exception_level(privilege) != el)
525 continue;
526
527 ctrl = read_wb_reg(reg, i);
528 if (enable)
529 ctrl |= 0x1;
530 else
531 ctrl &= ~0x1;
532 write_wb_reg(reg, i, ctrl);
533 }
534}
535
536/*
537 * Debug exception handlers.
538 */
539static int breakpoint_handler(unsigned long unused, unsigned int esr,
540 struct pt_regs *regs)
541{
542 int i, step = 0, *kernel_step;
543 u32 ctrl_reg;
544 u64 addr, val;
545 struct perf_event *bp, **slots;
546 struct debug_info *debug_info;
547 struct arch_hw_breakpoint_ctrl ctrl;
548
549 slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
550 addr = instruction_pointer(regs);
551 debug_info = &current->thread.debug;
552
553 for (i = 0; i < core_num_brps; ++i) {
554 rcu_read_lock();
555
556 bp = slots[i];
557
558 if (bp == NULL)
559 goto unlock;
560
561 /* Check if the breakpoint value matches. */
562 val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
563 if (val != (addr & ~0x3))
564 goto unlock;
565
566 /* Possible match, check the byte address select to confirm. */
567 ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
568 decode_ctrl_reg(ctrl_reg, &ctrl);
569 if (!((1 << (addr & 0x3)) & ctrl.len))
570 goto unlock;
571
572 counter_arch_bp(bp)->trigger = addr;
573 perf_bp_event(bp, regs);
574
575 /* Do we need to handle the stepping? */
576 if (!bp->overflow_handler)
577 step = 1;
578unlock:
579 rcu_read_unlock();
580 }
581
582 if (!step)
583 return 0;
584
585 if (user_mode(regs)) {
586 debug_info->bps_disabled = 1;
587 toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);
588
589 /* If we're already stepping a watchpoint, just return. */
590 if (debug_info->wps_disabled)
591 return 0;
592
593 if (test_thread_flag(TIF_SINGLESTEP))
594 debug_info->suspended_step = 1;
595 else
596 user_enable_single_step(current);
597 } else {
598 toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
599 kernel_step = &__get_cpu_var(stepping_kernel_bp);
600
601 if (*kernel_step != ARM_KERNEL_STEP_NONE)
602 return 0;
603
604 if (kernel_active_single_step()) {
605 *kernel_step = ARM_KERNEL_STEP_SUSPEND;
606 } else {
607 *kernel_step = ARM_KERNEL_STEP_ACTIVE;
608 kernel_enable_single_step(regs);
609 }
610 }
611
612 return 0;
613}
614
615static int watchpoint_handler(unsigned long addr, unsigned int esr,
616 struct pt_regs *regs)
617{
618 int i, step = 0, *kernel_step, access;
619 u32 ctrl_reg;
620 u64 val, alignment_mask;
621 struct perf_event *wp, **slots;
622 struct debug_info *debug_info;
623 struct arch_hw_breakpoint *info;
624 struct arch_hw_breakpoint_ctrl ctrl;
625
626 slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
627 debug_info = &current->thread.debug;
628
629 for (i = 0; i < core_num_wrps; ++i) {
630 rcu_read_lock();
631
632 wp = slots[i];
633
634 if (wp == NULL)
635 goto unlock;
636
637 info = counter_arch_bp(wp);
638 /* AArch32 watchpoints are either 4 or 8 bytes aligned. */
639 if (is_compat_task()) {
640 if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
641 alignment_mask = 0x7;
642 else
643 alignment_mask = 0x3;
644 } else {
645 alignment_mask = 0x7;
646 }
647
648 /* Check if the watchpoint value matches. */
649 val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
650 if (val != (addr & ~alignment_mask))
651 goto unlock;
652
653 /* Possible match, check the byte address select to confirm. */
654 ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
655 decode_ctrl_reg(ctrl_reg, &ctrl);
656 if (!((1 << (addr & alignment_mask)) & ctrl.len))
657 goto unlock;
658
659 /*
660 * Check that the access type matches.
661 * 0 => load, otherwise => store
662 */
663 access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
664 HW_BREAKPOINT_R;
665 if (!(access & hw_breakpoint_type(wp)))
666 goto unlock;
667
668 info->trigger = addr;
669 perf_bp_event(wp, regs);
670
671 /* Do we need to handle the stepping? */
672 if (!wp->overflow_handler)
673 step = 1;
674
675unlock:
676 rcu_read_unlock();
677 }
678
679 if (!step)
680 return 0;
681
682 /*
683 * We always disable EL0 watchpoints because the kernel can
684 * cause these to fire via an unprivileged access.
685 */
686 toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);
687
688 if (user_mode(regs)) {
689 debug_info->wps_disabled = 1;
690
691 /* If we're already stepping a breakpoint, just return. */
692 if (debug_info->bps_disabled)
693 return 0;
694
695 if (test_thread_flag(TIF_SINGLESTEP))
696 debug_info->suspended_step = 1;
697 else
698 user_enable_single_step(current);
699 } else {
700 toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
701 kernel_step = &__get_cpu_var(stepping_kernel_bp);
702
703 if (*kernel_step != ARM_KERNEL_STEP_NONE)
704 return 0;
705
706 if (kernel_active_single_step()) {
707 *kernel_step = ARM_KERNEL_STEP_SUSPEND;
708 } else {
709 *kernel_step = ARM_KERNEL_STEP_ACTIVE;
710 kernel_enable_single_step(regs);
711 }
712 }
713
714 return 0;
715}
716
717/*
718 * Handle single-step exception.
719 */
720int reinstall_suspended_bps(struct pt_regs *regs)
721{
722 struct debug_info *debug_info = &current->thread.debug;
723 int handled_exception = 0, *kernel_step;
724
725 kernel_step = &__get_cpu_var(stepping_kernel_bp);
726
727 /*
728 * Called from single-step exception handler.
729 * Return 0 if execution can resume, 1 if a SIGTRAP should be
730 * reported.
731 */
732 if (user_mode(regs)) {
733 if (debug_info->bps_disabled) {
734 debug_info->bps_disabled = 0;
735 toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
736 handled_exception = 1;
737 }
738
739 if (debug_info->wps_disabled) {
740 debug_info->wps_disabled = 0;
741 toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
742 handled_exception = 1;
743 }
744
745 if (handled_exception) {
746 if (debug_info->suspended_step) {
747 debug_info->suspended_step = 0;
748 /* Allow exception handling to fall-through. */
749 handled_exception = 0;
750 } else {
751 user_disable_single_step(current);
752 }
753 }
754 } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
755 toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
756 toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);
757
758 if (!debug_info->wps_disabled)
759 toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
760
761 if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
762 kernel_disable_single_step();
763 handled_exception = 1;
764 } else {
765 handled_exception = 0;
766 }
767
768 *kernel_step = ARM_KERNEL_STEP_NONE;
769 }
770
771 return !handled_exception;
772}
773
774/*
775 * Context-switcher for restoring suspended breakpoints.
776 */
777void hw_breakpoint_thread_switch(struct task_struct *next)
778{
779 /*
780 * current next
781 * disabled: 0 0 => The usual case, NOTIFY_DONE
782 * 0 1 => Disable the registers
783 * 1 0 => Enable the registers
784 * 1 1 => NOTIFY_DONE. per-task bps will
785 * get taken care of by perf.
786 */
787
788 struct debug_info *current_debug_info, *next_debug_info;
789
790 current_debug_info = &current->thread.debug;
791 next_debug_info = &next->thread.debug;
792
793 /* Update breakpoints. */
794 if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
795 toggle_bp_registers(AARCH64_DBG_REG_BCR,
796 DBG_ACTIVE_EL0,
797 !next_debug_info->bps_disabled);
798
799 /* Update watchpoints. */
800 if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
801 toggle_bp_registers(AARCH64_DBG_REG_WCR,
802 DBG_ACTIVE_EL0,
803 !next_debug_info->wps_disabled);
804}
805
806/*
807 * CPU initialisation.
808 */
809static void reset_ctrl_regs(void *unused)
810{
811 int i;
812
813 for (i = 0; i < core_num_brps; ++i) {
814 write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
815 write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
816 }
817
818 for (i = 0; i < core_num_wrps; ++i) {
819 write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
820 write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
821 }
822}
823
824static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
825 unsigned long action,
826 void *hcpu)
827{
828 int cpu = (long)hcpu;
829 if (action == CPU_ONLINE)
830 smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1);
831 return NOTIFY_OK;
832}
833
834static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
835 .notifier_call = hw_breakpoint_reset_notify,
836};
837
838/*
839 * One-time initialisation.
840 */
841static int __init arch_hw_breakpoint_init(void)
842{
843 core_num_brps = get_num_brps();
844 core_num_wrps = get_num_wrps();
845
846 pr_info("found %d breakpoint and %d watchpoint registers.\n",
847 core_num_brps, core_num_wrps);
848
849 /*
850 * Reset the breakpoint resources. We assume that a halting
851 * debugger will leave the world in a nice state for us.
852 */
853 smp_call_function(reset_ctrl_regs, NULL, 1);
854 reset_ctrl_regs(NULL);
855
856 /* Register debug fault handlers. */
857 hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
858 TRAP_HWBKPT, "hw-breakpoint handler");
859 hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
860 TRAP_HWBKPT, "hw-watchpoint handler");
861
862 /* Register hotplug notifier. */
863 register_cpu_notifier(&hw_breakpoint_reset_nb);
864
865 return 0;
866}
867arch_initcall(arch_hw_breakpoint_init);
868
869void hw_breakpoint_pmu_read(struct perf_event *bp)
870{
871}
872
873/*
874 * Dummy function to register with die_notifier.
875 */
876int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
877 unsigned long val, void *data)
878{
879 return NOTIFY_DONE;
880}
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
new file mode 100644
index 000000000000..7d37ead4d199
--- /dev/null
+++ b/arch/arm64/kernel/io.c
@@ -0,0 +1,64 @@
1/*
2 * Based on arch/arm/kernel/io.c
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/export.h>
20#include <linux/types.h>
21#include <linux/io.h>
22
23/*
24 * Copy data from IO memory space to "real" memory space.
25 */
26void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
27{
28 unsigned char *t = to;
29 while (count) {
30 count--;
31 *t = readb(from);
32 t++;
33 from++;
34 }
35}
36EXPORT_SYMBOL(__memcpy_fromio);
37
38/*
39 * Copy data from "real" memory space to IO memory space.
40 */
41void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
42{
43 const unsigned char *f = from;
44 while (count) {
45 count--;
46 writeb(*f, to);
47 f++;
48 to++;
49 }
50}
51EXPORT_SYMBOL(__memcpy_toio);
52
53/*
54 * "memset" on IO memory space.
55 */
56void __memset_io(volatile void __iomem *dst, int c, size_t count)
57{
58 while (count) {
59 count--;
60 writeb(c, dst);
61 dst++;
62 }
63}
64EXPORT_SYMBOL(__memset_io);
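All three routines go through readb()/writeb() one byte at a time, so copies to and from device memory keep the access size and ordering guarantees of the MMIO accessors rather than whatever a plain memcpy() might emit. A hedged sketch of typical driver usage through the memcpy_fromio() wrapper; the device address, length and function name here are hypothetical, not taken from this patch:

#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_DEV_PHYS   0x10000000UL    /* hypothetical device base address */
#define DEMO_DESC_LEN   64              /* hypothetical descriptor size     */

static int demo_read_descriptor(void *buf)
{
        void __iomem *base = ioremap(DEMO_DEV_PHYS, DEMO_DESC_LEN);

        if (!base)
                return -ENOMEM;

        /* Resolves to __memcpy_fromio(), copying byte by byte via readb(). */
        memcpy_fromio(buf, base, DEMO_DESC_LEN);
        iounmap(base);
        return 0;
}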
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
new file mode 100644
index 000000000000..0373c6609eaf
--- /dev/null
+++ b/arch/arm64/kernel/irq.c
@@ -0,0 +1,84 @@
1/*
2 * Based on arch/arm/kernel/irq.c
3 *
4 * Copyright (C) 1992 Linus Torvalds
5 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
6 * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
7 * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
8 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
9 * Copyright (C) 2012 ARM Ltd.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 */
23
24#include <linux/kernel_stat.h>
25#include <linux/irq.h>
26#include <linux/smp.h>
27#include <linux/init.h>
28#include <linux/of_irq.h>
29#include <linux/seq_file.h>
30#include <linux/ratelimit.h>
31
32unsigned long irq_err_count;
33
34int arch_show_interrupts(struct seq_file *p, int prec)
35{
36#ifdef CONFIG_SMP
37 show_ipi_list(p, prec);
38#endif
39 seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
40 return 0;
41}
42
43/*
44 * handle_IRQ handles all hardware IRQs. Decoded IRQs should
45 * not come via this function. Instead, they should provide their
46 * own 'handler'. Used by platform code implementing C-based 1st
47 * level decoding.
48 */
49void handle_IRQ(unsigned int irq, struct pt_regs *regs)
50{
51 struct pt_regs *old_regs = set_irq_regs(regs);
52
53 irq_enter();
54
55 /*
56 * Some hardware gives randomly wrong interrupts. Rather
57 * than crashing, do something sensible.
58 */
59 if (unlikely(irq >= nr_irqs)) {
60 pr_warn_ratelimited("Bad IRQ%u\n", irq);
61 ack_bad_irq(irq);
62 } else {
63 generic_handle_irq(irq);
64 }
65
66 irq_exit();
67 set_irq_regs(old_regs);
68}
69
70/*
71 * Interrupt controllers supported by the kernel.
72 */
73static const struct of_device_id intctrl_of_match[] __initconst = {
74 /* IRQ controllers { .compatible, .data } info to go here */
75 {}
76};
77
78void __init init_IRQ(void)
79{
80 of_irq_init(intctrl_of_match);
81
82 if (!handle_arch_irq)
83 panic("No interrupt controller found.");
84}
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
new file mode 100644
index 000000000000..8b69ecb1d8bc
--- /dev/null
+++ b/arch/arm64/kernel/kuser32.S
@@ -0,0 +1,77 @@
1/*
2 * Low-level user helpers placed in the vectors page for AArch32.
3 * Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
4 *
5 * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 *
21 * AArch32 user helpers.
22 *
23 * Each segment is 32-byte aligned and will be moved to the top of the high
24 * vector page. New segments (if ever needed) must be added in front of
25 * existing ones. This mechanism should be used only for things that are
26 * really small and justified, and not be abused freely.
27 *
28 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
29 */
30 .align 5
31 .globl __kuser_helper_start
32__kuser_helper_start:
33
34__kuser_cmpxchg64: // 0xffff0f60
35 .inst 0xe92d00f0 // push {r4, r5, r6, r7}
36 .inst 0xe1c040d0 // ldrd r4, r5, [r0]
37 .inst 0xe1c160d0 // ldrd r6, r7, [r1]
38 .inst 0xf57ff05f // dmb sy
39 .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
40 .inst 0xe0303004 // eors r3, r0, r4
41 .inst 0x00313005 // eoreqs r3, r1, r5
42 .inst 0x01a23f96 // strexdeq r3, r6, [r2]
43 .inst 0x03330001 // teqeq r3, #1
44 .inst 0x0afffff9 // beq 1b
45 .inst 0xf57ff05f // dmb sy
46 .inst 0xe2730000 // rsbs r0, r3, #0
47 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
48 .inst 0xe12fff1e // bx lr
49
50 .align 5
51__kuser_memory_barrier: // 0xffff0fa0
52 .inst 0xf57ff05f // dmb sy
53 .inst 0xe12fff1e // bx lr
54
55 .align 5
56__kuser_cmpxchg: // 0xffff0fc0
57 .inst 0xf57ff05f // dmb sy
58 .inst 0xe1923f9f // 1: ldrex r3, [r2]
59 .inst 0xe0533000 // subs r3, r3, r0
60 .inst 0x01823f91 // strexeq r3, r1, [r2]
61 .inst 0x03330001 // teqeq r3, #1
62 .inst 0x0afffffa // beq 1b
63 .inst 0xe2730000 // rsbs r0, r3, #0
64 .inst 0xeaffffef // b <__kuser_memory_barrier>
65
66 .align 5
67__kuser_get_tls: // 0xffff0fe0
68 .inst 0xee1d0f70 // mrc p15, 0, r0, c13, c0, 3
69 .inst 0xe12fff1e // bx lr
70 .rep 5
71 .word 0
72 .endr
73
74__kuser_helper_version: // 0xffff0ffc
75 .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
76 .globl __kuser_helper_end
77__kuser_helper_end:
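These fragments are copied into the high vectors page, so 32-bit (compat) user code reaches them through fixed addresses rather than through a library. A hedged sketch of calling __kuser_cmpxchg from a compat process, following the address and return convention given in the comments above and in Documentation/arm/kernel_user_helpers.txt; it is only meaningful when built as 32-bit ARM code running on this kernel:

#include <stdio.h>

/* Prototype and fixed address of the cmpxchg helper, per the kuser ABI. */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

static volatile int futex_word;

int main(void)
{
        /* Returns 0 if futex_word was atomically changed from 0 to 1. */
        if (kuser_cmpxchg(0, 1, &futex_word) == 0)
                printf("acquired\n");
        else
                printf("contended\n");
        return 0;
}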
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
new file mode 100644
index 000000000000..ca0e3d55da99
--- /dev/null
+++ b/arch/arm64/kernel/module.c
@@ -0,0 +1,456 @@
1/*
2 * AArch64 loadable module support.
3 *
4 * Copyright (C) 2012 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#include <linux/bitops.h>
22#include <linux/elf.h>
23#include <linux/gfp.h>
24#include <linux/kernel.h>
25#include <linux/mm.h>
26#include <linux/moduleloader.h>
27#include <linux/vmalloc.h>
28
29void *module_alloc(unsigned long size)
30{
31 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
32 GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
33 __builtin_return_address(0));
34}
35
36enum aarch64_reloc_op {
37 RELOC_OP_NONE,
38 RELOC_OP_ABS,
39 RELOC_OP_PREL,
40 RELOC_OP_PAGE,
41};
42
43static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
44{
45 switch (reloc_op) {
46 case RELOC_OP_ABS:
47 return val;
48 case RELOC_OP_PREL:
49 return val - (u64)place;
50 case RELOC_OP_PAGE:
51 return (val & ~0xfff) - ((u64)place & ~0xfff);
52 case RELOC_OP_NONE:
53 return 0;
54 }
55
56 pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
57 return 0;
58}
59
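do_reloc() only produces the raw relocation value; RELOC_OP_PAGE in particular yields the distance between the 4KB page of the target symbol and the page of the place being patched, which is what ADRP-style relocations consume. A self-contained sketch of that arithmetic with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical addresses: the instruction being patched and the target. */
        uint64_t place = 0xffffffc000081004ULL;
        uint64_t val   = 0xffffffc000123458ULL;

        uint64_t abs  = val;                                     /* RELOC_OP_ABS  */
        uint64_t prel = val - place;                             /* RELOC_OP_PREL */
        uint64_t page = (val & ~0xfffULL) - (place & ~0xfffULL); /* RELOC_OP_PAGE */

        printf("abs=%#llx prel=%#llx page=%#llx\n",
               (unsigned long long)abs, (unsigned long long)prel,
               (unsigned long long)page);
        return 0;
}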
60static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
61{
62 u64 imm_mask = (1 << len) - 1;
63 s64 sval = do_reloc(op, place, val);
64
65 switch (len) {
66 case 16:
67 *(s16 *)place = sval;
68 break;
69 case 32:
70 *(s32 *)place = sval;
71 break;
72 case 64:
73 *(s64 *)place = sval;
74 break;
75 default:
76 pr_err("Invalid length (%d) for data relocation\n", len);
77 return 0;
78 }
79
80 /*
81 * Extract the upper value bits (including the sign bit) and
82 * shift them to bit 0.
83 */
84 sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
85
86 /*
87 * Overflow has occurred if the value is not representable in
88 * len bits (i.e. the bottom len bits are not sign-extended and
89 * the top bits are not all zero).
90 */
91 if ((u64)(sval + 1) > 2)
92 return -ERANGE;
93
94 return 0;
95}
96
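The overflow test works by shifting everything above the low value bits, including the sign bit, down to bit 0: if the original value fits in a signed len-bit field, what remains is either 0 (non-negative) or -1 (negative). A self-contained sketch of that test, written as the complement of the `>= 2` rejection used by reloc_insn_imm() further down; the probe values are made up:

#include <stdint.h>
#include <stdio.h>

/* Does 'val' fit in a signed field of 'len' bits? Mirrors the check above. */
static int fits(int64_t val, int len)
{
        uint64_t imm_mask = ((uint64_t)1 << len) - 1;
        int64_t sval = (int64_t)(val & ~(imm_mask >> 1)) >> (len - 1);

        /* After the shift, all upper bits must equal the sign bit: 0 or -1. */
        return (uint64_t)(sval + 1) < 2;
}

int main(void)
{
        /* 0x7fff and -0x8000 fit a signed 16-bit field; 0x8000 does not. */
        printf("%d %d %d\n", fits(0x7fff, 16), fits(-0x8000, 16), fits(0x8000, 16));
        return 0;
}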
97enum aarch64_imm_type {
98 INSN_IMM_MOVNZ,
99 INSN_IMM_MOVK,
100 INSN_IMM_ADR,
101 INSN_IMM_26,
102 INSN_IMM_19,
103 INSN_IMM_16,
104 INSN_IMM_14,
105 INSN_IMM_12,
106 INSN_IMM_9,
107};
108
109static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
110{
111 u32 immlo, immhi, lomask, himask, mask;
112 int shift;
113
114 switch (type) {
115 case INSN_IMM_MOVNZ:
116 /*
117 * For signed MOVW relocations, we have to manipulate the
118 * instruction encoding depending on whether or not the
119 * immediate is less than zero.
120 */
121 insn &= ~(3 << 29);
122 if ((s64)imm >= 0) {
123 /* >=0: Set the instruction to MOVZ (opcode 10b). */
124 insn |= 2 << 29;
125 } else {
126 /*
127 * <0: Set the instruction to MOVN (opcode 00b).
128 * Since we've masked the opcode already, we
129 * don't need to do anything other than
130 * inverting the new immediate field.
131 */
132 imm = ~imm;
133 }
134 case INSN_IMM_MOVK:
135 mask = BIT(16) - 1;
136 shift = 5;
137 break;
138 case INSN_IMM_ADR:
139 lomask = 0x3;
140 himask = 0x7ffff;
141 immlo = imm & lomask;
142 imm >>= 2;
143 immhi = imm & himask;
144 imm = (immlo << 24) | (immhi);
145 mask = (lomask << 24) | (himask);
146 shift = 5;
147 break;
148 case INSN_IMM_26:
149 mask = BIT(26) - 1;
150 shift = 0;
151 break;
152 case INSN_IMM_19:
153 mask = BIT(19) - 1;
154 shift = 5;
155 break;
156 case INSN_IMM_16:
157 mask = BIT(16) - 1;
158 shift = 5;
159 break;
160 case INSN_IMM_14:
161 mask = BIT(14) - 1;
162 shift = 5;
163 break;
164 case INSN_IMM_12:
165 mask = BIT(12) - 1;
166 shift = 10;
167 break;
168 case INSN_IMM_9:
169 mask = BIT(9) - 1;
170 shift = 12;
171 break;
172 default:
173 pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
174 type);
175 return 0;
176 }
177
178 /* Update the immediate field. */
179 insn &= ~(mask << shift);
180 insn |= (imm & mask) << shift;
181
182 return insn;
183}
184
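encode_insn_immediate() is plain mask-and-shift surgery on the instruction word: clear the immediate field, then OR in the new value at the field's offset. A self-contained sketch of the INSN_IMM_26 case, the 26-bit field at bit 0 used by the branch relocations; the sample opcode and offset are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t insn = 0x14000000;      /* hypothetical B instruction, imm26 = 0 */
        uint64_t imm  = 0x12345;         /* new branch offset, in instructions    */
        uint32_t mask = (1u << 26) - 1;  /* INSN_IMM_26: 26 bits at shift 0       */
        int shift = 0;

        /* Clear the old immediate field and insert the new one. */
        insn &= ~(mask << shift);
        insn |= (imm & mask) << shift;

        printf("patched insn = %#x\n", insn);   /* 0x14012345 */
        return 0;
}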
185static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
186 int lsb, enum aarch64_imm_type imm_type)
187{
188 u64 imm, limit = 0;
189 s64 sval;
190 u32 insn = *(u32 *)place;
191
192 sval = do_reloc(op, place, val);
193 sval >>= lsb;
194 imm = sval & 0xffff;
195
196 /* Update the instruction with the new encoding. */
197 *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
198
199 /* Shift out the immediate field. */
200 sval >>= 16;
201
202 /*
203 * For unsigned immediates, the overflow check is straightforward.
204 * For signed immediates, the sign bit is actually the bit past the
205 * most significant bit of the field.
206 * The INSN_IMM_16 immediate type is unsigned.
207 */
208 if (imm_type != INSN_IMM_16) {
209 sval++;
210 limit++;
211 }
212
213 /* Check the upper bits depending on the sign of the immediate. */
214 if ((u64)sval > limit)
215 return -ERANGE;
216
217 return 0;
218}
219
220static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
221 int lsb, int len, enum aarch64_imm_type imm_type)
222{
223 u64 imm, imm_mask;
224 s64 sval;
225 u32 insn = *(u32 *)place;
226
227 /* Calculate the relocation value. */
228 sval = do_reloc(op, place, val);
229 sval >>= lsb;
230
231 /* Extract the value bits and shift them to bit 0. */
232 imm_mask = (BIT(lsb + len) - 1) >> lsb;
233 imm = sval & imm_mask;
234
235 /* Update the instruction's immediate field. */
236 *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
237
238 /*
239 * Extract the upper value bits (including the sign bit) and
240 * shift them to bit 0.
241 */
242 sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
243
244 /*
245 * Overflow has occurred if the upper bits are not all equal to
246 * the sign bit of the value.
247 */
248 if ((u64)(sval + 1) >= 2)
249 return -ERANGE;
250
251 return 0;
252}
253
254int apply_relocate_add(Elf64_Shdr *sechdrs,
255 const char *strtab,
256 unsigned int symindex,
257 unsigned int relsec,
258 struct module *me)
259{
260 unsigned int i;
261 int ovf;
262 bool overflow_check;
263 Elf64_Sym *sym;
264 void *loc;
265 u64 val;
266 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
267
268 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
269 /* loc corresponds to P in the AArch64 ELF document. */
270 loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
271 + rel[i].r_offset;
272
273 /* sym is the ELF symbol we're referring to. */
274 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
275 + ELF64_R_SYM(rel[i].r_info);
276
277 /* val corresponds to (S + A) in the AArch64 ELF document. */
278 val = sym->st_value + rel[i].r_addend;
279
280 /* Check for overflow by default. */
281 overflow_check = true;
282
283 /* Perform the static relocation. */
284 switch (ELF64_R_TYPE(rel[i].r_info)) {
285 /* Null relocations. */
286 case R_ARM_NONE:
287 case R_AARCH64_NONE:
288 ovf = 0;
289 break;
290
291 /* Data relocations. */
292 case R_AARCH64_ABS64:
293 overflow_check = false;
294 ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
295 break;
296 case R_AARCH64_ABS32:
297 ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
298 break;
299 case R_AARCH64_ABS16:
300 ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
301 break;
302 case R_AARCH64_PREL64:
303 overflow_check = false;
304 ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
305 break;
306 case R_AARCH64_PREL32:
307 ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
308 break;
309 case R_AARCH64_PREL16:
310 ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
311 break;
312
313 /* MOVW instruction relocations. */
314 case R_AARCH64_MOVW_UABS_G0_NC:
315 overflow_check = false;
316 case R_AARCH64_MOVW_UABS_G0:
317 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
318 INSN_IMM_16);
319 break;
320 case R_AARCH64_MOVW_UABS_G1_NC:
321 overflow_check = false;
322 case R_AARCH64_MOVW_UABS_G1:
323 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
324 INSN_IMM_16);
325 break;
326 case R_AARCH64_MOVW_UABS_G2_NC:
327 overflow_check = false;
328 case R_AARCH64_MOVW_UABS_G2:
329 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
330 INSN_IMM_16);
331 break;
332 case R_AARCH64_MOVW_UABS_G3:
333 /* We're using the top bits so we can't overflow. */
334 overflow_check = false;
335 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
336 INSN_IMM_16);
337 break;
338 case R_AARCH64_MOVW_SABS_G0:
339 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
340 INSN_IMM_MOVNZ);
341 break;
342 case R_AARCH64_MOVW_SABS_G1:
343 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
344 INSN_IMM_MOVNZ);
345 break;
346 case R_AARCH64_MOVW_SABS_G2:
347 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
348 INSN_IMM_MOVNZ);
349 break;
350 case R_AARCH64_MOVW_PREL_G0_NC:
351 overflow_check = false;
352 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
353 INSN_IMM_MOVK);
354 break;
355 case R_AARCH64_MOVW_PREL_G0:
356 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
357 INSN_IMM_MOVNZ);
358 break;
359 case R_AARCH64_MOVW_PREL_G1_NC:
360 overflow_check = false;
361 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
362 INSN_IMM_MOVK);
363 break;
364 case R_AARCH64_MOVW_PREL_G1:
365 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
366 INSN_IMM_MOVNZ);
367 break;
368 case R_AARCH64_MOVW_PREL_G2_NC:
369 overflow_check = false;
370 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
371 INSN_IMM_MOVK);
372 break;
373 case R_AARCH64_MOVW_PREL_G2:
374 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
375 INSN_IMM_MOVNZ);
376 break;
377 case R_AARCH64_MOVW_PREL_G3:
378 /* We're using the top bits so we can't overflow. */
379 overflow_check = false;
380 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
381 INSN_IMM_MOVNZ);
382 break;
383
384 /* Immediate instruction relocations. */
385 case R_AARCH64_LD_PREL_LO19:
386 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
387 INSN_IMM_19);
388 break;
389 case R_AARCH64_ADR_PREL_LO21:
390 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
391 INSN_IMM_ADR);
392 break;
393 case R_AARCH64_ADR_PREL_PG_HI21_NC:
394 overflow_check = false;
395 case R_AARCH64_ADR_PREL_PG_HI21:
396 ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
397 INSN_IMM_ADR);
398 break;
399 case R_AARCH64_ADD_ABS_LO12_NC:
400 case R_AARCH64_LDST8_ABS_LO12_NC:
401 overflow_check = false;
402 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
403 INSN_IMM_12);
404 break;
405 case R_AARCH64_LDST16_ABS_LO12_NC:
406 overflow_check = false;
407 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
408 INSN_IMM_12);
409 break;
410 case R_AARCH64_LDST32_ABS_LO12_NC:
411 overflow_check = false;
412 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
413 INSN_IMM_12);
414 break;
415 case R_AARCH64_LDST64_ABS_LO12_NC:
416 overflow_check = false;
417 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
418 INSN_IMM_12);
419 break;
420 case R_AARCH64_LDST128_ABS_LO12_NC:
421 overflow_check = false;
422 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
423 INSN_IMM_12);
424 break;
425 case R_AARCH64_TSTBR14:
426 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
427 INSN_IMM_14);
428 break;
429 case R_AARCH64_CONDBR19:
430 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
431 INSN_IMM_19);
432 break;
433 case R_AARCH64_JUMP26:
434 case R_AARCH64_CALL26:
435 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
436 INSN_IMM_26);
437 break;
438
439 default:
440 pr_err("module %s: unsupported RELA relocation: %llu\n",
441 me->name, ELF64_R_TYPE(rel[i].r_info));
442 return -ENOEXEC;
443 }
444
445 if (overflow_check && ovf == -ERANGE)
446 goto overflow;
447
448 }
449
450 return 0;
451
452overflow:
453 pr_err("module %s: overflow in relocation type %d val %Lx\n",
454 me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
455 return -ENOEXEC;
456}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
new file mode 100644
index 000000000000..ecbf2d81ec5c
--- /dev/null
+++ b/arch/arm64/kernel/perf_event.c
@@ -0,0 +1,1368 @@
1/*
2 * PMU support
3 *
4 * Copyright (C) 2012 ARM Limited
5 * Author: Will Deacon <will.deacon@arm.com>
6 *
7 * This code is based heavily on the ARMv7 perf event code.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21#define pr_fmt(fmt) "hw perfevents: " fmt
22
23#include <linux/bitmap.h>
24#include <linux/interrupt.h>
25#include <linux/kernel.h>
26#include <linux/export.h>
27#include <linux/perf_event.h>
28#include <linux/platform_device.h>
29#include <linux/spinlock.h>
30#include <linux/uaccess.h>
31
32#include <asm/cputype.h>
33#include <asm/irq.h>
34#include <asm/irq_regs.h>
35#include <asm/pmu.h>
36#include <asm/stacktrace.h>
37
38/*
39 * ARMv8 supports a maximum of 32 events.
40 * The cycle counter is included in this total.
41 */
42#define ARMPMU_MAX_HWEVENTS 32
43
44static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
45static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
46static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
47
48#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
49
50/* Set at runtime when we know what CPU type we are. */
51static struct arm_pmu *cpu_pmu;
52
53int
54armpmu_get_max_events(void)
55{
56 int max_events = 0;
57
58 if (cpu_pmu != NULL)
59 max_events = cpu_pmu->num_events;
60
61 return max_events;
62}
63EXPORT_SYMBOL_GPL(armpmu_get_max_events);
64
65int perf_num_counters(void)
66{
67 return armpmu_get_max_events();
68}
69EXPORT_SYMBOL_GPL(perf_num_counters);
70
71#define HW_OP_UNSUPPORTED 0xFFFF
72
73#define C(_x) \
74 PERF_COUNT_HW_CACHE_##_x
75
76#define CACHE_OP_UNSUPPORTED 0xFFFF
77
78static int
79armpmu_map_cache_event(const unsigned (*cache_map)
80 [PERF_COUNT_HW_CACHE_MAX]
81 [PERF_COUNT_HW_CACHE_OP_MAX]
82 [PERF_COUNT_HW_CACHE_RESULT_MAX],
83 u64 config)
84{
85 unsigned int cache_type, cache_op, cache_result, ret;
86
87 cache_type = (config >> 0) & 0xff;
88 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
89 return -EINVAL;
90
91 cache_op = (config >> 8) & 0xff;
92 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
93 return -EINVAL;
94
95 cache_result = (config >> 16) & 0xff;
96 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
97 return -EINVAL;
98
99 ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
100
101 if (ret == CACHE_OP_UNSUPPORTED)
102 return -ENOENT;
103
104 return ret;
105}
106
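A PERF_TYPE_HW_CACHE config packs the cache type, operation and result into the low three bytes of attr.config, and the function above simply unpacks them and indexes the per-PMU table. A self-contained sketch of the unpacking; the field values assume the generic perf enums (L1D = 0, OP_READ = 0, RESULT_MISS = 1):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical config for "L1D read misses": type 0, op 0, result 1. */
        uint64_t config = 0 | (0 << 8) | (1UL << 16);

        unsigned int cache_type   = (config >> 0)  & 0xff;
        unsigned int cache_op     = (config >> 8)  & 0xff;
        unsigned int cache_result = (config >> 16) & 0xff;

        printf("type=%u op=%u result=%u\n", cache_type, cache_op, cache_result);
        return 0;
}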
107static int
108armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
109{
110 int mapping = (*event_map)[config];
111 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
112}
113
114static int
115armpmu_map_raw_event(u32 raw_event_mask, u64 config)
116{
117 return (int)(config & raw_event_mask);
118}
119
120static int map_cpu_event(struct perf_event *event,
121 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
122 const unsigned (*cache_map)
123 [PERF_COUNT_HW_CACHE_MAX]
124 [PERF_COUNT_HW_CACHE_OP_MAX]
125 [PERF_COUNT_HW_CACHE_RESULT_MAX],
126 u32 raw_event_mask)
127{
128 u64 config = event->attr.config;
129
130 switch (event->attr.type) {
131 case PERF_TYPE_HARDWARE:
132 return armpmu_map_event(event_map, config);
133 case PERF_TYPE_HW_CACHE:
134 return armpmu_map_cache_event(cache_map, config);
135 case PERF_TYPE_RAW:
136 return armpmu_map_raw_event(raw_event_mask, config);
137 }
138
139 return -ENOENT;
140}
141
142int
143armpmu_event_set_period(struct perf_event *event,
144 struct hw_perf_event *hwc,
145 int idx)
146{
147 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
148 s64 left = local64_read(&hwc->period_left);
149 s64 period = hwc->sample_period;
150 int ret = 0;
151
152 if (unlikely(left <= -period)) {
153 left = period;
154 local64_set(&hwc->period_left, left);
155 hwc->last_period = period;
156 ret = 1;
157 }
158
159 if (unlikely(left <= 0)) {
160 left += period;
161 local64_set(&hwc->period_left, left);
162 hwc->last_period = period;
163 ret = 1;
164 }
165
166 if (left > (s64)armpmu->max_period)
167 left = armpmu->max_period;
168
169 local64_set(&hwc->prev_count, (u64)-left);
170
171 armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
172
173 perf_event_update_userpage(event);
174
175 return ret;
176}
177
178u64
179armpmu_event_update(struct perf_event *event,
180 struct hw_perf_event *hwc,
181 int idx)
182{
183 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
184 u64 delta, prev_raw_count, new_raw_count;
185
186again:
187 prev_raw_count = local64_read(&hwc->prev_count);
188 new_raw_count = armpmu->read_counter(idx);
189
190 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
191 new_raw_count) != prev_raw_count)
192 goto again;
193
194 delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
195
196 local64_add(delta, &event->count);
197 local64_sub(delta, &hwc->period_left);
198
199 return new_raw_count;
200}
201
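Because the subtraction is done in unsigned arithmetic and then masked with max_period, the delta comes out right even when the hardware counter wrapped between the two reads. A self-contained sketch with a 32-bit counter and made-up readings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t max_period = (1ULL << 32) - 1;   /* 32-bit counter       */
        uint64_t prev_count = 0xfffffff0;         /* just before wrapping */
        uint64_t new_count  = 0x00000010;         /* just after wrapping  */

        uint64_t delta = (new_count - prev_count) & max_period;

        printf("delta = %llu events\n", (unsigned long long)delta);   /* 32 */
        return 0;
}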
202static void
203armpmu_read(struct perf_event *event)
204{
205 struct hw_perf_event *hwc = &event->hw;
206
207 /* Don't read disabled counters! */
208 if (hwc->idx < 0)
209 return;
210
211 armpmu_event_update(event, hwc, hwc->idx);
212}
213
214static void
215armpmu_stop(struct perf_event *event, int flags)
216{
217 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
218 struct hw_perf_event *hwc = &event->hw;
219
220 /*
221 * ARM pmu always has to update the counter, so ignore
222 * PERF_EF_UPDATE, see comments in armpmu_start().
223 */
224 if (!(hwc->state & PERF_HES_STOPPED)) {
225 armpmu->disable(hwc, hwc->idx);
226 barrier(); /* why? */
227 armpmu_event_update(event, hwc, hwc->idx);
228 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
229 }
230}
231
232static void
233armpmu_start(struct perf_event *event, int flags)
234{
235 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
236 struct hw_perf_event *hwc = &event->hw;
237
238 /*
239 * ARM pmu always has to reprogram the period, so ignore
240 * PERF_EF_RELOAD, see the comment below.
241 */
242 if (flags & PERF_EF_RELOAD)
243 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
244
245 hwc->state = 0;
246 /*
247 * Set the period again. Some counters can't be stopped, so when we
248 * were stopped we simply disabled the IRQ source and the counter
249 * may have been left counting. If we don't do this step then we may
250 * get an interrupt too soon or *way* too late if the overflow has
251 * happened since disabling.
252 */
253 armpmu_event_set_period(event, hwc, hwc->idx);
254 armpmu->enable(hwc, hwc->idx);
255}
256
257static void
258armpmu_del(struct perf_event *event, int flags)
259{
260 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
261 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
262 struct hw_perf_event *hwc = &event->hw;
263 int idx = hwc->idx;
264
265 WARN_ON(idx < 0);
266
267 armpmu_stop(event, PERF_EF_UPDATE);
268 hw_events->events[idx] = NULL;
269 clear_bit(idx, hw_events->used_mask);
270
271 perf_event_update_userpage(event);
272}
273
274static int
275armpmu_add(struct perf_event *event, int flags)
276{
277 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
278 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
279 struct hw_perf_event *hwc = &event->hw;
280 int idx;
281 int err = 0;
282
283 perf_pmu_disable(event->pmu);
284
285 /* If we don't have space for the counter then finish early. */
286 idx = armpmu->get_event_idx(hw_events, hwc);
287 if (idx < 0) {
288 err = idx;
289 goto out;
290 }
291
292 /*
293 * If there is an event in the counter we are going to use then make
294 * sure it is disabled.
295 */
296 event->hw.idx = idx;
297 armpmu->disable(hwc, idx);
298 hw_events->events[idx] = event;
299
300 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
301 if (flags & PERF_EF_START)
302 armpmu_start(event, PERF_EF_RELOAD);
303
304 /* Propagate our changes to the userspace mapping. */
305 perf_event_update_userpage(event);
306
307out:
308 perf_pmu_enable(event->pmu);
309 return err;
310}
311
312static int
313validate_event(struct pmu_hw_events *hw_events,
314 struct perf_event *event)
315{
316 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
317 struct hw_perf_event fake_event = event->hw;
318 struct pmu *leader_pmu = event->group_leader->pmu;
319
320 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
321 return 1;
322
323 return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
324}
325
326static int
327validate_group(struct perf_event *event)
328{
329 struct perf_event *sibling, *leader = event->group_leader;
330 struct pmu_hw_events fake_pmu;
331 DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
332
333 /*
334 * Initialise the fake PMU. We only need to populate the
335 * used_mask for the purposes of validation.
336 */
337 memset(fake_used_mask, 0, sizeof(fake_used_mask));
338 fake_pmu.used_mask = fake_used_mask;
339
340 if (!validate_event(&fake_pmu, leader))
341 return -EINVAL;
342
343 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
344 if (!validate_event(&fake_pmu, sibling))
345 return -EINVAL;
346 }
347
348 if (!validate_event(&fake_pmu, event))
349 return -EINVAL;
350
351 return 0;
352}
353
354static void
355armpmu_release_hardware(struct arm_pmu *armpmu)
356{
357 int i, irq, irqs;
358 struct platform_device *pmu_device = armpmu->plat_device;
359
360 irqs = min(pmu_device->num_resources, num_possible_cpus());
361
362 for (i = 0; i < irqs; ++i) {
363 if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
364 continue;
365 irq = platform_get_irq(pmu_device, i);
366 if (irq >= 0)
367 free_irq(irq, armpmu);
368 }
369}
370
371static int
372armpmu_reserve_hardware(struct arm_pmu *armpmu)
373{
374 int i, err, irq, irqs;
375 struct platform_device *pmu_device = armpmu->plat_device;
376
377 if (!pmu_device) {
378 pr_err("no PMU device registered\n");
379 return -ENODEV;
380 }
381
382 irqs = min(pmu_device->num_resources, num_possible_cpus());
383 if (irqs < 1) {
384 pr_err("no irqs for PMUs defined\n");
385 return -ENODEV;
386 }
387
388 for (i = 0; i < irqs; ++i) {
389 err = 0;
390 irq = platform_get_irq(pmu_device, i);
391 if (irq < 0)
392 continue;
393
394 /*
395 * If we have a single PMU interrupt that we can't shift,
396 * assume that we're running on a uniprocessor machine and
397 * continue. Otherwise, continue without this interrupt.
398 */
399 if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
400 pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
401 irq, i);
402 continue;
403 }
404
405 err = request_irq(irq, armpmu->handle_irq,
406 IRQF_NOBALANCING,
407 "arm-pmu", armpmu);
408 if (err) {
409 pr_err("unable to request IRQ%d for ARM PMU counters\n",
410 irq);
411 armpmu_release_hardware(armpmu);
412 return err;
413 }
414
415 cpumask_set_cpu(i, &armpmu->active_irqs);
416 }
417
418 return 0;
419}
420
421static void
422hw_perf_event_destroy(struct perf_event *event)
423{
424 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
425 atomic_t *active_events = &armpmu->active_events;
426 struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
427
428 if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
429 armpmu_release_hardware(armpmu);
430 mutex_unlock(pmu_reserve_mutex);
431 }
432}
433
434static int
435event_requires_mode_exclusion(struct perf_event_attr *attr)
436{
437 return attr->exclude_idle || attr->exclude_user ||
438 attr->exclude_kernel || attr->exclude_hv;
439}
440
441static int
442__hw_perf_event_init(struct perf_event *event)
443{
444 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
445 struct hw_perf_event *hwc = &event->hw;
446 int mapping, err;
447
448 mapping = armpmu->map_event(event);
449
450 if (mapping < 0) {
451 pr_debug("event %x:%llx not supported\n", event->attr.type,
452 event->attr.config);
453 return mapping;
454 }
455
456 /*
457 * We don't assign an index until we actually place the event onto
458 * hardware. Use -1 to signify that we haven't decided where to put it
459 * yet. For SMP systems, each core has its own PMU so we can't do any
460 * clever allocation or constraints checking at this point.
461 */
462 hwc->idx = -1;
463 hwc->config_base = 0;
464 hwc->config = 0;
465 hwc->event_base = 0;
466
467 /*
468 * Check whether we need to exclude the counter from certain modes.
469 */
470 if ((!armpmu->set_event_filter ||
471 armpmu->set_event_filter(hwc, &event->attr)) &&
472 event_requires_mode_exclusion(&event->attr)) {
473 pr_debug("ARM performance counters do not support mode exclusion\n");
474 return -EPERM;
475 }
476
477 /*
478 * Store the event encoding into the config_base field.
479 */
480 hwc->config_base |= (unsigned long)mapping;
481
482 if (!hwc->sample_period) {
483 /*
484 * For non-sampling runs, limit the sample_period to half
485 * of the counter width. That way, the new counter value
486 * is far less likely to overtake the previous one unless
487 * you have some serious IRQ latency issues.
488 */
489 hwc->sample_period = armpmu->max_period >> 1;
490 hwc->last_period = hwc->sample_period;
491 local64_set(&hwc->period_left, hwc->sample_period);
492 }
493
494 err = 0;
495 if (event->group_leader != event) {
496 err = validate_group(event);
497 if (err)
498 return -EINVAL;
499 }
500
501 return err;
502}
503
504static int armpmu_event_init(struct perf_event *event)
505{
506 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
507 int err = 0;
508 atomic_t *active_events = &armpmu->active_events;
509
510 if (armpmu->map_event(event) == -ENOENT)
511 return -ENOENT;
512
513 event->destroy = hw_perf_event_destroy;
514
515 if (!atomic_inc_not_zero(active_events)) {
516 mutex_lock(&armpmu->reserve_mutex);
517 if (atomic_read(active_events) == 0)
518 err = armpmu_reserve_hardware(armpmu);
519
520 if (!err)
521 atomic_inc(active_events);
522 mutex_unlock(&armpmu->reserve_mutex);
523 }
524
525 if (err)
526 return err;
527
528 err = __hw_perf_event_init(event);
529 if (err)
530 hw_perf_event_destroy(event);
531
532 return err;
533}
534
535static void armpmu_enable(struct pmu *pmu)
536{
537 struct arm_pmu *armpmu = to_arm_pmu(pmu);
538 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
539 int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
540
541 if (enabled)
542 armpmu->start();
543}
544
545static void armpmu_disable(struct pmu *pmu)
546{
547 struct arm_pmu *armpmu = to_arm_pmu(pmu);
548 armpmu->stop();
549}
550
551static void __init armpmu_init(struct arm_pmu *armpmu)
552{
553 atomic_set(&armpmu->active_events, 0);
554 mutex_init(&armpmu->reserve_mutex);
555
556 armpmu->pmu = (struct pmu) {
557 .pmu_enable = armpmu_enable,
558 .pmu_disable = armpmu_disable,
559 .event_init = armpmu_event_init,
560 .add = armpmu_add,
561 .del = armpmu_del,
562 .start = armpmu_start,
563 .stop = armpmu_stop,
564 .read = armpmu_read,
565 };
566}
567
568int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
569{
570 armpmu_init(armpmu);
571 return perf_pmu_register(&armpmu->pmu, name, type);
572}
573
574/*
575 * ARMv8 PMUv3 Performance Events handling code.
576 * Common event types.
577 */
578enum armv8_pmuv3_perf_types {
579 /* Required events. */
580 ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
581 ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
582 ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
583 ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
584 ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
585 ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,
586
587 /* At least one of the following is required. */
588 ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
589 ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,
590
591 /* Common architectural events. */
592 ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
593 ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
594 ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
595 ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
596 ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
597 ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
598 ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
599 ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
600 ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
601 ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,
602
603 /* Common microarchitectural events. */
604 ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
605 ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
606 ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
607 ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
608 ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
609 ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
610 ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
611 ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
612 ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
613 ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
614 ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
615 ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
616
617 /*
618 * This isn't an architected event.
619 * We detect this event number and use the cycle counter instead.
620 */
621 ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF,
622};
623
624/* PMUv3 HW events mapping. */
625static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
626 [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
627 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
628 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
629 [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
630 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
631 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
632 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
633 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
634 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
635};
636
637static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
638 [PERF_COUNT_HW_CACHE_OP_MAX]
639 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
640 [C(L1D)] = {
641 [C(OP_READ)] = {
642 [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
643 [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
644 },
645 [C(OP_WRITE)] = {
646 [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
647 [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
648 },
649 [C(OP_PREFETCH)] = {
650 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
651 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
652 },
653 },
654 [C(L1I)] = {
655 [C(OP_READ)] = {
656 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
657 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
658 },
659 [C(OP_WRITE)] = {
660 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
661 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
662 },
663 [C(OP_PREFETCH)] = {
664 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
665 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
666 },
667 },
668 [C(LL)] = {
669 [C(OP_READ)] = {
670 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
671 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
672 },
673 [C(OP_WRITE)] = {
674 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
675 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
676 },
677 [C(OP_PREFETCH)] = {
678 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
679 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
680 },
681 },
682 [C(DTLB)] = {
683 [C(OP_READ)] = {
684 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
685 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
686 },
687 [C(OP_WRITE)] = {
688 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
689 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
690 },
691 [C(OP_PREFETCH)] = {
692 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
693 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
694 },
695 },
696 [C(ITLB)] = {
697 [C(OP_READ)] = {
698 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
699 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
700 },
701 [C(OP_WRITE)] = {
702 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
703 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
704 },
705 [C(OP_PREFETCH)] = {
706 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
707 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
708 },
709 },
710 [C(BPU)] = {
711 [C(OP_READ)] = {
712 [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
713 [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
714 },
715 [C(OP_WRITE)] = {
716 [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
717 [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
718 },
719 [C(OP_PREFETCH)] = {
720 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
721 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
722 },
723 },
724 [C(NODE)] = {
725 [C(OP_READ)] = {
726 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
727 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
728 },
729 [C(OP_WRITE)] = {
730 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
731 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
732 },
733 [C(OP_PREFETCH)] = {
734 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
735 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
736 },
737 },
738};
739
740/*
741 * Perf Events' indices
742 */
743#define ARMV8_IDX_CYCLE_COUNTER 0
744#define ARMV8_IDX_COUNTER0 1
745#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
746
747#define ARMV8_MAX_COUNTERS 32
748#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
749
750/*
751 * ARMv8 low level PMU access
752 */
753
754/*
755 * Perf Event to low level counters mapping
756 */
757#define ARMV8_IDX_TO_COUNTER(x) \
758 (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
759
760/*
761 * Per-CPU PMCR: config reg
762 */
763#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
764#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
765#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
766#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
767#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
768#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
769#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
770#define ARMV8_PMCR_N_MASK 0x1f
771#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
772
773/*
774 * PMOVSR: counters overflow flag status reg
775 */
776#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
777#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
778
779/*
780 * PMXEVTYPER: Event selection reg
781 */
782#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
783#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
784
785/*
786 * Event filters for PMUv3
787 */
788#define ARMV8_EXCLUDE_EL1 (1 << 31)
789#define ARMV8_EXCLUDE_EL0 (1 << 30)
790#define ARMV8_INCLUDE_EL2 (1 << 27)
791
792static inline u32 armv8pmu_pmcr_read(void)
793{
794 u32 val;
795 asm volatile("mrs %0, pmcr_el0" : "=r" (val));
796 return val;
797}
798
799static inline void armv8pmu_pmcr_write(u32 val)
800{
801 val &= ARMV8_PMCR_MASK;
802 isb();
803 asm volatile("msr pmcr_el0, %0" :: "r" (val));
804}
805
806static inline int armv8pmu_has_overflowed(u32 pmovsr)
807{
808 return pmovsr & ARMV8_OVERFLOWED_MASK;
809}
810
811static inline int armv8pmu_counter_valid(int idx)
812{
813 return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;
814}
815
816static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
817{
818 int ret = 0;
819 u32 counter;
820
821 if (!armv8pmu_counter_valid(idx)) {
822 pr_err("CPU%u checking wrong counter %d overflow status\n",
823 smp_processor_id(), idx);
824 } else {
825 counter = ARMV8_IDX_TO_COUNTER(idx);
826 ret = pmnc & BIT(counter);
827 }
828
829 return ret;
830}
831
832static inline int armv8pmu_select_counter(int idx)
833{
834 u32 counter;
835
836 if (!armv8pmu_counter_valid(idx)) {
837 pr_err("CPU%u selecting wrong PMNC counter %d\n",
838 smp_processor_id(), idx);
839 return -EINVAL;
840 }
841
842 counter = ARMV8_IDX_TO_COUNTER(idx);
843 asm volatile("msr pmselr_el0, %0" :: "r" (counter));
844 isb();
845
846 return idx;
847}
848
849static inline u32 armv8pmu_read_counter(int idx)
850{
851 u32 value = 0;
852
853 if (!armv8pmu_counter_valid(idx))
854 pr_err("CPU%u reading wrong counter %d\n",
855 smp_processor_id(), idx);
856 else if (idx == ARMV8_IDX_CYCLE_COUNTER)
857 asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
858 else if (armv8pmu_select_counter(idx) == idx)
859 asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
860
861 return value;
862}
863
864static inline void armv8pmu_write_counter(int idx, u32 value)
865{
866 if (!armv8pmu_counter_valid(idx))
867 pr_err("CPU%u writing wrong counter %d\n",
868 smp_processor_id(), idx);
869 else if (idx == ARMV8_IDX_CYCLE_COUNTER)
870 asm volatile("msr pmccntr_el0, %0" :: "r" (value));
871 else if (armv8pmu_select_counter(idx) == idx)
872 asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
873}
874
875static inline void armv8pmu_write_evtype(int idx, u32 val)
876{
877 if (armv8pmu_select_counter(idx) == idx) {
878 val &= ARMV8_EVTYPE_MASK;
879 asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
880 }
881}
882
883static inline int armv8pmu_enable_counter(int idx)
884{
885 u32 counter;
886
887 if (!armv8pmu_counter_valid(idx)) {
888 pr_err("CPU%u enabling wrong PMNC counter %d\n",
889 smp_processor_id(), idx);
890 return -EINVAL;
891 }
892
893 counter = ARMV8_IDX_TO_COUNTER(idx);
894 asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
895 return idx;
896}
897
898static inline int armv8pmu_disable_counter(int idx)
899{
900 u32 counter;
901
902 if (!armv8pmu_counter_valid(idx)) {
903 pr_err("CPU%u disabling wrong PMNC counter %d\n",
904 smp_processor_id(), idx);
905 return -EINVAL;
906 }
907
908 counter = ARMV8_IDX_TO_COUNTER(idx);
909 asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
910 return idx;
911}
912
913static inline int armv8pmu_enable_intens(int idx)
914{
915 u32 counter;
916
917 if (!armv8pmu_counter_valid(idx)) {
918 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
919 smp_processor_id(), idx);
920 return -EINVAL;
921 }
922
923 counter = ARMV8_IDX_TO_COUNTER(idx);
924 asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
925 return idx;
926}
927
928static inline int armv8pmu_disable_intens(int idx)
929{
930 u32 counter;
931
932 if (!armv8pmu_counter_valid(idx)) {
933 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
934 smp_processor_id(), idx);
935 return -EINVAL;
936 }
937
938 counter = ARMV8_IDX_TO_COUNTER(idx);
939 asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
940 isb();
941 /* Clear the overflow flag in case an interrupt is pending. */
942 asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
943 isb();
944 return idx;
945}
946
947static inline u32 armv8pmu_getreset_flags(void)
948{
949 u32 value;
950
951 /* Read */
952 asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
953
954 /* Write to clear flags */
955 value &= ARMV8_OVSR_MASK;
956 asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
957
958 return value;
959}
960
961static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
962{
963 unsigned long flags;
964 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
965
966 /*
967 * Enable counter and interrupt, and set the counter to count
968 * the event that we're interested in.
969 */
970 raw_spin_lock_irqsave(&events->pmu_lock, flags);
971
972 /*
973 * Disable counter
974 */
975 armv8pmu_disable_counter(idx);
976
977 /*
978 * Set event (if destined for PMNx counters).
979 */
980 armv8pmu_write_evtype(idx, hwc->config_base);
981
982 /*
983 * Enable interrupt for this counter
984 */
985 armv8pmu_enable_intens(idx);
986
987 /*
988 * Enable counter
989 */
990 armv8pmu_enable_counter(idx);
991
992 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
993}
994
995static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
996{
997 unsigned long flags;
998 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
999
1000 /*
1001 * Disable counter and interrupt
1002 */
1003 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1004
1005 /*
1006 * Disable counter
1007 */
1008 armv8pmu_disable_counter(idx);
1009
1010 /*
1011 * Disable interrupt for this counter
1012 */
1013 armv8pmu_disable_intens(idx);
1014
1015 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1016}
1017
1018static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
1019{
1020 u32 pmovsr;
1021 struct perf_sample_data data;
1022 struct pmu_hw_events *cpuc;
1023 struct pt_regs *regs;
1024 int idx;
1025
1026 /*
1027 * Get and reset the IRQ flags
1028 */
1029 pmovsr = armv8pmu_getreset_flags();
1030
1031 /*
1032 * Did an overflow occur?
1033 */
1034 if (!armv8pmu_has_overflowed(pmovsr))
1035 return IRQ_NONE;
1036
1037 /*
1038 * Handle the counter(s) overflow(s)
1039 */
1040 regs = get_irq_regs();
1041
1042 cpuc = &__get_cpu_var(cpu_hw_events);
1043 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1044 struct perf_event *event = cpuc->events[idx];
1045 struct hw_perf_event *hwc;
1046
1047 /* Ignore if we don't have an event. */
1048 if (!event)
1049 continue;
1050
1051 /*
1052 * We have a single interrupt for all counters. Check that
1053 * each counter has overflowed before we process it.
1054 */
1055 if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
1056 continue;
1057
1058 hwc = &event->hw;
1059 armpmu_event_update(event, hwc, idx);
1060 perf_sample_data_init(&data, 0, hwc->last_period);
1061 if (!armpmu_event_set_period(event, hwc, idx))
1062 continue;
1063
1064 if (perf_event_overflow(event, &data, regs))
1065 cpu_pmu->disable(hwc, idx);
1066 }
1067
1068 /*
1069 * Handle the pending perf events.
1070 *
1071 * Note: this call *must* be run with interrupts disabled. For
1072 * platforms that can have the PMU interrupts raised as an NMI, this
1073 * will not work.
1074 */
1075 irq_work_run();
1076
1077 return IRQ_HANDLED;
1078}
1079
1080static void armv8pmu_start(void)
1081{
1082 unsigned long flags;
1083 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1084
1085 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1086 /* Enable all counters */
1087 armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
1088 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1089}
1090
1091static void armv8pmu_stop(void)
1092{
1093 unsigned long flags;
1094 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1095
1096 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1097 /* Disable all counters */
1098 armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
1099 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1100}
1101
1102static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
1103 struct hw_perf_event *event)
1104{
1105 int idx;
1106 unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
1107
1108 /* Always place a cycle counter into the cycle counter. */
1109 if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
1110 if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
1111 return -EAGAIN;
1112
1113 return ARMV8_IDX_CYCLE_COUNTER;
1114 }
1115
1116 /*
1117 * For anything other than a cycle counter, try to use
1118 * the event counters
1119 */
1120 for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1121 if (!test_and_set_bit(idx, cpuc->used_mask))
1122 return idx;
1123 }
1124
1125 /* The counters are all in use. */
1126 return -EAGAIN;
1127}
1128
1129/*
1130 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
1131 */
1132static int armv8pmu_set_event_filter(struct hw_perf_event *event,
1133 struct perf_event_attr *attr)
1134{
1135 unsigned long config_base = 0;
1136
1137 if (attr->exclude_idle)
1138 return -EPERM;
1139 if (attr->exclude_user)
1140 config_base |= ARMV8_EXCLUDE_EL0;
1141 if (attr->exclude_kernel)
1142 config_base |= ARMV8_EXCLUDE_EL1;
1143 if (!attr->exclude_hv)
1144 config_base |= ARMV8_INCLUDE_EL2;
1145
1146 /*
1147 * Install the filter into config_base as this is used to
1148 * construct the event type.
1149 */
1150 event->config_base = config_base;
1151
1152 return 0;
1153}
1154
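The filter simply folds the generic perf exclude_* attributes into the EL0/EL1 exclude and EL2 include bits that later reach PMXEVTYPER through config_base. A self-contained sketch of the same mapping for a hypothetical "user space only" request, with the bit values copied from the #defines above:

#include <stdint.h>
#include <stdio.h>

#define ARMV8_EXCLUDE_EL1 (1U << 31)
#define ARMV8_EXCLUDE_EL0 (1U << 30)
#define ARMV8_INCLUDE_EL2 (1U << 27)

int main(void)
{
        /* Hypothetical request: count user space only (exclude the kernel). */
        int exclude_user = 0, exclude_kernel = 1, exclude_hv = 1;
        uint32_t config_base = 0;

        if (exclude_user)
                config_base |= ARMV8_EXCLUDE_EL0;
        if (exclude_kernel)
                config_base |= ARMV8_EXCLUDE_EL1;
        if (!exclude_hv)
                config_base |= ARMV8_INCLUDE_EL2;

        printf("config_base = %#x\n", config_base);   /* 0x80000000 */
        return 0;
}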
1155static void armv8pmu_reset(void *info)
1156{
1157 u32 idx, nb_cnt = cpu_pmu->num_events;
1158
1159 /* The counter and interrupt enable registers are unknown at reset. */
1160 for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
1161 armv8pmu_disable_event(NULL, idx);
1162
1163 /* Initialize & Reset PMNC: C and P bits. */
1164 armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
1165
1166 /* Disable access from userspace. */
1167 asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
1168}
1169
1170static int armv8_pmuv3_map_event(struct perf_event *event)
1171{
1172 return map_cpu_event(event, &armv8_pmuv3_perf_map,
1173 &armv8_pmuv3_perf_cache_map, 0xFF);
1174}
1175
1176static struct arm_pmu armv8pmu = {
1177 .handle_irq = armv8pmu_handle_irq,
1178 .enable = armv8pmu_enable_event,
1179 .disable = armv8pmu_disable_event,
1180 .read_counter = armv8pmu_read_counter,
1181 .write_counter = armv8pmu_write_counter,
1182 .get_event_idx = armv8pmu_get_event_idx,
1183 .start = armv8pmu_start,
1184 .stop = armv8pmu_stop,
1185 .reset = armv8pmu_reset,
1186 .max_period = (1LLU << 32) - 1,
1187};
1188
1189static u32 __init armv8pmu_read_num_pmnc_events(void)
1190{
1191 u32 nb_cnt;
1192
1193 /* Read the nb of CNTx counters supported from PMNC */
1194 nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
1195
1196 /* Add the CPU cycles counter and return */
1197 return nb_cnt + 1;
1198}
1199
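The event-counter count is read straight out of the PMCR_EL0.N field (5 bits starting at bit 11), and the cycle counter is then added on top. A self-contained sketch of the field extraction for a made-up PMCR value:

#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMCR_N_SHIFT 11
#define ARMV8_PMCR_N_MASK  0x1f

int main(void)
{
        uint32_t pmcr = 6 << ARMV8_PMCR_N_SHIFT;   /* hypothetical: N = 6 */
        uint32_t nb_cnt = (pmcr >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

        printf("%u event counters + 1 cycle counter\n", nb_cnt);
        return 0;
}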
1200static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
1201{
1202 armv8pmu.name = "arm/armv8-pmuv3";
1203 armv8pmu.map_event = armv8_pmuv3_map_event;
1204 armv8pmu.num_events = armv8pmu_read_num_pmnc_events();
1205 armv8pmu.set_event_filter = armv8pmu_set_event_filter;
1206 return &armv8pmu;
1207}
1208
1209/*
1210 * Ensure the PMU has sane values out of reset.
1211 * This requires SMP to be available, so exists as a separate initcall.
1212 */
1213static int __init
1214cpu_pmu_reset(void)
1215{
1216 if (cpu_pmu && cpu_pmu->reset)
1217 return on_each_cpu(cpu_pmu->reset, NULL, 1);
1218 return 0;
1219}
1220arch_initcall(cpu_pmu_reset);
1221
1222/*
1223 * PMU platform driver and devicetree bindings.
1224 */
1225static struct of_device_id armpmu_of_device_ids[] = {
1226 {.compatible = "arm,armv8-pmuv3"},
1227 {},
1228};
1229
1230static int __devinit armpmu_device_probe(struct platform_device *pdev)
1231{
1232 if (!cpu_pmu)
1233 return -ENODEV;
1234
1235 cpu_pmu->plat_device = pdev;
1236 return 0;
1237}
1238
1239static struct platform_driver armpmu_driver = {
1240 .driver = {
1241 .name = "arm-pmu",
1242 .of_match_table = armpmu_of_device_ids,
1243 },
1244 .probe = armpmu_device_probe,
1245};
1246
1247static int __init register_pmu_driver(void)
1248{
1249 return platform_driver_register(&armpmu_driver);
1250}
1251device_initcall(register_pmu_driver);
1252
1253static struct pmu_hw_events *armpmu_get_cpu_events(void)
1254{
1255 return &__get_cpu_var(cpu_hw_events);
1256}
1257
1258static void __init cpu_pmu_init(struct arm_pmu *armpmu)
1259{
1260 int cpu;
1261 for_each_possible_cpu(cpu) {
1262 struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
1263 events->events = per_cpu(hw_events, cpu);
1264 events->used_mask = per_cpu(used_mask, cpu);
1265 raw_spin_lock_init(&events->pmu_lock);
1266 }
1267 armpmu->get_hw_events = armpmu_get_cpu_events;
1268}
1269
1270static int __init init_hw_perf_events(void)
1271{
1272 u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
1273
1274 switch ((dfr >> 8) & 0xf) {
1275 case 0x1: /* PMUv3 */
1276 cpu_pmu = armv8_pmuv3_pmu_init();
1277 break;
1278 }
1279
1280 if (cpu_pmu) {
1281 pr_info("enabled with %s PMU driver, %d counters available\n",
1282 cpu_pmu->name, cpu_pmu->num_events);
1283 cpu_pmu_init(cpu_pmu);
1284 armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
1285 } else {
1286 pr_info("no hardware support available\n");
1287 }
1288
1289 return 0;
1290}
1291early_initcall(init_hw_perf_events);
1292
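PMU probing keys off ID_AA64DFR0_EL1: the field at bits 11:8 holds the PMU architecture version, and the value 0x1 identifies PMUv3, the only case handled above. A self-contained sketch of the extraction for a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dfr = 0x0000000000000106ULL;   /* hypothetical ID_AA64DFR0_EL1 */
        unsigned int pmuver = (dfr >> 8) & 0xf;

        if (pmuver == 0x1)
                printf("PMUv3 present\n");
        else
                printf("no recognised PMU (PMUVer=%#x)\n", pmuver);
        return 0;
}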
1293/*
1294 * Callchain handling code.
1295 */
1296struct frame_tail {
1297 struct frame_tail __user *fp;
1298 unsigned long lr;
1299} __attribute__((packed));
1300
1301/*
1302 * Get the return address for a single stackframe and return a pointer to the
1303 * next frame tail.
1304 */
1305static struct frame_tail __user *
1306user_backtrace(struct frame_tail __user *tail,
1307 struct perf_callchain_entry *entry)
1308{
1309 struct frame_tail buftail;
1310 unsigned long err;
1311
1312 /* Also check accessibility of one struct frame_tail beyond */
1313 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
1314 return NULL;
1315
1316 pagefault_disable();
1317 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
1318 pagefault_enable();
1319
1320 if (err)
1321 return NULL;
1322
1323 perf_callchain_store(entry, buftail.lr);
1324
1325 /*
1326 * Frame pointers should strictly progress back up the stack
1327 * (towards higher addresses).
1328 */
1329 if (tail >= buftail.fp)
1330 return NULL;
1331
1332 return buftail.fp;
1333}
1334
1335void perf_callchain_user(struct perf_callchain_entry *entry,
1336 struct pt_regs *regs)
1337{
1338 struct frame_tail __user *tail;
1339
1340 tail = (struct frame_tail __user *)regs->regs[29];
1341
1342 while (entry->nr < PERF_MAX_STACK_DEPTH &&
1343 tail && !((unsigned long)tail & 0xf))
1344 tail = user_backtrace(tail, entry);
1345}
1346
1347/*
1348 * Gets called by walk_stackframe() for every stackframe. This will be called
1349 * whilst unwinding the stackframe and is like a subroutine return so we use
1350 * the PC.
1351 */
1352static int callchain_trace(struct stackframe *frame, void *data)
1353{
1354 struct perf_callchain_entry *entry = data;
1355 perf_callchain_store(entry, frame->pc);
1356 return 0;
1357}
1358
1359void perf_callchain_kernel(struct perf_callchain_entry *entry,
1360 struct pt_regs *regs)
1361{
1362 struct stackframe frame;
1363
1364 frame.fp = regs->regs[29];
1365 frame.sp = regs->sp;
1366 frame.pc = regs->pc;
1367 walk_stackframe(&frame, callchain_trace, entry);
1368}
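
The user_backtrace()/perf_callchain_user() pair above relies on the AArch64 frame-record convention: x29 points at an {fp, lr} pair and successive records live at strictly higher addresses. A minimal userspace sketch under that assumption (the struct and function names are illustrative; the code must be built with frame pointers enabled):

#include <stdio.h>

struct frame_record {			/* mirrors struct frame_tail above */
	struct frame_record *fp;	/* caller's x29 */
	unsigned long lr;		/* return address */
};

static void dump_callchain(void)
{
	struct frame_record *fr = __builtin_frame_address(0);

	/* same sanity checks as perf_callchain_user()/user_backtrace() */
	while (fr && !((unsigned long)fr & 0xf)) {
		printf("return address: %#lx\n", fr->lr);
		if (fr >= fr->fp)	/* frames must move towards higher addresses */
			break;
		fr = fr->fp;
	}
}

int main(void)
{
	dump_callchain();
	return 0;
}
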
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
new file mode 100644
index 000000000000..f22965ea1cfc
--- /dev/null
+++ b/arch/arm64/kernel/process.c
@@ -0,0 +1,408 @@
1/*
2 * Based on arch/arm/kernel/process.c
3 *
4 * Original Copyright (C) 1995 Linus Torvalds
5 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <stdarg.h>
22
23#include <linux/export.h>
24#include <linux/sched.h>
25#include <linux/kernel.h>
26#include <linux/mm.h>
27#include <linux/stddef.h>
28#include <linux/unistd.h>
29#include <linux/user.h>
30#include <linux/delay.h>
31#include <linux/reboot.h>
32#include <linux/interrupt.h>
33#include <linux/kallsyms.h>
34#include <linux/init.h>
35#include <linux/cpu.h>
36#include <linux/elfcore.h>
37#include <linux/pm.h>
38#include <linux/tick.h>
39#include <linux/utsname.h>
40#include <linux/uaccess.h>
41#include <linux/random.h>
42#include <linux/hw_breakpoint.h>
43#include <linux/personality.h>
44#include <linux/notifier.h>
45
46#include <asm/compat.h>
47#include <asm/cacheflush.h>
48#include <asm/processor.h>
49#include <asm/stacktrace.h>
50#include <asm/fpsimd.h>
51
52static void setup_restart(void)
53{
54 /*
55 * Tell the mm system that we are going to reboot -
56 * we may need it to insert some 1:1 mappings so that
57 * soft boot works.
58 */
59 setup_mm_for_reboot();
60
61 /* Clean and invalidate caches */
62 flush_cache_all();
63
64 /* Turn D-cache off */
65 cpu_cache_off();
66
67 /* Push out any further dirty data, and ensure cache is empty */
68 flush_cache_all();
69}
70
71void soft_restart(unsigned long addr)
72{
73 setup_restart();
74 cpu_reset(addr);
75}
76
77/*
78 * Function pointers to optional machine specific functions
79 */
80void (*pm_power_off)(void);
81EXPORT_SYMBOL_GPL(pm_power_off);
82
83void (*pm_restart)(const char *cmd);
84EXPORT_SYMBOL_GPL(pm_restart);
85
86
87/*
88 * This is our default idle handler.
89 */
90static void default_idle(void)
91{
92 /*
93 * This should do all the clock switching and wait-for-interrupt
94 * tricks.
95 */
96 cpu_do_idle();
97 local_irq_enable();
98}
99
100void (*pm_idle)(void) = default_idle;
101EXPORT_SYMBOL_GPL(pm_idle);
102
103/*
104 * The idle thread has rather strange semantics for calling pm_idle,
105 * but this is what x86 does and we need to do the same, so that
106 * things like cpuidle get called in the same way. The only difference
107 * is that we always respect 'hlt_counter' to prevent low power idle.
108 */
109void cpu_idle(void)
110{
111 local_fiq_enable();
112
113 /* endless idle loop with no priority at all */
114 while (1) {
115 tick_nohz_idle_enter();
116 rcu_idle_enter();
117 while (!need_resched()) {
118 /*
119 * We need to disable interrupts here to ensure
120 * we don't miss a wakeup call.
121 */
122 local_irq_disable();
123 if (!need_resched()) {
124 stop_critical_timings();
125 pm_idle();
126 start_critical_timings();
127 /*
128 * pm_idle functions should always return
129 * with IRQs enabled.
130 */
131 WARN_ON(irqs_disabled());
132 } else {
133 local_irq_enable();
134 }
135 }
136 rcu_idle_exit();
137 tick_nohz_idle_exit();
138 schedule_preempt_disabled();
139 }
140}
141
142void machine_shutdown(void)
143{
144#ifdef CONFIG_SMP
145 smp_send_stop();
146#endif
147}
148
149void machine_halt(void)
150{
151 machine_shutdown();
152 while (1);
153}
154
155void machine_power_off(void)
156{
157 machine_shutdown();
158 if (pm_power_off)
159 pm_power_off();
160}
161
162void machine_restart(char *cmd)
163{
164 machine_shutdown();
165
166 /* Disable interrupts first */
167 local_irq_disable();
168 local_fiq_disable();
169
170 /* Now call the architecture specific reboot code. */
171 if (pm_restart)
172 pm_restart(cmd);
173
174 /*
175 * Whoops - the architecture was unable to reboot.
176 */
177 printk("Reboot failed -- System halted\n");
178 while (1);
179}
180
181void __show_regs(struct pt_regs *regs)
182{
183 int i;
184
185 printk("CPU: %d %s (%s %.*s)\n",
186 raw_smp_processor_id(), print_tainted(),
187 init_utsname()->release,
188 (int)strcspn(init_utsname()->version, " "),
189 init_utsname()->version);
190 print_symbol("PC is at %s\n", instruction_pointer(regs));
191 print_symbol("LR is at %s\n", regs->regs[30]);
192 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
193 regs->pc, regs->regs[30], regs->pstate);
194 printk("sp : %016llx\n", regs->sp);
195 for (i = 29; i >= 0; i--) {
196 printk("x%-2d: %016llx ", i, regs->regs[i]);
197 if (i % 2 == 0)
198 printk("\n");
199 }
200 printk("\n");
201}
202
203void show_regs(struct pt_regs * regs)
204{
205 printk("\n");
206 printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
207 __show_regs(regs);
208}
209
210/*
211 * Free current thread data structures etc..
212 */
213void exit_thread(void)
214{
215}
216
217void flush_thread(void)
218{
219 fpsimd_flush_thread();
220 flush_ptrace_hw_breakpoint(current);
221}
222
223void release_thread(struct task_struct *dead_task)
224{
225}
226
227int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
228{
229 fpsimd_save_state(&current->thread.fpsimd_state);
230 *dst = *src;
231 return 0;
232}
233
234asmlinkage void ret_from_fork(void) asm("ret_from_fork");
235
236int copy_thread(unsigned long clone_flags, unsigned long stack_start,
237 unsigned long stk_sz, struct task_struct *p,
238 struct pt_regs *regs)
239{
240 struct pt_regs *childregs = task_pt_regs(p);
241 unsigned long tls = p->thread.tp_value;
242
243 *childregs = *regs;
244 childregs->regs[0] = 0;
245
246 if (is_compat_thread(task_thread_info(p)))
247 childregs->compat_sp = stack_start;
248 else {
249 /*
250 * Read the current TLS pointer from tpidr_el0 as it may be
251 * out-of-sync with the saved value.
252 */
253 asm("mrs %0, tpidr_el0" : "=r" (tls));
254 childregs->sp = stack_start;
255 }
256
257 memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
258 p->thread.cpu_context.sp = (unsigned long)childregs;
259 p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
260
261 /* If a TLS pointer was passed to clone, use that for the new thread. */
262 if (clone_flags & CLONE_SETTLS)
263 tls = regs->regs[3];
264 p->thread.tp_value = tls;
265
266 ptrace_hw_copy_thread(p);
267
268 return 0;
269}
270
271static void tls_thread_switch(struct task_struct *next)
272{
273 unsigned long tpidr, tpidrro;
274
275 if (!is_compat_task()) {
276 asm("mrs %0, tpidr_el0" : "=r" (tpidr));
277 current->thread.tp_value = tpidr;
278 }
279
280 if (is_compat_thread(task_thread_info(next))) {
281 tpidr = 0;
282 tpidrro = next->thread.tp_value;
283 } else {
284 tpidr = next->thread.tp_value;
285 tpidrro = 0;
286 }
287
288 asm(
289 " msr tpidr_el0, %0\n"
290 " msr tpidrro_el0, %1"
291 : : "r" (tpidr), "r" (tpidrro));
292}
293
294/*
295 * Thread switching.
296 */
297struct task_struct *__switch_to(struct task_struct *prev,
298 struct task_struct *next)
299{
300 struct task_struct *last;
301
302 fpsimd_thread_switch(next);
303 tls_thread_switch(next);
304 hw_breakpoint_thread_switch(next);
305
306 /* the actual thread switch */
307 last = cpu_switch_to(prev, next);
308
309 return last;
310}
311
312/*
313 * Fill in the task's elfregs structure for a core dump.
314 */
315int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
316{
317 elf_core_copy_regs(elfregs, task_pt_regs(t));
318 return 1;
319}
320
321/*
322 * fill in the fpe structure for a core dump...
323 */
324int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
325{
326 return 0;
327}
328EXPORT_SYMBOL(dump_fpu);
329
330/*
331 * Shuffle the argument into the correct register before calling the
332 * thread function. x1 is the thread argument, x2 is the pointer to
333 * the thread function, and x3 points to the exit function.
334 */
335extern void kernel_thread_helper(void);
336asm( ".section .text\n"
337" .align\n"
338" .type kernel_thread_helper, #function\n"
339"kernel_thread_helper:\n"
340" mov x0, x1\n"
341" mov x30, x3\n"
342" br x2\n"
343" .size kernel_thread_helper, . - kernel_thread_helper\n"
344" .previous");
345
346#define kernel_thread_exit do_exit
347
348/*
349 * Create a kernel thread.
350 */
351pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
352{
353 struct pt_regs regs;
354
355 memset(&regs, 0, sizeof(regs));
356
357 regs.regs[1] = (unsigned long)arg;
358 regs.regs[2] = (unsigned long)fn;
359 regs.regs[3] = (unsigned long)kernel_thread_exit;
360 regs.pc = (unsigned long)kernel_thread_helper;
361 regs.pstate = PSR_MODE_EL1h;
362
363 return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
364}
365EXPORT_SYMBOL(kernel_thread);
366
367unsigned long get_wchan(struct task_struct *p)
368{
369 struct stackframe frame;
370 int count = 0;
371 if (!p || p == current || p->state == TASK_RUNNING)
372 return 0;
373
374 frame.fp = thread_saved_fp(p);
375 frame.sp = thread_saved_sp(p);
376 frame.pc = thread_saved_pc(p);
377 do {
378 int ret = unwind_frame(&frame);
379 if (ret < 0)
380 return 0;
381 if (!in_sched_functions(frame.pc))
382 return frame.pc;
383	} while (count++ < 16);
384 return 0;
385}
386
387unsigned long arch_align_stack(unsigned long sp)
388{
389 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
390 sp -= get_random_int() & ~PAGE_MASK;
391 return sp & ~0xf;
392}
393
394static unsigned long randomize_base(unsigned long base)
395{
396 unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;
397 return randomize_range(base, range_end, 0) ? : base;
398}
399
400unsigned long arch_randomize_brk(struct mm_struct *mm)
401{
402 return randomize_base(mm->brk);
403}
404
405unsigned long randomize_et_dyn(unsigned long base)
406{
407 return randomize_base(base);
408}
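
The TLS handling above keeps the native thread pointer in tpidr_el0 and the compat (AArch32) one in tpidrro_el0, so the value installed by copy_thread()/tls_thread_switch() for a 64-bit task is directly visible to it. A minimal sketch, assuming an AArch64 build:

#include <stdio.h>

int main(void)
{
	unsigned long tls;

	/* the same register that tls_thread_switch() writes for native tasks */
	asm("mrs %0, tpidr_el0" : "=r" (tls));
	printf("tpidr_el0 = %#lx\n", tls);
	return 0;
}
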
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
new file mode 100644
index 000000000000..ac3550ecc7b5
--- /dev/null
+++ b/arch/arm64/kernel/ptrace.c
@@ -0,0 +1,1126 @@
1/*
2 * Based on arch/arm/kernel/ptrace.c
3 *
4 * By Ross Biro 1/23/92
5 * edited by Linus Torvalds
6 * ARM modifications Copyright (C) 2000 Russell King
7 * Copyright (C) 2012 ARM Ltd.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/ptrace.h>
27#include <linux/user.h>
28#include <linux/security.h>
29#include <linux/init.h>
30#include <linux/signal.h>
31#include <linux/uaccess.h>
32#include <linux/perf_event.h>
33#include <linux/hw_breakpoint.h>
34#include <linux/regset.h>
35#include <linux/tracehook.h>
36#include <linux/elf.h>
37
38#include <asm/compat.h>
39#include <asm/debug-monitors.h>
40#include <asm/pgtable.h>
41#include <asm/traps.h>
42#include <asm/system_misc.h>
43
44/*
45 * TODO: does not yet catch signals sent when the child dies
46 * in exit.c or in signal.c.
47 */
48
49/*
50 * Called by kernel/ptrace.c when detaching..
51 */
52void ptrace_disable(struct task_struct *child)
53{
54}
55
56/*
57 * Handle hitting a breakpoint.
58 */
59static int ptrace_break(struct pt_regs *regs)
60{
61 siginfo_t info = {
62 .si_signo = SIGTRAP,
63 .si_errno = 0,
64 .si_code = TRAP_BRKPT,
65 .si_addr = (void __user *)instruction_pointer(regs),
66 };
67
68 force_sig_info(SIGTRAP, &info, current);
69 return 0;
70}
71
72static int arm64_break_trap(unsigned long addr, unsigned int esr,
73 struct pt_regs *regs)
74{
75 return ptrace_break(regs);
76}
77
78#ifdef CONFIG_HAVE_HW_BREAKPOINT
79/*
80 * Handle hitting a HW-breakpoint.
81 */
82static void ptrace_hbptriggered(struct perf_event *bp,
83 struct perf_sample_data *data,
84 struct pt_regs *regs)
85{
86 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
87 siginfo_t info = {
88 .si_signo = SIGTRAP,
89 .si_errno = 0,
90 .si_code = TRAP_HWBKPT,
91 .si_addr = (void __user *)(bkpt->trigger),
92 };
93
94#ifdef CONFIG_COMPAT
95 int i;
96
97 if (!is_compat_task())
98 goto send_sig;
99
100 for (i = 0; i < ARM_MAX_BRP; ++i) {
101 if (current->thread.debug.hbp_break[i] == bp) {
102 info.si_errno = (i << 1) + 1;
103 break;
104 }
105 }
106 for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
107 if (current->thread.debug.hbp_watch[i] == bp) {
108 info.si_errno = -((i << 1) + 1);
109 break;
110 }
111 }
112
113send_sig:
114#endif
115 force_sig_info(SIGTRAP, &info, current);
116}
117
118/*
119 * Unregister breakpoints from this task and reset the pointers in
120 * the thread_struct.
121 */
122void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
123{
124 int i;
125 struct thread_struct *t = &tsk->thread;
126
127 for (i = 0; i < ARM_MAX_BRP; i++) {
128 if (t->debug.hbp_break[i]) {
129 unregister_hw_breakpoint(t->debug.hbp_break[i]);
130 t->debug.hbp_break[i] = NULL;
131 }
132 }
133
134 for (i = 0; i < ARM_MAX_WRP; i++) {
135 if (t->debug.hbp_watch[i]) {
136 unregister_hw_breakpoint(t->debug.hbp_watch[i]);
137 t->debug.hbp_watch[i] = NULL;
138 }
139 }
140}
141
142void ptrace_hw_copy_thread(struct task_struct *tsk)
143{
144 memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
145}
146
147static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
148 struct task_struct *tsk,
149 unsigned long idx)
150{
151 struct perf_event *bp = ERR_PTR(-EINVAL);
152
153 switch (note_type) {
154 case NT_ARM_HW_BREAK:
155 if (idx < ARM_MAX_BRP)
156 bp = tsk->thread.debug.hbp_break[idx];
157 break;
158 case NT_ARM_HW_WATCH:
159 if (idx < ARM_MAX_WRP)
160 bp = tsk->thread.debug.hbp_watch[idx];
161 break;
162 }
163
164 return bp;
165}
166
167static int ptrace_hbp_set_event(unsigned int note_type,
168 struct task_struct *tsk,
169 unsigned long idx,
170 struct perf_event *bp)
171{
172 int err = -EINVAL;
173
174 switch (note_type) {
175 case NT_ARM_HW_BREAK:
176 if (idx < ARM_MAX_BRP) {
177 tsk->thread.debug.hbp_break[idx] = bp;
178 err = 0;
179 }
180 break;
181 case NT_ARM_HW_WATCH:
182 if (idx < ARM_MAX_WRP) {
183 tsk->thread.debug.hbp_watch[idx] = bp;
184 err = 0;
185 }
186 break;
187 }
188
189 return err;
190}
191
192static struct perf_event *ptrace_hbp_create(unsigned int note_type,
193 struct task_struct *tsk,
194 unsigned long idx)
195{
196 struct perf_event *bp;
197 struct perf_event_attr attr;
198 int err, type;
199
200 switch (note_type) {
201 case NT_ARM_HW_BREAK:
202 type = HW_BREAKPOINT_X;
203 break;
204 case NT_ARM_HW_WATCH:
205 type = HW_BREAKPOINT_RW;
206 break;
207 default:
208 return ERR_PTR(-EINVAL);
209 }
210
211 ptrace_breakpoint_init(&attr);
212
213 /*
214 * Initialise fields to sane defaults
215 * (i.e. values that will pass validation).
216 */
217 attr.bp_addr = 0;
218 attr.bp_len = HW_BREAKPOINT_LEN_4;
219 attr.bp_type = type;
220 attr.disabled = 1;
221
222 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
223 if (IS_ERR(bp))
224 return bp;
225
226 err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
227 if (err)
228 return ERR_PTR(err);
229
230 return bp;
231}
232
233static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
234 struct arch_hw_breakpoint_ctrl ctrl,
235 struct perf_event_attr *attr)
236{
237 int err, len, type;
238
239 err = arch_bp_generic_fields(ctrl, &len, &type);
240 if (err)
241 return err;
242
243 switch (note_type) {
244 case NT_ARM_HW_BREAK:
245 if ((type & HW_BREAKPOINT_X) != type)
246 return -EINVAL;
247 break;
248 case NT_ARM_HW_WATCH:
249 if ((type & HW_BREAKPOINT_RW) != type)
250 return -EINVAL;
251 break;
252 default:
253 return -EINVAL;
254 }
255
256 attr->bp_len = len;
257 attr->bp_type = type;
258 attr->disabled = !ctrl.enabled;
259
260 return 0;
261}
262
263static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
264{
265 u8 num;
266 u32 reg = 0;
267
268 switch (note_type) {
269 case NT_ARM_HW_BREAK:
270 num = hw_breakpoint_slots(TYPE_INST);
271 break;
272 case NT_ARM_HW_WATCH:
273 num = hw_breakpoint_slots(TYPE_DATA);
274 break;
275 default:
276 return -EINVAL;
277 }
278
279 reg |= debug_monitors_arch();
280 reg <<= 8;
281 reg |= num;
282
283 *info = reg;
284 return 0;
285}
286
287static int ptrace_hbp_get_ctrl(unsigned int note_type,
288 struct task_struct *tsk,
289 unsigned long idx,
290 u32 *ctrl)
291{
292 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
293
294 if (IS_ERR(bp))
295 return PTR_ERR(bp);
296
297 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
298 return 0;
299}
300
301static int ptrace_hbp_get_addr(unsigned int note_type,
302 struct task_struct *tsk,
303 unsigned long idx,
304 u64 *addr)
305{
306 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
307
308 if (IS_ERR(bp))
309 return PTR_ERR(bp);
310
311 *addr = bp ? bp->attr.bp_addr : 0;
312 return 0;
313}
314
315static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
316 struct task_struct *tsk,
317 unsigned long idx)
318{
319 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
320
321 if (!bp)
322 bp = ptrace_hbp_create(note_type, tsk, idx);
323
324 return bp;
325}
326
327static int ptrace_hbp_set_ctrl(unsigned int note_type,
328 struct task_struct *tsk,
329 unsigned long idx,
330 u32 uctrl)
331{
332 int err;
333 struct perf_event *bp;
334 struct perf_event_attr attr;
335 struct arch_hw_breakpoint_ctrl ctrl;
336
337 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
338 if (IS_ERR(bp)) {
339 err = PTR_ERR(bp);
340 return err;
341 }
342
343 attr = bp->attr;
344 decode_ctrl_reg(uctrl, &ctrl);
345 err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
346 if (err)
347 return err;
348
349 return modify_user_hw_breakpoint(bp, &attr);
350}
351
352static int ptrace_hbp_set_addr(unsigned int note_type,
353 struct task_struct *tsk,
354 unsigned long idx,
355 u64 addr)
356{
357 int err;
358 struct perf_event *bp;
359 struct perf_event_attr attr;
360
361 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
362 if (IS_ERR(bp)) {
363 err = PTR_ERR(bp);
364 return err;
365 }
366
367 attr = bp->attr;
368 attr.bp_addr = addr;
369 err = modify_user_hw_breakpoint(bp, &attr);
370 return err;
371}
372
373#define PTRACE_HBP_ADDR_SZ sizeof(u64)
374#define PTRACE_HBP_CTRL_SZ sizeof(u32)
375#define PTRACE_HBP_REG_OFF sizeof(u32)
376
377static int hw_break_get(struct task_struct *target,
378 const struct user_regset *regset,
379 unsigned int pos, unsigned int count,
380 void *kbuf, void __user *ubuf)
381{
382 unsigned int note_type = regset->core_note_type;
383 int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
384 u32 info, ctrl;
385 u64 addr;
386
387 /* Resource info */
388 ret = ptrace_hbp_get_resource_info(note_type, &info);
389 if (ret)
390 return ret;
391
392 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0, 4);
393 if (ret)
394 return ret;
395
396 /* (address, ctrl) registers */
397 limit = regset->n * regset->size;
398 while (count && offset < limit) {
399 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
400 if (ret)
401 return ret;
402 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
403 offset, offset + PTRACE_HBP_ADDR_SZ);
404 if (ret)
405 return ret;
406 offset += PTRACE_HBP_ADDR_SZ;
407
408 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
409 if (ret)
410 return ret;
411 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
412 offset, offset + PTRACE_HBP_CTRL_SZ);
413 if (ret)
414 return ret;
415 offset += PTRACE_HBP_CTRL_SZ;
416 idx++;
417 }
418
419 return 0;
420}
421
422static int hw_break_set(struct task_struct *target,
423 const struct user_regset *regset,
424 unsigned int pos, unsigned int count,
425 const void *kbuf, const void __user *ubuf)
426{
427 unsigned int note_type = regset->core_note_type;
428 int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
429 u32 ctrl;
430 u64 addr;
431
432 /* Resource info */
433 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
434 if (ret)
435 return ret;
436
437 /* (address, ctrl) registers */
438 limit = regset->n * regset->size;
439 while (count && offset < limit) {
440 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
441 offset, offset + PTRACE_HBP_ADDR_SZ);
442 if (ret)
443 return ret;
444 ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
445 if (ret)
446 return ret;
447 offset += PTRACE_HBP_ADDR_SZ;
448
449 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
450 offset, offset + PTRACE_HBP_CTRL_SZ);
451 if (ret)
452 return ret;
453 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
454 if (ret)
455 return ret;
456 offset += PTRACE_HBP_CTRL_SZ;
457 idx++;
458 }
459
460 return 0;
461}
462#endif /* CONFIG_HAVE_HW_BREAKPOINT */
463
464static int gpr_get(struct task_struct *target,
465 const struct user_regset *regset,
466 unsigned int pos, unsigned int count,
467 void *kbuf, void __user *ubuf)
468{
469 struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
470 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
471}
472
473static int gpr_set(struct task_struct *target, const struct user_regset *regset,
474 unsigned int pos, unsigned int count,
475 const void *kbuf, const void __user *ubuf)
476{
477 int ret;
478 struct user_pt_regs newregs;
479
480 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
481 if (ret)
482 return ret;
483
484 if (!valid_user_regs(&newregs))
485 return -EINVAL;
486
487 task_pt_regs(target)->user_regs = newregs;
488 return 0;
489}
490
491/*
492 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
493 */
494static int fpr_get(struct task_struct *target, const struct user_regset *regset,
495 unsigned int pos, unsigned int count,
496 void *kbuf, void __user *ubuf)
497{
498 struct user_fpsimd_state *uregs;
499 uregs = &target->thread.fpsimd_state.user_fpsimd;
500 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
501}
502
503static int fpr_set(struct task_struct *target, const struct user_regset *regset,
504 unsigned int pos, unsigned int count,
505 const void *kbuf, const void __user *ubuf)
506{
507 int ret;
508 struct user_fpsimd_state newstate;
509
510 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
511 if (ret)
512 return ret;
513
514 target->thread.fpsimd_state.user_fpsimd = newstate;
515 return ret;
516}
517
518static int tls_get(struct task_struct *target, const struct user_regset *regset,
519 unsigned int pos, unsigned int count,
520 void *kbuf, void __user *ubuf)
521{
522 unsigned long *tls = &target->thread.tp_value;
523 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
524}
525
526static int tls_set(struct task_struct *target, const struct user_regset *regset,
527 unsigned int pos, unsigned int count,
528 const void *kbuf, const void __user *ubuf)
529{
530 int ret;
531 unsigned long tls;
532
533 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
534 if (ret)
535 return ret;
536
537 target->thread.tp_value = tls;
538 return ret;
539}
540
541enum aarch64_regset {
542 REGSET_GPR,
543 REGSET_FPR,
544 REGSET_TLS,
545#ifdef CONFIG_HAVE_HW_BREAKPOINT
546 REGSET_HW_BREAK,
547 REGSET_HW_WATCH,
548#endif
549};
550
551static const struct user_regset aarch64_regsets[] = {
552 [REGSET_GPR] = {
553 .core_note_type = NT_PRSTATUS,
554 .n = sizeof(struct user_pt_regs) / sizeof(u64),
555 .size = sizeof(u64),
556 .align = sizeof(u64),
557 .get = gpr_get,
558 .set = gpr_set
559 },
560 [REGSET_FPR] = {
561 .core_note_type = NT_PRFPREG,
562 .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
563 /*
564 * We pretend we have 32-bit registers because the fpsr and
565 * fpcr are 32-bits wide.
566 */
567 .size = sizeof(u32),
568 .align = sizeof(u32),
569 .get = fpr_get,
570 .set = fpr_set
571 },
572 [REGSET_TLS] = {
573 .core_note_type = NT_ARM_TLS,
574 .n = 1,
575 .size = sizeof(void *),
576 .align = sizeof(void *),
577 .get = tls_get,
578 .set = tls_set,
579 },
580#ifdef CONFIG_HAVE_HW_BREAKPOINT
581 [REGSET_HW_BREAK] = {
582 .core_note_type = NT_ARM_HW_BREAK,
583 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
584 .size = sizeof(u32),
585 .align = sizeof(u32),
586 .get = hw_break_get,
587 .set = hw_break_set,
588 },
589 [REGSET_HW_WATCH] = {
590 .core_note_type = NT_ARM_HW_WATCH,
591 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
592 .size = sizeof(u32),
593 .align = sizeof(u32),
594 .get = hw_break_get,
595 .set = hw_break_set,
596 },
597#endif
598};
599
600static const struct user_regset_view user_aarch64_view = {
601 .name = "aarch64", .e_machine = EM_AARCH64,
602 .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
603};
604
605#ifdef CONFIG_COMPAT
606#include <linux/compat.h>
607
608enum compat_regset {
609 REGSET_COMPAT_GPR,
610 REGSET_COMPAT_VFP,
611};
612
613static int compat_gpr_get(struct task_struct *target,
614 const struct user_regset *regset,
615 unsigned int pos, unsigned int count,
616 void *kbuf, void __user *ubuf)
617{
618 int ret = 0;
619 unsigned int i, start, num_regs;
620
621 /* Calculate the number of AArch32 registers contained in count */
622 num_regs = count / regset->size;
623
624	/* Convert pos into a register number */
625 start = pos / regset->size;
626
627 if (start + num_regs > regset->n)
628 return -EIO;
629
630 for (i = 0; i < num_regs; ++i) {
631 unsigned int idx = start + i;
632 void *reg;
633
634 switch (idx) {
635 case 15:
636 reg = (void *)&task_pt_regs(target)->pc;
637 break;
638 case 16:
639 reg = (void *)&task_pt_regs(target)->pstate;
640 break;
641 case 17:
642 reg = (void *)&task_pt_regs(target)->orig_x0;
643 break;
644 default:
645 reg = (void *)&task_pt_regs(target)->regs[idx];
646 }
647
648 ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
649
650 if (ret)
651 break;
652 else
653 ubuf += sizeof(compat_ulong_t);
654 }
655
656 return ret;
657}
658
659static int compat_gpr_set(struct task_struct *target,
660 const struct user_regset *regset,
661 unsigned int pos, unsigned int count,
662 const void *kbuf, const void __user *ubuf)
663{
664 struct pt_regs newregs;
665 int ret = 0;
666 unsigned int i, start, num_regs;
667
668 /* Calculate the number of AArch32 registers contained in count */
669 num_regs = count / regset->size;
670
671	/* Convert pos into a register number */
672 start = pos / regset->size;
673
674 if (start + num_regs > regset->n)
675 return -EIO;
676
677 newregs = *task_pt_regs(target);
678
679 for (i = 0; i < num_regs; ++i) {
680 unsigned int idx = start + i;
681 void *reg;
682
683 switch (idx) {
684 case 15:
685 reg = (void *)&newregs.pc;
686 break;
687 case 16:
688 reg = (void *)&newregs.pstate;
689 break;
690 case 17:
691 reg = (void *)&newregs.orig_x0;
692 break;
693 default:
694 reg = (void *)&newregs.regs[idx];
695 }
696
697 ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
698
699 if (ret)
700 goto out;
701 else
702 ubuf += sizeof(compat_ulong_t);
703 }
704
705 if (valid_user_regs(&newregs.user_regs))
706 *task_pt_regs(target) = newregs;
707 else
708 ret = -EINVAL;
709
710out:
711 return ret;
712}
713
714static int compat_vfp_get(struct task_struct *target,
715 const struct user_regset *regset,
716 unsigned int pos, unsigned int count,
717 void *kbuf, void __user *ubuf)
718{
719 struct user_fpsimd_state *uregs;
720 compat_ulong_t fpscr;
721 int ret;
722
723 uregs = &target->thread.fpsimd_state.user_fpsimd;
724
725 /*
726 * The VFP registers are packed into the fpsimd_state, so they all sit
727 * nicely together for us. We just need to create the fpscr separately.
728 */
729 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
730 VFP_STATE_SIZE - sizeof(compat_ulong_t));
731
732 if (count && !ret) {
733 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
734 (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
735 ret = put_user(fpscr, (compat_ulong_t *)ubuf);
736 }
737
738 return ret;
739}
740
741static int compat_vfp_set(struct task_struct *target,
742 const struct user_regset *regset,
743 unsigned int pos, unsigned int count,
744 const void *kbuf, const void __user *ubuf)
745{
746 struct user_fpsimd_state *uregs;
747 compat_ulong_t fpscr;
748 int ret;
749
750 if (pos + count > VFP_STATE_SIZE)
751 return -EIO;
752
753 uregs = &target->thread.fpsimd_state.user_fpsimd;
754
755 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
756 VFP_STATE_SIZE - sizeof(compat_ulong_t));
757
758 if (count && !ret) {
759 ret = get_user(fpscr, (compat_ulong_t *)ubuf);
760 uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
761 uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
762 }
763
764 return ret;
765}
766
767static const struct user_regset aarch32_regsets[] = {
768 [REGSET_COMPAT_GPR] = {
769 .core_note_type = NT_PRSTATUS,
770 .n = COMPAT_ELF_NGREG,
771 .size = sizeof(compat_elf_greg_t),
772 .align = sizeof(compat_elf_greg_t),
773 .get = compat_gpr_get,
774 .set = compat_gpr_set
775 },
776 [REGSET_COMPAT_VFP] = {
777 .core_note_type = NT_ARM_VFP,
778 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
779 .size = sizeof(compat_ulong_t),
780 .align = sizeof(compat_ulong_t),
781 .get = compat_vfp_get,
782 .set = compat_vfp_set
783 },
784};
785
786static const struct user_regset_view user_aarch32_view = {
787 .name = "aarch32", .e_machine = EM_ARM,
788 .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
789};
790
791int aarch32_break_trap(struct pt_regs *regs)
792{
793 unsigned int instr;
794 bool bp = false;
795 void __user *pc = (void __user *)instruction_pointer(regs);
796
797 if (compat_thumb_mode(regs)) {
798 /* get 16-bit Thumb instruction */
799 get_user(instr, (u16 __user *)pc);
800 if (instr == AARCH32_BREAK_THUMB2_LO) {
801 /* get second half of 32-bit Thumb-2 instruction */
802 get_user(instr, (u16 __user *)(pc + 2));
803 bp = instr == AARCH32_BREAK_THUMB2_HI;
804 } else {
805 bp = instr == AARCH32_BREAK_THUMB;
806 }
807 } else {
808 /* 32-bit ARM instruction */
809 get_user(instr, (u32 __user *)pc);
810 bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
811 }
812
813 if (bp)
814 return ptrace_break(regs);
815 return 1;
816}
817
818static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
819 compat_ulong_t __user *ret)
820{
821 compat_ulong_t tmp;
822
823 if (off & 3)
824 return -EIO;
825
826 if (off == PT_TEXT_ADDR)
827 tmp = tsk->mm->start_code;
828 else if (off == PT_DATA_ADDR)
829 tmp = tsk->mm->start_data;
830 else if (off == PT_TEXT_END_ADDR)
831 tmp = tsk->mm->end_code;
832 else if (off < sizeof(compat_elf_gregset_t))
833 return copy_regset_to_user(tsk, &user_aarch32_view,
834 REGSET_COMPAT_GPR, off,
835 sizeof(compat_ulong_t), ret);
836 else if (off >= COMPAT_USER_SZ)
837 return -EIO;
838 else
839 tmp = 0;
840
841 return put_user(tmp, ret);
842}
843
844static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
845 compat_ulong_t val)
846{
847 int ret;
848
849 if (off & 3 || off >= COMPAT_USER_SZ)
850 return -EIO;
851
852 if (off >= sizeof(compat_elf_gregset_t))
853 return 0;
854
855 ret = copy_regset_from_user(tsk, &user_aarch32_view,
856 REGSET_COMPAT_GPR, off,
857 sizeof(compat_ulong_t),
858 &val);
859 return ret;
860}
861
862#ifdef CONFIG_HAVE_HW_BREAKPOINT
863
864/*
865 * Convert a virtual register number into an index for a thread_info
866 * breakpoint array. Breakpoints are identified using positive numbers
867 * whilst watchpoints are negative. The registers are laid out as pairs
868 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
869 * Register 0 is reserved for describing resource information.
870 */
871static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
872{
873 return (abs(num) - 1) >> 1;
874}
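
A brief worked illustration of that mapping, assuming the numbering described in the comment above (the table itself is editorial, not part of the file):

/*
 * num =  0  ->  resource info (handled separately, no slot)
 * num = +1  ->  breakpoint slot 0, address   (idx = (abs(+1) - 1) >> 1 = 0, num & 1)
 * num = +2  ->  breakpoint slot 0, control   (idx = 0, even number)
 * num = -1  ->  watchpoint slot 0, address
 * num = -2  ->  watchpoint slot 0, control
 * num = +3  ->  breakpoint slot 1, address   (idx = (3 - 1) >> 1 = 1)
 */
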
875
876static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
877{
878 u8 num_brps, num_wrps, debug_arch, wp_len;
879 u32 reg = 0;
880
881 num_brps = hw_breakpoint_slots(TYPE_INST);
882 num_wrps = hw_breakpoint_slots(TYPE_DATA);
883
884 debug_arch = debug_monitors_arch();
885 wp_len = 8;
886 reg |= debug_arch;
887 reg <<= 8;
888 reg |= wp_len;
889 reg <<= 8;
890 reg |= num_wrps;
891 reg <<= 8;
892 reg |= num_brps;
893
894 *kdata = reg;
895 return 0;
896}
897
898static int compat_ptrace_hbp_get(unsigned int note_type,
899 struct task_struct *tsk,
900 compat_long_t num,
901 u32 *kdata)
902{
903 u64 addr = 0;
904 u32 ctrl = 0;
905
906	int err, idx = compat_ptrace_hbp_num_to_idx(num);
907
908 if (num & 1) {
909 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
910 *kdata = (u32)addr;
911 } else {
912 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
913 *kdata = ctrl;
914 }
915
916 return err;
917}
918
919static int compat_ptrace_hbp_set(unsigned int note_type,
920 struct task_struct *tsk,
921 compat_long_t num,
922 u32 *kdata)
923{
924 u64 addr;
925 u32 ctrl;
926
927 int err, idx = compat_ptrace_hbp_num_to_idx(num);
928
929 if (num & 1) {
930 addr = *kdata;
931 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
932 } else {
933 ctrl = *kdata;
934 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
935 }
936
937 return err;
938}
939
940static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
941 compat_ulong_t __user *data)
942{
943 int ret;
944 u32 kdata;
945 mm_segment_t old_fs = get_fs();
946
947 set_fs(KERNEL_DS);
948 /* Watchpoint */
949 if (num < 0) {
950 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
951 /* Resource info */
952 } else if (num == 0) {
953 ret = compat_ptrace_hbp_get_resource_info(&kdata);
954 /* Breakpoint */
955 } else {
956 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
957 }
958 set_fs(old_fs);
959
960 if (!ret)
961 ret = put_user(kdata, data);
962
963 return ret;
964}
965
966static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
967 compat_ulong_t __user *data)
968{
969 int ret;
970 u32 kdata = 0;
971 mm_segment_t old_fs = get_fs();
972
973 if (num == 0)
974 return 0;
975
976 ret = get_user(kdata, data);
977 if (ret)
978 return ret;
979
980 set_fs(KERNEL_DS);
981 if (num < 0)
982 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
983 else
984 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
985 set_fs(old_fs);
986
987 return ret;
988}
989#endif /* CONFIG_HAVE_HW_BREAKPOINT */
990
991long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
992 compat_ulong_t caddr, compat_ulong_t cdata)
993{
994 unsigned long addr = caddr;
995 unsigned long data = cdata;
996 void __user *datap = compat_ptr(data);
997 int ret;
998
999 switch (request) {
1000 case PTRACE_PEEKUSR:
1001 ret = compat_ptrace_read_user(child, addr, datap);
1002 break;
1003
1004 case PTRACE_POKEUSR:
1005 ret = compat_ptrace_write_user(child, addr, data);
1006 break;
1007
1008 case COMPAT_PTRACE_GETREGS:
1009 ret = copy_regset_to_user(child,
1010 &user_aarch32_view,
1011 REGSET_COMPAT_GPR,
1012 0, sizeof(compat_elf_gregset_t),
1013 datap);
1014 break;
1015
1016 case COMPAT_PTRACE_SETREGS:
1017 ret = copy_regset_from_user(child,
1018 &user_aarch32_view,
1019 REGSET_COMPAT_GPR,
1020 0, sizeof(compat_elf_gregset_t),
1021 datap);
1022 break;
1023
1024 case COMPAT_PTRACE_GET_THREAD_AREA:
1025 ret = put_user((compat_ulong_t)child->thread.tp_value,
1026 (compat_ulong_t __user *)datap);
1027 break;
1028
1029 case COMPAT_PTRACE_SET_SYSCALL:
1030 task_pt_regs(child)->syscallno = data;
1031 ret = 0;
1032 break;
1033
1034 case COMPAT_PTRACE_GETVFPREGS:
1035 ret = copy_regset_to_user(child,
1036 &user_aarch32_view,
1037 REGSET_COMPAT_VFP,
1038 0, VFP_STATE_SIZE,
1039 datap);
1040 break;
1041
1042 case COMPAT_PTRACE_SETVFPREGS:
1043 ret = copy_regset_from_user(child,
1044 &user_aarch32_view,
1045 REGSET_COMPAT_VFP,
1046 0, VFP_STATE_SIZE,
1047 datap);
1048 break;
1049
1050#ifdef CONFIG_HAVE_HW_BREAKPOINT
1051 case COMPAT_PTRACE_GETHBPREGS:
1052 ret = compat_ptrace_gethbpregs(child, addr, datap);
1053 break;
1054
1055 case COMPAT_PTRACE_SETHBPREGS:
1056 ret = compat_ptrace_sethbpregs(child, addr, datap);
1057 break;
1058#endif
1059
1060 default:
1061 ret = compat_ptrace_request(child, request, addr,
1062 data);
1063 break;
1064 }
1065
1066 return ret;
1067}
1068#endif /* CONFIG_COMPAT */
1069
1070const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1071{
1072#ifdef CONFIG_COMPAT
1073 if (is_compat_thread(task_thread_info(task)))
1074 return &user_aarch32_view;
1075#endif
1076 return &user_aarch64_view;
1077}
1078
1079long arch_ptrace(struct task_struct *child, long request,
1080 unsigned long addr, unsigned long data)
1081{
1082 return ptrace_request(child, request, addr, data);
1083}
1084
1085
1086static int __init ptrace_break_init(void)
1087{
1088 hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
1089 TRAP_BRKPT, "ptrace BRK handler");
1090 return 0;
1091}
1092core_initcall(ptrace_break_init);
1093
1094
1095asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
1096{
1097 unsigned long saved_reg;
1098
1099 if (!test_thread_flag(TIF_SYSCALL_TRACE))
1100 return regs->syscallno;
1101
1102 if (is_compat_task()) {
1103 /* AArch32 uses ip (r12) for scratch */
1104 saved_reg = regs->regs[12];
1105 regs->regs[12] = dir;
1106 } else {
1107 /*
1108 * Save X7. X7 is used to denote syscall entry/exit:
1109 * X7 = 0 -> entry, = 1 -> exit
1110 */
1111 saved_reg = regs->regs[7];
1112 regs->regs[7] = dir;
1113 }
1114
1115 if (dir)
1116 tracehook_report_syscall_exit(regs, 0);
1117 else if (tracehook_report_syscall_entry(regs))
1118 regs->syscallno = ~0UL;
1119
1120 if (is_compat_task())
1121 regs->regs[12] = saved_reg;
1122 else
1123 regs->regs[7] = saved_reg;
1124
1125 return regs->syscallno;
1126}
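
For a native task, a tracer observing one of these syscall stops sees dir in x7 (0 on entry, 1 on exit). A tracer-side fragment sketching that, assuming the tracee is already stopped in a PTRACE_SYSCALL stop and that the local struct mirrors the user_pt_regs layout (both are illustrative assumptions):

#include <stdio.h>
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct user_pt_regs_sketch {		/* assumed to match struct user_pt_regs */
	unsigned long long regs[31];
	unsigned long long sp;
	unsigned long long pc;
	unsigned long long pstate;
};

static void report_syscall_stop(pid_t pid)
{
	struct user_pt_regs_sketch regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) < 0)
		return;

	/* x7 carries the direction; x8 normally still holds the syscall number */
	printf("syscall %llu: %s\n", regs.regs[8],
	       regs.regs[7] ? "exit" : "entry");
}
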
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
new file mode 100644
index 000000000000..48ffb9fb3fe3
--- /dev/null
+++ b/arch/arm64/kernel/setup.c
@@ -0,0 +1,347 @@
1/*
2 * Based on arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/export.h>
21#include <linux/kernel.h>
22#include <linux/stddef.h>
23#include <linux/ioport.h>
24#include <linux/delay.h>
25#include <linux/utsname.h>
26#include <linux/initrd.h>
27#include <linux/console.h>
28#include <linux/bootmem.h>
29#include <linux/seq_file.h>
30#include <linux/screen_info.h>
31#include <linux/init.h>
32#include <linux/kexec.h>
33#include <linux/crash_dump.h>
34#include <linux/root_dev.h>
35#include <linux/cpu.h>
36#include <linux/interrupt.h>
37#include <linux/smp.h>
38#include <linux/fs.h>
39#include <linux/proc_fs.h>
40#include <linux/memblock.h>
41#include <linux/of_fdt.h>
42
43#include <asm/cputype.h>
44#include <asm/elf.h>
45#include <asm/cputable.h>
46#include <asm/sections.h>
47#include <asm/setup.h>
48#include <asm/cacheflush.h>
49#include <asm/tlbflush.h>
50#include <asm/traps.h>
51#include <asm/memblock.h>
52
53unsigned int processor_id;
54EXPORT_SYMBOL(processor_id);
55
56unsigned int elf_hwcap __read_mostly;
57EXPORT_SYMBOL_GPL(elf_hwcap);
58
59static const char *cpu_name;
60static const char *machine_name;
61phys_addr_t __fdt_pointer __initdata;
62
63/*
64 * Standard memory resources
65 */
66static struct resource mem_res[] = {
67 {
68 .name = "Kernel code",
69 .start = 0,
70 .end = 0,
71 .flags = IORESOURCE_MEM
72 },
73 {
74 .name = "Kernel data",
75 .start = 0,
76 .end = 0,
77 .flags = IORESOURCE_MEM
78 }
79};
80
81#define kernel_code mem_res[0]
82#define kernel_data mem_res[1]
83
84void __init early_print(const char *str, ...)
85{
86 char buf[256];
87 va_list ap;
88
89 va_start(ap, str);
90 vsnprintf(buf, sizeof(buf), str, ap);
91 va_end(ap);
92
93 printk("%s", buf);
94}
95
96static void __init setup_processor(void)
97{
98 struct cpu_info *cpu_info;
99
100 /*
101 * locate processor in the list of supported processor
102 * types. The linker builds this table for us from the
103 * entries in arch/arm/mm/proc.S
104 */
105 cpu_info = lookup_processor_type(read_cpuid_id());
106 if (!cpu_info) {
107 printk("CPU configuration botched (ID %08x), unable to continue.\n",
108 read_cpuid_id());
109 while (1);
110 }
111
112 cpu_name = cpu_info->cpu_name;
113
114 printk("CPU: %s [%08x] revision %d\n",
115 cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
116
117 sprintf(init_utsname()->machine, "aarch64");
118 elf_hwcap = 0;
119}
120
121static void __init setup_machine_fdt(phys_addr_t dt_phys)
122{
123 struct boot_param_header *devtree;
124 unsigned long dt_root;
125
126 /* Check we have a non-NULL DT pointer */
127 if (!dt_phys) {
128 early_print("\n"
129 "Error: NULL or invalid device tree blob\n"
130 "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
131 "\nPlease check your bootloader.\n");
132
133 while (true)
134 cpu_relax();
135
136 }
137
138 devtree = phys_to_virt(dt_phys);
139
140 /* Check device tree validity */
141 if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
142 early_print("\n"
143 "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
144 "Expected 0x%x, found 0x%x\n"
145 "\nPlease check your bootloader.\n",
146 dt_phys, devtree, OF_DT_HEADER,
147 be32_to_cpu(devtree->magic));
148
149 while (true)
150 cpu_relax();
151 }
152
153 initial_boot_params = devtree;
154 dt_root = of_get_flat_dt_root();
155
156 machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
157 if (!machine_name)
158 machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
159 if (!machine_name)
160 machine_name = "<unknown>";
161 pr_info("Machine: %s\n", machine_name);
162
163 /* Retrieve various information from the /chosen node */
164 of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
165 /* Initialize {size,address}-cells info */
166 of_scan_flat_dt(early_init_dt_scan_root, NULL);
167 /* Setup memory, calling early_init_dt_add_memory_arch */
168 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
169}
170
171void __init early_init_dt_add_memory_arch(u64 base, u64 size)
172{
173 size &= PAGE_MASK;
174 memblock_add(base, size);
175}
176
177void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
178{
179 return __va(memblock_alloc(size, align));
180}
181
182/*
183 * Limit the memory size that was specified via FDT.
184 */
185static int __init early_mem(char *p)
186{
187 phys_addr_t limit;
188
189 if (!p)
190 return 1;
191
192 limit = memparse(p, &p) & PAGE_MASK;
193 pr_notice("Memory limited to %lldMB\n", limit >> 20);
194
195 memblock_enforce_memory_limit(limit);
196
197 return 0;
198}
199early_param("mem", early_mem);
200
201static void __init request_standard_resources(void)
202{
203 struct memblock_region *region;
204 struct resource *res;
205
206 kernel_code.start = virt_to_phys(_text);
207 kernel_code.end = virt_to_phys(_etext - 1);
208 kernel_data.start = virt_to_phys(_sdata);
209 kernel_data.end = virt_to_phys(_end - 1);
210
211 for_each_memblock(memory, region) {
212 res = alloc_bootmem_low(sizeof(*res));
213 res->name = "System RAM";
214 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
215 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
216 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
217
218 request_resource(&iomem_resource, res);
219
220 if (kernel_code.start >= res->start &&
221 kernel_code.end <= res->end)
222 request_resource(res, &kernel_code);
223 if (kernel_data.start >= res->start &&
224 kernel_data.end <= res->end)
225 request_resource(res, &kernel_data);
226 }
227}
228
229void __init setup_arch(char **cmdline_p)
230{
231 setup_processor();
232
233 setup_machine_fdt(__fdt_pointer);
234
235 init_mm.start_code = (unsigned long) _text;
236 init_mm.end_code = (unsigned long) _etext;
237 init_mm.end_data = (unsigned long) _edata;
238 init_mm.brk = (unsigned long) _end;
239
240 *cmdline_p = boot_command_line;
241
242 parse_early_param();
243
244 arm64_memblock_init();
245
246 paging_init();
247 request_standard_resources();
248
249 unflatten_device_tree();
250
251#ifdef CONFIG_SMP
252 smp_init_cpus();
253#endif
254
255#ifdef CONFIG_VT
256#if defined(CONFIG_VGA_CONSOLE)
257 conswitchp = &vga_con;
258#elif defined(CONFIG_DUMMY_CONSOLE)
259 conswitchp = &dummy_con;
260#endif
261#endif
262}
263
264static DEFINE_PER_CPU(struct cpu, cpu_data);
265
266static int __init topology_init(void)
267{
268 int i;
269
270 for_each_possible_cpu(i) {
271 struct cpu *cpu = &per_cpu(cpu_data, i);
272 cpu->hotpluggable = 1;
273 register_cpu(cpu, i);
274 }
275
276 return 0;
277}
278subsys_initcall(topology_init);
279
280static const char *hwcap_str[] = {
281 "fp",
282 "asimd",
283 NULL
284};
285
286static int c_show(struct seq_file *m, void *v)
287{
288 int i;
289
290 seq_printf(m, "Processor\t: %s rev %d (%s)\n",
291 cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
292
293 for_each_online_cpu(i) {
294 /*
295 * glibc reads /proc/cpuinfo to determine the number of
296 * online processors, looking for lines beginning with
297 * "processor". Give glibc what it expects.
298 */
299#ifdef CONFIG_SMP
300 seq_printf(m, "processor\t: %d\n", i);
301#endif
302 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
303 loops_per_jiffy / (500000UL/HZ),
304 loops_per_jiffy / (5000UL/HZ) % 100);
305 }
306
307 /* dump out the processor features */
308 seq_puts(m, "Features\t: ");
309
310 for (i = 0; hwcap_str[i]; i++)
311 if (elf_hwcap & (1 << i))
312 seq_printf(m, "%s ", hwcap_str[i]);
313
314 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
315 seq_printf(m, "CPU architecture: AArch64\n");
316 seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
317 seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
318 seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
319
320 seq_puts(m, "\n");
321
322 seq_printf(m, "Hardware\t: %s\n", machine_name);
323
324 return 0;
325}
326
327static void *c_start(struct seq_file *m, loff_t *pos)
328{
329 return *pos < 1 ? (void *)1 : NULL;
330}
331
332static void *c_next(struct seq_file *m, void *v, loff_t *pos)
333{
334 ++*pos;
335 return NULL;
336}
337
338static void c_stop(struct seq_file *m, void *v)
339{
340}
341
342const struct seq_operations cpuinfo_op = {
343 .start = c_start,
344 .next = c_next,
345 .stop = c_stop,
346 .show = c_show
347};
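
The "Features" line produced by c_show() above is backed by elf_hwcap, which is also what the ELF auxiliary vector exposes to userspace. A minimal sketch, assuming the usual arm64 bit assignments (fp = bit 0, asimd = bit 1, matching the hwcap_str[] ordering):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("fp   : %s\n", (hwcap & (1UL << 0)) ? "yes" : "no");
	printf("asimd: %s\n", (hwcap & (1UL << 1)) ? "yes" : "no");
	return 0;
}
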
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
new file mode 100644
index 000000000000..8807ba2cf262
--- /dev/null
+++ b/arch/arm64/kernel/signal.c
@@ -0,0 +1,437 @@
1/*
2 * Based on arch/arm/kernel/signal.c
3 *
4 * Copyright (C) 1995-2009 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/errno.h>
21#include <linux/signal.h>
22#include <linux/personality.h>
23#include <linux/freezer.h>
24#include <linux/uaccess.h>
25#include <linux/tracehook.h>
26#include <linux/ratelimit.h>
27
28#include <asm/compat.h>
29#include <asm/debug-monitors.h>
30#include <asm/elf.h>
31#include <asm/cacheflush.h>
32#include <asm/ucontext.h>
33#include <asm/unistd.h>
34#include <asm/fpsimd.h>
35#include <asm/signal32.h>
36#include <asm/vdso.h>
37
38/*
39 * Do a signal return; undo the signal stack. These are aligned to 128 bits.
40 */
41struct rt_sigframe {
42 struct siginfo info;
43 struct ucontext uc;
44};
45
46static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
47{
48 struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
49 int err;
50
51 /* dump the hardware registers to the fpsimd_state structure */
52 fpsimd_save_state(fpsimd);
53
54 /* copy the FP and status/control registers */
55 err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
56 __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
57 __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);
58
59 /* copy the magic/size information */
60 __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
61 __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);
62
63 return err ? -EFAULT : 0;
64}
65
66static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
67{
68 struct fpsimd_state fpsimd;
69 __u32 magic, size;
70 int err = 0;
71
72 /* check the magic/size information */
73 __get_user_error(magic, &ctx->head.magic, err);
74 __get_user_error(size, &ctx->head.size, err);
75 if (err)
76 return -EFAULT;
77 if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
78 return -EINVAL;
79
80 /* copy the FP and status/control registers */
81 err = __copy_from_user(fpsimd.vregs, ctx->vregs,
82 sizeof(fpsimd.vregs));
83 __get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
84 __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
85
86 /* load the hardware registers from the fpsimd_state structure */
87 if (!err) {
88 preempt_disable();
89 fpsimd_load_state(&fpsimd);
90 preempt_enable();
91 }
92
93 return err ? -EFAULT : 0;
94}
95
96static int restore_sigframe(struct pt_regs *regs,
97 struct rt_sigframe __user *sf)
98{
99 sigset_t set;
100 int i, err;
101 struct aux_context __user *aux =
102 (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
103
104 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
105 if (err == 0)
106 set_current_blocked(&set);
107
108 for (i = 0; i < 31; i++)
109 __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
110 err);
111 __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
112 __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
113 __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
114
115 /*
116 * Avoid sys_rt_sigreturn() restarting.
117 */
118 regs->syscallno = ~0UL;
119
120 err |= !valid_user_regs(&regs->user_regs);
121
122 if (err == 0)
123 err |= restore_fpsimd_context(&aux->fpsimd);
124
125 return err;
126}
127
128asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
129{
130 struct rt_sigframe __user *frame;
131
132 /* Always make any pending restarted system calls return -EINTR */
133 current_thread_info()->restart_block.fn = do_no_restart_syscall;
134
135 /*
136	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
137	 * be 16-byte aligned here.
138 */
139 if (regs->sp & 15)
140 goto badframe;
141
142 frame = (struct rt_sigframe __user *)regs->sp;
143
144 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
145 goto badframe;
146
147 if (restore_sigframe(regs, frame))
148 goto badframe;
149
150 if (do_sigaltstack(&frame->uc.uc_stack,
151 NULL, regs->sp) == -EFAULT)
152 goto badframe;
153
154 return regs->regs[0];
155
156badframe:
157 if (show_unhandled_signals)
158 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
159 current->comm, task_pid_nr(current), __func__,
160 regs->pc, regs->sp);
161 force_sig(SIGSEGV, current);
162 return 0;
163}
164
165asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
166 unsigned long sp)
167{
168 return do_sigaltstack(uss, uoss, sp);
169}
170
171static int setup_sigframe(struct rt_sigframe __user *sf,
172 struct pt_regs *regs, sigset_t *set)
173{
174 int i, err = 0;
175 struct aux_context __user *aux =
176 (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
177
178 for (i = 0; i < 31; i++)
179 __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
180 err);
181 __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
182 __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
183 __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
184
185 __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
186
187 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
188
189 if (err == 0)
190 err |= preserve_fpsimd_context(&aux->fpsimd);
191
192 /* set the "end" magic */
193 __put_user_error(0, &aux->end.magic, err);
194 __put_user_error(0, &aux->end.size, err);
195
196 return err;
197}
198
199static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
200 int framesize)
201{
202 unsigned long sp, sp_top;
203 void __user *frame;
204
205 sp = sp_top = regs->sp;
206
207 /*
208 * This is the X/Open sanctioned signal stack switching.
209 */
210 if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
211 sp = sp_top = current->sas_ss_sp + current->sas_ss_size;
212
213 /* room for stack frame (FP, LR) */
214 sp -= 16;
215
216 sp = (sp - framesize) & ~15;
217 frame = (void __user *)sp;
218
219 /*
220 * Check that we can actually write to the signal frame.
221 */
222 if (!access_ok(VERIFY_WRITE, frame, sp_top - sp))
223 frame = NULL;
224
225 return frame;
226}
227
228static int setup_return(struct pt_regs *regs, struct k_sigaction *ka,
229 void __user *frame, int usig)
230{
231 int err = 0;
232 __sigrestore_t sigtramp;
233 unsigned long __user *sp = (unsigned long __user *)regs->sp;
234
235 /* set up the stack frame */
236 __put_user_error(regs->regs[29], sp - 2, err);
237 __put_user_error(regs->regs[30], sp - 1, err);
238
239 regs->regs[0] = usig;
240 regs->regs[29] = regs->sp - 16;
241 regs->sp = (unsigned long)frame;
242 regs->pc = (unsigned long)ka->sa.sa_handler;
243
244 if (ka->sa.sa_flags & SA_RESTORER)
245 sigtramp = ka->sa.sa_restorer;
246 else
247 sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
248
249 regs->regs[30] = (unsigned long)sigtramp;
250
251 return err;
252}
253
254static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
255 sigset_t *set, struct pt_regs *regs)
256{
257 struct rt_sigframe __user *frame;
258 stack_t stack;
259 int err = 0;
260
261 frame = get_sigframe(ka, regs, sizeof(*frame));
262 if (!frame)
263 return 1;
264
265 __put_user_error(0, &frame->uc.uc_flags, err);
266 __put_user_error(NULL, &frame->uc.uc_link, err);
267
268 memset(&stack, 0, sizeof(stack));
269 stack.ss_sp = (void __user *)current->sas_ss_sp;
270 stack.ss_flags = sas_ss_flags(regs->sp);
271 stack.ss_size = current->sas_ss_size;
272 err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
273
274 err |= setup_sigframe(frame, regs, set);
275 if (err == 0)
276 err = setup_return(regs, ka, frame, usig);
277
278 if (err == 0 && ka->sa.sa_flags & SA_SIGINFO) {
279 err |= copy_siginfo_to_user(&frame->info, info);
280 regs->regs[1] = (unsigned long)&frame->info;
281 regs->regs[2] = (unsigned long)&frame->uc;
282 }
283
284 return err;
285}
286
287static void setup_restart_syscall(struct pt_regs *regs)
288{
289 if (is_compat_task())
290 compat_setup_restart_syscall(regs);
291 else
292 regs->regs[8] = __NR_restart_syscall;
293}
294
295/*
296 * OK, we're invoking a handler
297 */
298static void handle_signal(unsigned long sig, struct k_sigaction *ka,
299 siginfo_t *info, struct pt_regs *regs)
300{
301 struct thread_info *thread = current_thread_info();
302 struct task_struct *tsk = current;
303 sigset_t *oldset = sigmask_to_save();
304 int usig = sig;
305 int ret;
306
307 /*
308 * translate the signal
309 */
310 if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
311 usig = thread->exec_domain->signal_invmap[usig];
312
313 /*
314 * Set up the stack frame
315 */
316 if (is_compat_task()) {
317 if (ka->sa.sa_flags & SA_SIGINFO)
318 ret = compat_setup_rt_frame(usig, ka, info, oldset,
319 regs);
320 else
321 ret = compat_setup_frame(usig, ka, oldset, regs);
322 } else {
323 ret = setup_rt_frame(usig, ka, info, oldset, regs);
324 }
325
326 /*
327 * Check that the resulting registers are actually sane.
328 */
329 ret |= !valid_user_regs(&regs->user_regs);
330
331 if (ret != 0) {
332 force_sigsegv(sig, tsk);
333 return;
334 }
335
336 /*
337 * Fast forward the stepping logic so we step into the signal
338 * handler.
339 */
340 user_fastforward_single_step(tsk);
341
342 signal_delivered(sig, info, ka, regs, 0);
343}
344
345/*
346 * Note that 'init' is a special process: it doesn't get signals it doesn't
347 * want to handle. Thus you cannot kill init even with a SIGKILL even by
348 * mistake.
349 *
350 * Note that we go through the signals twice: once to check the signals that
351 * the kernel can handle, and then we build all the user-level signal handling
352 * stack-frames in one go after that.
353 */
354static void do_signal(struct pt_regs *regs)
355{
356 unsigned long continue_addr = 0, restart_addr = 0;
357 struct k_sigaction ka;
358 siginfo_t info;
359 int signr, retval = 0;
360 int syscall = (int)regs->syscallno;
361
362 /*
363 * If we were from a system call, check for system call restarting...
364 */
365 if (syscall >= 0) {
366 continue_addr = regs->pc;
367 restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
368 retval = regs->regs[0];
369
370 /*
371 * Avoid additional syscall restarting via ret_to_user.
372 */
373 regs->syscallno = ~0UL;
374
375 /*
376 * Prepare for system call restart. We do this here so that a
377 * debugger will see the already changed PC.
378 */
379 switch (retval) {
380 case -ERESTARTNOHAND:
381 case -ERESTARTSYS:
382 case -ERESTARTNOINTR:
383 case -ERESTART_RESTARTBLOCK:
384 regs->regs[0] = regs->orig_x0;
385 regs->pc = restart_addr;
386 break;
387 }
388 }
389
390 /*
391 * Get the signal to deliver. When running under ptrace, at this point
392 * the debugger may change all of our registers.
393 */
394 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
395 if (signr > 0) {
396 /*
397 * Depending on the signal settings, we may need to revert the
398 * decision to restart the system call, but skip this if a
399 * debugger has chosen to restart at a different PC.
400 */
401 if (regs->pc == restart_addr &&
402 (retval == -ERESTARTNOHAND ||
403 retval == -ERESTART_RESTARTBLOCK ||
404 (retval == -ERESTARTSYS &&
405 !(ka.sa.sa_flags & SA_RESTART)))) {
406 regs->regs[0] = -EINTR;
407 regs->pc = continue_addr;
408 }
409
410 handle_signal(signr, &ka, &info, regs);
411 return;
412 }
413
414 /*
415 * Handle restarting a different system call. As above, if a debugger
416 * has chosen to restart at a different PC, ignore the restart.
417 */
418 if (syscall >= 0 && regs->pc == restart_addr) {
419 if (retval == -ERESTART_RESTARTBLOCK)
420 setup_restart_syscall(regs);
421 user_rewind_single_step(current);
422 }
423
424 restore_saved_sigmask();
425}
426
427asmlinkage void do_notify_resume(struct pt_regs *regs,
428 unsigned int thread_flags)
429{
430 if (thread_flags & _TIF_SIGPENDING)
431 do_signal(regs);
432
433 if (thread_flags & _TIF_NOTIFY_RESUME) {
434 clear_thread_flag(TIF_NOTIFY_RESUME);
435 tracehook_notify_resume(regs);
436 }
437}
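
The native path above lays out an rt_sigframe on the 16-byte-aligned user stack, saves the old FP/LR pair just below it, and points x30 at either sa_restorer or the vDSO sigtramp so that returning from the handler re-enters the kernel via rt_sigreturn. The minimal userspace sketch below shows what a handler installed with SA_SIGINFO sees through that frame; it is illustrative only, not part of this patch, and it assumes the usual arm64 glibc layout where uc_mcontext mirrors struct sigcontext.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *info, void *uc_void)
{
	ucontext_t *uc = uc_void;

	/* uc_mcontext carries the regs/sp/pc/pstate written by setup_sigframe() */
	printf("signal %d delivered: pc=%#llx sp=%#llx\n", sig,
	       (unsigned long long)uc->uc_mcontext.pc,
	       (unsigned long long)uc->uc_mcontext.sp);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;	/* ask for siginfo + ucontext, as built by setup_rt_frame() */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);			/* handler returns through the vDSO sigtramp -> rt_sigreturn */
	return 0;
}
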
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
new file mode 100644
index 000000000000..ac74c2f261e3
--- /dev/null
+++ b/arch/arm64/kernel/signal32.c
@@ -0,0 +1,876 @@
1/*
2 * Based on arch/arm/kernel/signal.c
3 *
4 * Copyright (C) 1995-2009 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 * Modified by Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#define __SYSCALL_COMPAT
22
23#include <linux/compat.h>
24#include <linux/signal.h>
25#include <linux/syscalls.h>
26#include <linux/ratelimit.h>
27
28#include <asm/fpsimd.h>
29#include <asm/signal32.h>
30#include <asm/uaccess.h>
31#include <asm/unistd.h>
32
33typedef struct compat_siginfo {
34 int si_signo;
35 int si_errno;
36 int si_code;
37
38 union {
39 /* The padding is the same size as AArch64. */
40 int _pad[SI_PAD_SIZE];
41
42 /* kill() */
43 struct {
44 compat_pid_t _pid; /* sender's pid */
45 __compat_uid32_t _uid; /* sender's uid */
46 } _kill;
47
48 /* POSIX.1b timers */
49 struct {
50 compat_timer_t _tid; /* timer id */
51 int _overrun; /* overrun count */
52 compat_sigval_t _sigval; /* same as below */
53 int _sys_private; /* not to be passed to user */
54 } _timer;
55
56 /* POSIX.1b signals */
57 struct {
58 compat_pid_t _pid; /* sender's pid */
59 __compat_uid32_t _uid; /* sender's uid */
60 compat_sigval_t _sigval;
61 } _rt;
62
63 /* SIGCHLD */
64 struct {
65 compat_pid_t _pid; /* which child */
66 __compat_uid32_t _uid; /* sender's uid */
67 int _status; /* exit code */
68 compat_clock_t _utime;
69 compat_clock_t _stime;
70 } _sigchld;
71
72 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
73 struct {
74 compat_uptr_t _addr; /* faulting insn/memory ref. */
75 short _addr_lsb; /* LSB of the reported address */
76 } _sigfault;
77
78 /* SIGPOLL */
79 struct {
80 compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
81 int _fd;
82 } _sigpoll;
83 } _sifields;
84} compat_siginfo_t;
85
86struct compat_sigaction {
87 compat_uptr_t sa_handler;
88 compat_ulong_t sa_flags;
89 compat_uptr_t sa_restorer;
90 compat_sigset_t sa_mask;
91};
92
93struct compat_old_sigaction {
94 compat_uptr_t sa_handler;
95 compat_old_sigset_t sa_mask;
96 compat_ulong_t sa_flags;
97 compat_uptr_t sa_restorer;
98};
99
100typedef struct compat_sigaltstack {
101 compat_uptr_t ss_sp;
102 int ss_flags;
103 compat_size_t ss_size;
104} compat_stack_t;
105
106struct compat_sigcontext {
107 /* We always set these two fields to 0 */
108 compat_ulong_t trap_no;
109 compat_ulong_t error_code;
110
111 compat_ulong_t oldmask;
112 compat_ulong_t arm_r0;
113 compat_ulong_t arm_r1;
114 compat_ulong_t arm_r2;
115 compat_ulong_t arm_r3;
116 compat_ulong_t arm_r4;
117 compat_ulong_t arm_r5;
118 compat_ulong_t arm_r6;
119 compat_ulong_t arm_r7;
120 compat_ulong_t arm_r8;
121 compat_ulong_t arm_r9;
122 compat_ulong_t arm_r10;
123 compat_ulong_t arm_fp;
124 compat_ulong_t arm_ip;
125 compat_ulong_t arm_sp;
126 compat_ulong_t arm_lr;
127 compat_ulong_t arm_pc;
128 compat_ulong_t arm_cpsr;
129 compat_ulong_t fault_address;
130};
131
132struct compat_ucontext {
133 compat_ulong_t uc_flags;
134 struct compat_ucontext *uc_link;
135 compat_stack_t uc_stack;
136 struct compat_sigcontext uc_mcontext;
137 compat_sigset_t uc_sigmask;
138 int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))];
139 compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8)));
140};
141
142struct compat_vfp_sigframe {
143 compat_ulong_t magic;
144 compat_ulong_t size;
145 struct compat_user_vfp {
146 compat_u64 fpregs[32];
147 compat_ulong_t fpscr;
148 } ufp;
149 struct compat_user_vfp_exc {
150 compat_ulong_t fpexc;
151 compat_ulong_t fpinst;
152 compat_ulong_t fpinst2;
153 } ufp_exc;
154} __attribute__((__aligned__(8)));
155
156#define VFP_MAGIC 0x56465001
157#define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe)
158
159struct compat_aux_sigframe {
160 struct compat_vfp_sigframe vfp;
161
162 /* Something that isn't a valid magic number for any coprocessor. */
163 unsigned long end_magic;
164} __attribute__((__aligned__(8)));
165
166struct compat_sigframe {
167 struct compat_ucontext uc;
168 compat_ulong_t retcode[2];
169};
170
171struct compat_rt_sigframe {
172 struct compat_siginfo info;
173 struct compat_sigframe sig;
174};
175
176#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
177
178/*
179 * For ARM syscalls, the syscall number has to be loaded into r7.
180 * We do not support an OABI userspace.
181 */
182#define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_sigreturn)
183#define SVC_SYS_SIGRETURN (0xef000000 | __NR_sigreturn)
184#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_rt_sigreturn)
185#define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_rt_sigreturn)
186
187/*
188 * For Thumb syscalls, we also pass the syscall number via r7. We therefore
189 * need two 16-bit instructions.
190 */
191#define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_sigreturn) << 16) | \
192 0x2700 | __NR_sigreturn)
193#define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_rt_sigreturn) << 16) | \
194 0x2700 | __NR_rt_sigreturn)
195
196const compat_ulong_t aarch32_sigret_code[6] = {
197 /*
198 * AArch32 sigreturn code.
199 * We don't construct an OABI SWI - instead we just set the imm24 field
200 * to the EABI syscall number so that we create a sane disassembly.
201 */
202 MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
203 MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
204};
205
206static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
207{
208 compat_sigset_t cset;
209
210 cset.sig[0] = set->sig[0] & 0xffffffffull;
211 cset.sig[1] = set->sig[0] >> 32;
212
213 return copy_to_user(uset, &cset, sizeof(*uset));
214}
215
216static inline int get_sigset_t(sigset_t *set,
217 const compat_sigset_t __user *uset)
218{
219 compat_sigset_t s32;
220
221 if (copy_from_user(&s32, uset, sizeof(*uset)))
222 return -EFAULT;
223
224 set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
225 return 0;
226}
227
228int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
229{
230 int err;
231
232 if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
233 return -EFAULT;
234
235 /* If you change the siginfo_t structure, please be sure
236 * this code is fixed accordingly.
237 * It should never copy any padding contained in the structure
238 * (to avoid security leaks), but must copy the generic
239 * 3 ints plus the relevant union member.
240 * This routine must also convert the siginfo from 64-bit to
241 * 32-bit at the same time.
242 */
243 err = __put_user(from->si_signo, &to->si_signo);
244 err |= __put_user(from->si_errno, &to->si_errno);
245 err |= __put_user((short)from->si_code, &to->si_code);
246 if (from->si_code < 0)
247 err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad,
248 SI_PAD_SIZE);
249 else switch (from->si_code & __SI_MASK) {
250 case __SI_KILL:
251 err |= __put_user(from->si_pid, &to->si_pid);
252 err |= __put_user(from->si_uid, &to->si_uid);
253 break;
254 case __SI_TIMER:
255 err |= __put_user(from->si_tid, &to->si_tid);
256 err |= __put_user(from->si_overrun, &to->si_overrun);
257 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
258 &to->si_ptr);
259 break;
260 case __SI_POLL:
261 err |= __put_user(from->si_band, &to->si_band);
262 err |= __put_user(from->si_fd, &to->si_fd);
263 break;
264 case __SI_FAULT:
265 err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr,
266 &to->si_addr);
267#ifdef BUS_MCEERR_AO
268 /*
269 * Other callers might not initialize the si_lsb field,
270 * so check explicitly for the right codes here.
271 */
272 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
273 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
274#endif
275 break;
276 case __SI_CHLD:
277 err |= __put_user(from->si_pid, &to->si_pid);
278 err |= __put_user(from->si_uid, &to->si_uid);
279 err |= __put_user(from->si_status, &to->si_status);
280 err |= __put_user(from->si_utime, &to->si_utime);
281 err |= __put_user(from->si_stime, &to->si_stime);
282 break;
283 case __SI_RT: /* This is not generated by the kernel as of now. */
284 case __SI_MESGQ: /* But this is */
285 err |= __put_user(from->si_pid, &to->si_pid);
286 err |= __put_user(from->si_uid, &to->si_uid);
287 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
288 break;
289 default: /* this is just in case for now ... */
290 err |= __put_user(from->si_pid, &to->si_pid);
291 err |= __put_user(from->si_uid, &to->si_uid);
292 break;
293 }
294 return err;
295}
296
297int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
298{
299 memset(to, 0, sizeof *to);
300
301 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
302 copy_from_user(to->_sifields._pad,
303 from->_sifields._pad, SI_PAD_SIZE))
304 return -EFAULT;
305
306 return 0;
307}
308
309/*
310 * VFP save/restore code.
311 */
312static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
313{
314 struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
315 compat_ulong_t magic = VFP_MAGIC;
316 compat_ulong_t size = VFP_STORAGE_SIZE;
317 compat_ulong_t fpscr, fpexc;
318 int err = 0;
319
320 /*
321 * Save the hardware registers to the fpsimd_state structure.
322 * Note that this also saves V16-31, which aren't visible
323 * in AArch32.
324 */
325 fpsimd_save_state(fpsimd);
326
327 /* Place structure header on the stack */
328 __put_user_error(magic, &frame->magic, err);
329 __put_user_error(size, &frame->size, err);
330
331 /*
332 * Now copy the FP registers. Since the registers are packed,
333 * we can copy the prefix we want (V0-V15) as it is.
334 * FIXME: Won't work if big endian.
335 */
336 err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
337 sizeof(frame->ufp.fpregs));
338
339 /* Create an AArch32 fpscr from the fpsr and the fpcr. */
340 fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
341 (fpsimd->fpcr & VFP_FPSCR_CTRL_MASK);
342 __put_user_error(fpscr, &frame->ufp.fpscr, err);
343
344 /*
345 * The exception registers aren't available, so we fake up a
346 * basic FPEXC and zero everything else.
347 */
348 fpexc = (1 << 30);
349 __put_user_error(fpexc, &frame->ufp_exc.fpexc, err);
350 __put_user_error(0, &frame->ufp_exc.fpinst, err);
351 __put_user_error(0, &frame->ufp_exc.fpinst2, err);
352
353 return err ? -EFAULT : 0;
354}
355
356static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
357{
358 struct fpsimd_state fpsimd;
359 compat_ulong_t magic = VFP_MAGIC;
360 compat_ulong_t size = VFP_STORAGE_SIZE;
361 compat_ulong_t fpscr;
362 int err = 0;
363
364 __get_user_error(magic, &frame->magic, err);
365 __get_user_error(size, &frame->size, err);
366
367 if (err)
368 return -EFAULT;
369 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
370 return -EINVAL;
371
372 /*
373 * Copy the FP registers into the start of the fpsimd_state.
374 * FIXME: Won't work if big endian.
375 */
376 err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
377 sizeof(frame->ufp.fpregs));
378
379 /* Extract the fpsr and the fpcr from the fpscr */
380 __get_user_error(fpscr, &frame->ufp.fpscr, err);
381 fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK;
382 fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
383
384 /*
385 * We don't need to touch the exception register, so
386 * reload the hardware state.
387 */
388 if (!err) {
389 preempt_disable();
390 fpsimd_load_state(&fpsimd);
391 preempt_enable();
392 }
393
394 return err ? -EFAULT : 0;
395}
396
397/*
398 * atomically swap in the new signal mask, and wait for a signal.
399 */
400asmlinkage int compat_sys_sigsuspend(int restart, compat_ulong_t oldmask,
401 compat_old_sigset_t mask)
402{
403 sigset_t blocked;
404
405 siginitset(&blocked, mask);
406 return sigsuspend(&blocked);
407}
408
409asmlinkage int compat_sys_sigaction(int sig,
410 const struct compat_old_sigaction __user *act,
411 struct compat_old_sigaction __user *oact)
412{
413 struct k_sigaction new_ka, old_ka;
414 int ret;
415 compat_old_sigset_t mask;
416 compat_uptr_t handler, restorer;
417
418 if (act) {
419 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
420 __get_user(handler, &act->sa_handler) ||
421 __get_user(restorer, &act->sa_restorer) ||
422 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
423 __get_user(mask, &act->sa_mask))
424 return -EFAULT;
425
426 new_ka.sa.sa_handler = compat_ptr(handler);
427 new_ka.sa.sa_restorer = compat_ptr(restorer);
428 siginitset(&new_ka.sa.sa_mask, mask);
429 }
430
431 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
432
433 if (!ret && oact) {
434 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
435 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
436 &oact->sa_handler) ||
437 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
438 &oact->sa_restorer) ||
439 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
440 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
441 return -EFAULT;
442 }
443
444 return ret;
445}
446
447asmlinkage int compat_sys_rt_sigaction(int sig,
448 const struct compat_sigaction __user *act,
449 struct compat_sigaction __user *oact,
450 compat_size_t sigsetsize)
451{
452 struct k_sigaction new_ka, old_ka;
453 int ret;
454
455 /* XXX: Don't preclude handling different sized sigset_t's. */
456 if (sigsetsize != sizeof(compat_sigset_t))
457 return -EINVAL;
458
459 if (act) {
460 compat_uptr_t handler, restorer;
461
462 ret = get_user(handler, &act->sa_handler);
463 new_ka.sa.sa_handler = compat_ptr(handler);
464 ret |= get_user(restorer, &act->sa_restorer);
465 new_ka.sa.sa_restorer = compat_ptr(restorer);
466 ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
467 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
468 if (ret)
469 return -EFAULT;
470 }
471
472 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
473 if (!ret && oact) {
474 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
475 ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
476 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
477 }
478 return ret;
479}
480
481int compat_do_sigaltstack(compat_uptr_t compat_uss, compat_uptr_t compat_uoss,
482 compat_ulong_t sp)
483{
484 compat_stack_t __user *newstack = compat_ptr(compat_uss);
485 compat_stack_t __user *oldstack = compat_ptr(compat_uoss);
486 compat_uptr_t ss_sp;
487 int ret;
488 mm_segment_t old_fs;
489 stack_t uss, uoss;
490
491 /* Marshal the new compat stack into a stack_t */
492 if (newstack) {
493 if (get_user(ss_sp, &newstack->ss_sp) ||
494 __get_user(uss.ss_flags, &newstack->ss_flags) ||
495 __get_user(uss.ss_size, &newstack->ss_size))
496 return -EFAULT;
497 uss.ss_sp = compat_ptr(ss_sp);
498 }
499
500 old_fs = get_fs();
501 set_fs(KERNEL_DS);
502 /* The __user pointer casts are valid because of the set_fs() */
503 ret = do_sigaltstack(
504 newstack ? (stack_t __user *) &uss : NULL,
505 oldstack ? (stack_t __user *) &uoss : NULL,
506 (unsigned long)sp);
507 set_fs(old_fs);
508
509 /* Convert the old stack_t into a compat stack. */
510 if (!ret && oldstack &&
511 (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
512 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
513 __put_user(uoss.ss_size, &oldstack->ss_size)))
514 return -EFAULT;
515 return ret;
516}
517
518static int compat_restore_sigframe(struct pt_regs *regs,
519 struct compat_sigframe __user *sf)
520{
521 int err;
522 sigset_t set;
523 struct compat_aux_sigframe __user *aux;
524
525 err = get_sigset_t(&set, &sf->uc.uc_sigmask);
526 if (err == 0) {
527 sigdelsetmask(&set, ~_BLOCKABLE);
528 set_current_blocked(&set);
529 }
530
531 __get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
532 __get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
533 __get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
534 __get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
535 __get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
536 __get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
537 __get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
538 __get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
539 __get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
540 __get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
541 __get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
542 __get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
543 __get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
544 __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
545 __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
546 __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
547 __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
548
549 /*
550 * Avoid compat_sys_sigreturn() restarting.
551 */
552 regs->syscallno = ~0UL;
553
554 err |= !valid_user_regs(&regs->user_regs);
555
556 aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
557 if (err == 0)
558 err |= compat_restore_vfp_context(&aux->vfp);
559
560 return err;
561}
562
563asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
564{
565 struct compat_sigframe __user *frame;
566
567 /* Always make any pending restarted system calls return -EINTR */
568 current_thread_info()->restart_block.fn = do_no_restart_syscall;
569
570 /*
571 * Since we stacked the signal on a 64-bit boundary,
572 * 'sp' should be 8-byte aligned here. If it's not,
573 * then the user is trying to mess with us.
574 */
575 if (regs->compat_sp & 7)
576 goto badframe;
577
578 frame = (struct compat_sigframe __user *)regs->compat_sp;
579
580 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
581 goto badframe;
582
583 if (compat_restore_sigframe(regs, frame))
584 goto badframe;
585
586 return regs->regs[0];
587
588badframe:
589 if (show_unhandled_signals)
590 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
591 current->comm, task_pid_nr(current), __func__,
592 regs->pc, regs->sp);
593 force_sig(SIGSEGV, current);
594 return 0;
595}
596
597asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
598{
599 struct compat_rt_sigframe __user *frame;
600
601 /* Always make any pending restarted system calls return -EINTR */
602 current_thread_info()->restart_block.fn = do_no_restart_syscall;
603
604 /*
605 * Since we stacked the signal on a 64-bit boundary,
606 * then 'sp' should be word aligned here. If it's
607 * not, then the user is trying to mess with us.
608 */
609 if (regs->compat_sp & 7)
610 goto badframe;
611
612 frame = (struct compat_rt_sigframe __user *)regs->compat_sp;
613
614 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
615 goto badframe;
616
617 if (compat_restore_sigframe(regs, &frame->sig))
618 goto badframe;
619
620 if (compat_do_sigaltstack(ptr_to_compat(&frame->sig.uc.uc_stack),
621 ptr_to_compat((void __user *)NULL),
622 regs->compat_sp) == -EFAULT)
623 goto badframe;
624
625 return regs->regs[0];
626
627badframe:
628 if (show_unhandled_signals)
629 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
630 current->comm, task_pid_nr(current), __func__,
631 regs->pc, regs->sp);
632 force_sig(SIGSEGV, current);
633 return 0;
634}
635
636static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
637 struct pt_regs *regs,
638 int framesize)
639{
640 compat_ulong_t sp = regs->compat_sp;
641 void __user *frame;
642
643 /*
644 * This is the X/Open sanctioned signal stack switching.
645 */
646 if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
647 sp = current->sas_ss_sp + current->sas_ss_size;
648
649 /*
650 * ATPCS B01 mandates 8-byte alignment
651 */
652 frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7));
653
654 /*
655 * Check that we can actually write to the signal frame.
656 */
657 if (!access_ok(VERIFY_WRITE, frame, framesize))
658 frame = NULL;
659
660 return frame;
661}
662
663static int compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
664 compat_ulong_t __user *rc, void __user *frame,
665 int usig)
666{
667 compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
668 compat_ulong_t retcode;
669 compat_ulong_t spsr = regs->pstate & ~PSR_f;
670 int thumb;
671
672 /* Check if the handler is written for ARM or Thumb */
673 thumb = handler & 1;
674
675 if (thumb) {
676 spsr |= COMPAT_PSR_T_BIT;
677 spsr &= ~COMPAT_PSR_IT_MASK;
678 } else {
679 spsr &= ~COMPAT_PSR_T_BIT;
680 }
681
682 if (ka->sa.sa_flags & SA_RESTORER) {
683 retcode = ptr_to_compat(ka->sa.sa_restorer);
684 } else {
685 /* Set up sigreturn pointer */
686 unsigned int idx = thumb << 1;
687
688 if (ka->sa.sa_flags & SA_SIGINFO)
689 idx += 3;
690
691 retcode = AARCH32_VECTORS_BASE +
692 AARCH32_KERN_SIGRET_CODE_OFFSET +
693 (idx << 2) + thumb;
694 }
695
696 regs->regs[0] = usig;
697 regs->compat_sp = ptr_to_compat(frame);
698 regs->compat_lr = retcode;
699 regs->pc = handler;
700 regs->pstate = spsr;
701
702 return 0;
703}
704
705static int compat_setup_sigframe(struct compat_sigframe __user *sf,
706 struct pt_regs *regs, sigset_t *set)
707{
708 struct compat_aux_sigframe __user *aux;
709 int err = 0;
710
711 __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
712 __put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
713 __put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
714 __put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
715 __put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
716 __put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
717 __put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
718 __put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
719 __put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
720 __put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
721 __put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
722 __put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
723 __put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
724 __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
725 __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
726 __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
727 __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
728
729 __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
730 __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err);
731 __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
732 __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
733
734 err |= put_sigset_t(&sf->uc.uc_sigmask, set);
735
736 aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
737
738 if (err == 0)
739 err |= compat_preserve_vfp_context(&aux->vfp);
740 __put_user_error(0, &aux->end_magic, err);
741
742 return err;
743}
744
745/*
746 * 32-bit signal handling routines called from signal.c
747 */
748int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
749 sigset_t *set, struct pt_regs *regs)
750{
751 struct compat_rt_sigframe __user *frame;
752 compat_stack_t stack;
753 int err = 0;
754
755 frame = compat_get_sigframe(ka, regs, sizeof(*frame));
756
757 if (!frame)
758 return 1;
759
760 err |= copy_siginfo_to_user32(&frame->info, info);
761
762 __put_user_error(0, &frame->sig.uc.uc_flags, err);
763 __put_user_error(NULL, &frame->sig.uc.uc_link, err);
764
765 memset(&stack, 0, sizeof(stack));
766 stack.ss_sp = (compat_uptr_t)current->sas_ss_sp;
767 stack.ss_flags = sas_ss_flags(regs->compat_sp);
768 stack.ss_size = current->sas_ss_size;
769 err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
770
771 err |= compat_setup_sigframe(&frame->sig, regs, set);
772 if (err == 0)
773 err = compat_setup_return(regs, ka, frame->sig.retcode, frame,
774 usig);
775
776 if (err == 0) {
777 regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
778 regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
779 }
780
781 return err;
782}
783
784int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
785 struct pt_regs *regs)
786{
787 struct compat_sigframe __user *frame;
788 int err = 0;
789
790 frame = compat_get_sigframe(ka, regs, sizeof(*frame));
791
792 if (!frame)
793 return 1;
794
795 __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
796
797 err |= compat_setup_sigframe(frame, regs, set);
798 if (err == 0)
799 err = compat_setup_return(regs, ka, frame->retcode, frame, usig);
800
801 return err;
802}
803
804/*
805 * RT signals don't have generic compat wrappers.
806 * See arch/powerpc/kernel/signal_32.c
807 */
808asmlinkage int compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
809 compat_sigset_t __user *oset,
810 compat_size_t sigsetsize)
811{
812 sigset_t s;
813 sigset_t __user *up;
814 int ret;
815 mm_segment_t old_fs = get_fs();
816
817 if (set) {
818 if (get_sigset_t(&s, set))
819 return -EFAULT;
820 }
821
822 set_fs(KERNEL_DS);
823 /* This is valid because of the set_fs() */
824 up = (sigset_t __user *) &s;
825 ret = sys_rt_sigprocmask(how, set ? up : NULL, oset ? up : NULL,
826 sigsetsize);
827 set_fs(old_fs);
828 if (ret)
829 return ret;
830 if (oset) {
831 if (put_sigset_t(oset, &s))
832 return -EFAULT;
833 }
834 return 0;
835}
836
837asmlinkage int compat_sys_rt_sigpending(compat_sigset_t __user *set,
838 compat_size_t sigsetsize)
839{
840 sigset_t s;
841 int ret;
842 mm_segment_t old_fs = get_fs();
843
844 set_fs(KERNEL_DS);
845 /* The __user pointer cast is valid because of the set_fs() */
846 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
847 set_fs(old_fs);
848 if (!ret) {
849 if (put_sigset_t(set, &s))
850 return -EFAULT;
851 }
852 return ret;
853}
854
855asmlinkage int compat_sys_rt_sigqueueinfo(int pid, int sig,
856 compat_siginfo_t __user *uinfo)
857{
858 siginfo_t info;
859 int ret;
860 mm_segment_t old_fs = get_fs();
861
862 ret = copy_siginfo_from_user32(&info, uinfo);
863 if (unlikely(ret))
864 return ret;
865
866 set_fs(KERNEL_DS);
867 /* The __user pointer cast is valid because of the set_fs() */
868 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
869 set_fs(old_fs);
870 return ret;
871}
872
873void compat_setup_restart_syscall(struct pt_regs *regs)
874{
875 regs->regs[7] = __NR_restart_syscall;
876}
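
put_sigset_t() and get_sigset_t() above convert between the native 64-bit sigset_t and the two 32-bit words an AArch32 process expects. A self-contained sketch of that round trip, with made-up type and function names, is shown below; it is illustrative only and not part of this patch.

#include <assert.h>
#include <stdint.h>

struct compat_mask { uint32_t sig[2]; };	/* stand-in for compat_sigset_t */

static struct compat_mask split_mask(uint64_t native)
{
	struct compat_mask c;

	c.sig[0] = (uint32_t)(native & 0xffffffffull);	/* low word, as in put_sigset_t() */
	c.sig[1] = (uint32_t)(native >> 32);		/* high word */
	return c;
}

static uint64_t join_mask(struct compat_mask c)
{
	/* as in get_sigset_t(): recombine the two words */
	return (uint64_t)c.sig[0] | ((uint64_t)c.sig[1] << 32);
}

int main(void)
{
	uint64_t mask = (1ull << 9) | (1ull << 41);	/* two arbitrary signal bits */

	assert(join_mask(split_mask(mask)) == mask);
	return 0;
}
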
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
new file mode 100644
index 000000000000..b711525be21f
--- /dev/null
+++ b/arch/arm64/kernel/smp.c
@@ -0,0 +1,469 @@
1/*
2 * SMP initialisation and IPI support
3 * Based on arch/arm/kernel/smp.c
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/delay.h>
21#include <linux/init.h>
22#include <linux/spinlock.h>
23#include <linux/sched.h>
24#include <linux/interrupt.h>
25#include <linux/cache.h>
26#include <linux/profile.h>
27#include <linux/errno.h>
28#include <linux/mm.h>
29#include <linux/err.h>
30#include <linux/cpu.h>
31#include <linux/smp.h>
32#include <linux/seq_file.h>
33#include <linux/irq.h>
34#include <linux/percpu.h>
35#include <linux/clockchips.h>
36#include <linux/completion.h>
37#include <linux/of.h>
38
39#include <asm/atomic.h>
40#include <asm/cacheflush.h>
41#include <asm/cputype.h>
42#include <asm/mmu_context.h>
43#include <asm/pgtable.h>
44#include <asm/pgalloc.h>
45#include <asm/processor.h>
46#include <asm/sections.h>
47#include <asm/tlbflush.h>
48#include <asm/ptrace.h>
49#include <asm/mmu_context.h>
50
51/*
52 * As of 2.5, kernels no longer have an init_tasks structure,
53 * so we need some other way of telling a new secondary core
54 * where to place its SVC stack.
55 */
56struct secondary_data secondary_data;
57volatile unsigned long secondary_holding_pen_release = -1;
58
59enum ipi_msg_type {
60 IPI_RESCHEDULE,
61 IPI_CALL_FUNC,
62 IPI_CALL_FUNC_SINGLE,
63 IPI_CPU_STOP,
64};
65
66static DEFINE_RAW_SPINLOCK(boot_lock);
67
68/*
69 * Write secondary_holding_pen_release in a way that is guaranteed to be
70 * visible to all observers, irrespective of whether they're taking part
71 * in coherency or not. This is necessary for the hotplug code to work
72 * reliably.
73 */
74static void __cpuinit write_pen_release(int val)
75{
76 void *start = (void *)&secondary_holding_pen_release;
77 unsigned long size = sizeof(secondary_holding_pen_release);
78
79 secondary_holding_pen_release = val;
80 __flush_dcache_area(start, size);
81}
82
83/*
84 * Boot a secondary CPU, and assign it the specified idle task.
85 * This also gives us the initial stack to use for this CPU.
86 */
87static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
88{
89 unsigned long timeout;
90
91 /*
92 * Set synchronisation state between this boot processor
93 * and the secondary one
94 */
95 raw_spin_lock(&boot_lock);
96
97 /*
98 * Update the pen release flag.
99 */
100 write_pen_release(cpu);
101
102 /*
103 * Send an event, causing the secondaries to read pen_release.
104 */
105 sev();
106
107 timeout = jiffies + (1 * HZ);
108 while (time_before(jiffies, timeout)) {
109 if (secondary_holding_pen_release == -1UL)
110 break;
111 udelay(10);
112 }
113
114 /*
115 * Now that the secondary core is starting up, let it run its
116 * calibrations, then wait for it to finish.
117 */
118 raw_spin_unlock(&boot_lock);
119
120 return secondary_holding_pen_release != -1 ? -ENOSYS : 0;
121}
122
123static DECLARE_COMPLETION(cpu_running);
124
125int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
126{
127 int ret;
128
129 /*
130 * We need to tell the secondary core where to find its stack and the
131 * page tables.
132 */
133 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
134 __flush_dcache_area(&secondary_data, sizeof(secondary_data));
135
136 /*
137 * Now bring the CPU into our world.
138 */
139 ret = boot_secondary(cpu, idle);
140 if (ret == 0) {
141 /*
142 * CPU was successfully started, wait for it to come online or
143 * time out.
144 */
145 wait_for_completion_timeout(&cpu_running,
146 msecs_to_jiffies(1000));
147
148 if (!cpu_online(cpu)) {
149 pr_crit("CPU%u: failed to come online\n", cpu);
150 ret = -EIO;
151 }
152 } else {
153 pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
154 }
155
156 secondary_data.stack = NULL;
157
158 return ret;
159}
160
161/*
162 * This is the secondary CPU boot entry. We're using this CPU's
163 * idle thread stack, but a set of temporary page tables.
164 */
165asmlinkage void __cpuinit secondary_start_kernel(void)
166{
167 struct mm_struct *mm = &init_mm;
168 unsigned int cpu = smp_processor_id();
169
170 printk("CPU%u: Booted secondary processor\n", cpu);
171
172 /*
173 * All kernel threads share the same mm context; grab a
174 * reference and switch to it.
175 */
176 atomic_inc(&mm->mm_count);
177 current->active_mm = mm;
178 cpumask_set_cpu(cpu, mm_cpumask(mm));
179
180 /*
181 * TTBR0 is only used for the identity mapping at this stage. Make it
182 * point to zero page to avoid speculatively fetching new entries.
183 */
184 cpu_set_reserved_ttbr0();
185 flush_tlb_all();
186
187 preempt_disable();
188 trace_hardirqs_off();
189
190 /*
191 * Let the primary processor know we're out of the
192 * pen, then head off into the C entry point
193 */
194 write_pen_release(-1);
195
196 /*
197 * Synchronise with the boot thread.
198 */
199 raw_spin_lock(&boot_lock);
200 raw_spin_unlock(&boot_lock);
201
202 /*
203 * Enable local interrupts.
204 */
205 notify_cpu_starting(cpu);
206 local_irq_enable();
207 local_fiq_enable();
208
209 /*
210 * OK, now it's safe to let the boot CPU continue. Wait for
211 * the CPU migration code to notice that the CPU is online
212 * before we continue.
213 */
214 set_cpu_online(cpu, true);
215 while (!cpu_active(cpu))
216 cpu_relax();
217
218 /*
219 * OK, it's off to the idle thread for us
220 */
221 cpu_idle();
222}
223
224void __init smp_cpus_done(unsigned int max_cpus)
225{
226 unsigned long bogosum = loops_per_jiffy * num_online_cpus();
227
228 pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
229 num_online_cpus(), bogosum / (500000/HZ),
230 (bogosum / (5000/HZ)) % 100);
231}
232
233void __init smp_prepare_boot_cpu(void)
234{
235}
236
237static void (*smp_cross_call)(const struct cpumask *, unsigned int);
238static phys_addr_t cpu_release_addr[NR_CPUS];
239
240/*
241 * Enumerate the possible CPU set from the device tree.
242 */
243void __init smp_init_cpus(void)
244{
245 const char *enable_method;
246 struct device_node *dn = NULL;
247 int cpu = 0;
248
249 while ((dn = of_find_node_by_type(dn, "cpu"))) {
250 if (cpu >= NR_CPUS)
251 goto next;
252
253 /*
254 * We currently support only the "spin-table" enable-method.
255 */
256 enable_method = of_get_property(dn, "enable-method", NULL);
257 if (!enable_method || strcmp(enable_method, "spin-table")) {
258 pr_err("CPU %d: missing or invalid enable-method property: %s\n",
259 cpu, enable_method);
260 goto next;
261 }
262
263 /*
264 * Determine the address from which the CPU is polling.
265 */
266 if (of_property_read_u64(dn, "cpu-release-addr",
267 &cpu_release_addr[cpu])) {
268 pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
269 cpu);
270 goto next;
271 }
272
273 set_cpu_possible(cpu, true);
274next:
275 cpu++;
276 }
277
278 /* sanity check */
279 if (cpu > NR_CPUS)
280 pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
281 cpu, NR_CPUS);
282}
283
284void __init smp_prepare_cpus(unsigned int max_cpus)
285{
286 int cpu;
287 void **release_addr;
288 unsigned int ncores = num_possible_cpus();
289
290 /*
291 * are we trying to boot more cores than exist?
292 */
293 if (max_cpus > ncores)
294 max_cpus = ncores;
295
296 /*
297 * Initialise the present map (which describes the set of CPUs
298 * actually populated at the present time) and release the
299 * secondaries from the bootloader.
300 */
301 for_each_possible_cpu(cpu) {
302 if (max_cpus == 0)
303 break;
304
305 if (!cpu_release_addr[cpu])
306 continue;
307
308 release_addr = __va(cpu_release_addr[cpu]);
309 release_addr[0] = (void *)__pa(secondary_holding_pen);
310 __flush_dcache_area(release_addr, sizeof(release_addr[0]));
311
312 set_cpu_present(cpu, true);
313 max_cpus--;
314 }
315
316 /*
317 * Send an event to wake up the secondaries.
318 */
319 sev();
320}
321
322
323void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
324{
325 smp_cross_call = fn;
326}
327
328void arch_send_call_function_ipi_mask(const struct cpumask *mask)
329{
330 smp_cross_call(mask, IPI_CALL_FUNC);
331}
332
333void arch_send_call_function_single_ipi(int cpu)
334{
335 smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
336}
337
338static const char *ipi_types[NR_IPI] = {
339#define S(x,s) [x - IPI_RESCHEDULE] = s
340 S(IPI_RESCHEDULE, "Rescheduling interrupts"),
341 S(IPI_CALL_FUNC, "Function call interrupts"),
342 S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
343 S(IPI_CPU_STOP, "CPU stop interrupts"),
344};
345
346void show_ipi_list(struct seq_file *p, int prec)
347{
348 unsigned int cpu, i;
349
350 for (i = 0; i < NR_IPI; i++) {
351 seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
352 prec >= 4 ? " " : "");
353 for_each_present_cpu(cpu)
354 seq_printf(p, "%10u ",
355 __get_irq_stat(cpu, ipi_irqs[i]));
356 seq_printf(p, " %s\n", ipi_types[i]);
357 }
358}
359
360u64 smp_irq_stat_cpu(unsigned int cpu)
361{
362 u64 sum = 0;
363 int i;
364
365 for (i = 0; i < NR_IPI; i++)
366 sum += __get_irq_stat(cpu, ipi_irqs[i]);
367
368 return sum;
369}
370
371static DEFINE_RAW_SPINLOCK(stop_lock);
372
373/*
374 * ipi_cpu_stop - handle IPI from smp_send_stop()
375 */
376static void ipi_cpu_stop(unsigned int cpu)
377{
378 if (system_state == SYSTEM_BOOTING ||
379 system_state == SYSTEM_RUNNING) {
380 raw_spin_lock(&stop_lock);
381 pr_crit("CPU%u: stopping\n", cpu);
382 dump_stack();
383 raw_spin_unlock(&stop_lock);
384 }
385
386 set_cpu_online(cpu, false);
387
388 local_fiq_disable();
389 local_irq_disable();
390
391 while (1)
392 cpu_relax();
393}
394
395/*
396 * Main handler for inter-processor interrupts
397 */
398void handle_IPI(int ipinr, struct pt_regs *regs)
399{
400 unsigned int cpu = smp_processor_id();
401 struct pt_regs *old_regs = set_irq_regs(regs);
402
403 if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
404 __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
405
406 switch (ipinr) {
407 case IPI_RESCHEDULE:
408 scheduler_ipi();
409 break;
410
411 case IPI_CALL_FUNC:
412 irq_enter();
413 generic_smp_call_function_interrupt();
414 irq_exit();
415 break;
416
417 case IPI_CALL_FUNC_SINGLE:
418 irq_enter();
419 generic_smp_call_function_single_interrupt();
420 irq_exit();
421 break;
422
423 case IPI_CPU_STOP:
424 irq_enter();
425 ipi_cpu_stop(cpu);
426 irq_exit();
427 break;
428
429 default:
430 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
431 break;
432 }
433 set_irq_regs(old_regs);
434}
435
436void smp_send_reschedule(int cpu)
437{
438 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
439}
440
441void smp_send_stop(void)
442{
443 unsigned long timeout;
444
445 if (num_online_cpus() > 1) {
446 cpumask_t mask;
447
448 cpumask_copy(&mask, cpu_online_mask);
449 cpu_clear(smp_processor_id(), mask);
450
451 smp_cross_call(&mask, IPI_CPU_STOP);
452 }
453
454 /* Wait up to one second for other CPUs to stop */
455 timeout = USEC_PER_SEC;
456 while (num_online_cpus() > 1 && timeout--)
457 udelay(1);
458
459 if (num_online_cpus() > 1)
460 pr_warning("SMP: failed to stop secondary CPUs\n");
461}
462
463/*
464 * not supported here
465 */
466int setup_profiling_timer(unsigned int multiplier)
467{
468 return -EINVAL;
469}
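
smp_init_cpus() and smp_prepare_cpus() above implement the boot half of the spin-table protocol: each cpu node in the device tree names a cpu-release-addr, the kernel writes the physical address of secondary_holding_pen there, and boot_secondary() later writes the target CPU number into secondary_holding_pen_release and issues SEV. The sketch below models the secondary's side of that handshake in plain C; the real loop is assembly in head.S, and every name and address here is illustrative only, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the bootloader-owned release word and the
 * kernel's holding-pen flag. */
static volatile uint64_t release_addr;		/* what cpu-release-addr points at */
static volatile long pen_release = -1;		/* secondary_holding_pen_release */

static void wfe(void)
{
	/* The real code executes a WFE hint here and re-checks after SEV. */
}

static void secondary_spin(long my_cpu)
{
	while (release_addr == 0)		/* stage 1: bootloader pen */
		wfe();
	/* ...the core would now branch to release_addr, i.e. secondary_holding_pen... */

	while (pen_release != my_cpu)		/* stage 2: kernel pen */
		wfe();

	printf("cpu%ld released, entering secondary_startup()\n", my_cpu);
}

int main(void)
{
	/* In order, what smp_prepare_cpus() and boot_secondary() do: */
	release_addr = 0x80000;			/* made-up pa of secondary_holding_pen */
	pen_release = 1;			/* write_pen_release(1), followed by sev() */
	secondary_spin(1);
	return 0;
}

In the real protocol the two writes happen on the boot CPU while the secondary spins concurrently; the single-threaded ordering above is only so the sketch terminates.
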
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
new file mode 100644
index 000000000000..d25459ff57fc
--- /dev/null
+++ b/arch/arm64/kernel/stacktrace.c
@@ -0,0 +1,127 @@
1/*
2 * Stack tracing support
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/export.h>
20#include <linux/sched.h>
21#include <linux/stacktrace.h>
22
23#include <asm/stacktrace.h>
24
25/*
26 * AArch64 PCS assigns the frame pointer to x29.
27 *
28 * A simple function prologue looks like this:
29 * sub sp, sp, #0x10
30 * stp x29, x30, [sp]
31 * mov x29, sp
32 *
33 * A simple function epilogue looks like this:
34 * mov sp, x29
35 * ldp x29, x30, [sp]
36 * add sp, sp, #0x10
37 */
38int unwind_frame(struct stackframe *frame)
39{
40 unsigned long high, low;
41 unsigned long fp = frame->fp;
42
43 low = frame->sp;
44 high = ALIGN(low, THREAD_SIZE);
45
46 if (fp < low || fp > high || fp & 0xf)
47 return -EINVAL;
48
49 frame->sp = fp + 0x10;
50 frame->fp = *(unsigned long *)(fp);
51 frame->pc = *(unsigned long *)(fp + 8);
52
53 return 0;
54}
55
56void notrace walk_stackframe(struct stackframe *frame,
57 int (*fn)(struct stackframe *, void *), void *data)
58{
59 while (1) {
60 int ret;
61
62 if (fn(frame, data))
63 break;
64 ret = unwind_frame(frame);
65 if (ret < 0)
66 break;
67 }
68}
69EXPORT_SYMBOL(walk_stackframe);
70
71#ifdef CONFIG_STACKTRACE
72struct stack_trace_data {
73 struct stack_trace *trace;
74 unsigned int no_sched_functions;
75 unsigned int skip;
76};
77
78static int save_trace(struct stackframe *frame, void *d)
79{
80 struct stack_trace_data *data = d;
81 struct stack_trace *trace = data->trace;
82 unsigned long addr = frame->pc;
83
84 if (data->no_sched_functions && in_sched_functions(addr))
85 return 0;
86 if (data->skip) {
87 data->skip--;
88 return 0;
89 }
90
91 trace->entries[trace->nr_entries++] = addr;
92
93 return trace->nr_entries >= trace->max_entries;
94}
95
96void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
97{
98 struct stack_trace_data data;
99 struct stackframe frame;
100
101 data.trace = trace;
102 data.skip = trace->skip;
103
104 if (tsk != current) {
105 data.no_sched_functions = 1;
106 frame.fp = thread_saved_fp(tsk);
107 frame.sp = thread_saved_sp(tsk);
108 frame.pc = thread_saved_pc(tsk);
109 } else {
110 register unsigned long current_sp asm("sp");
111 data.no_sched_functions = 0;
112 frame.fp = (unsigned long)__builtin_frame_address(0);
113 frame.sp = current_sp;
114 frame.pc = (unsigned long)save_stack_trace_tsk;
115 }
116
117 walk_stackframe(&frame, save_trace, &data);
118 if (trace->nr_entries < trace->max_entries)
119 trace->entries[trace->nr_entries++] = ULONG_MAX;
120}
121
122void save_stack_trace(struct stack_trace *trace)
123{
124 save_stack_trace_tsk(current, trace);
125}
126EXPORT_SYMBOL_GPL(save_stack_trace);
127#endif
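
unwind_frame() above steps through AAPCS64 frame records: x29 points at a {previous x29, saved x30} pair, so the caller's frame pointer lives at [fp] and its return address at [fp + 8]. The same walk can be done from userspace when the binary keeps frame pointers; the sketch below is illustrative only, not part of this patch, and assumes a build with -fno-omit-frame-pointer (or -O0).

#include <stdint.h>
#include <stdio.h>

static void backtrace_fp(void)
{
	uintptr_t fp = (uintptr_t)__builtin_frame_address(0);

	while (fp && !(fp & 0xf)) {
		uintptr_t next_fp = *(uintptr_t *)fp;		/* saved x29 */
		uintptr_t pc = *(uintptr_t *)(fp + 8);		/* saved x30 */

		if (!pc)
			break;
		printf("  pc=%#lx fp=%#lx\n", (unsigned long)pc, (unsigned long)fp);
		if (next_fp <= fp)	/* frames must move up the stack */
			break;
		fp = next_fp;
	}
}

static void inner(void) { backtrace_fp(); }
static void outer(void) { inner(); }

int main(void)
{
	outer();
	return 0;
}
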
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
new file mode 100644
index 000000000000..905fcfb0ddd0
--- /dev/null
+++ b/arch/arm64/kernel/sys.c
@@ -0,0 +1,138 @@
1/*
2 * AArch64-specific system calls implementation
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/compiler.h>
21#include <linux/errno.h>
22#include <linux/fs.h>
23#include <linux/mm.h>
24#include <linux/export.h>
25#include <linux/sched.h>
26#include <linux/slab.h>
27#include <linux/syscalls.h>
28
29/*
30 * Clone a task - this clones the calling program thread.
31 */
32asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
33 int __user *parent_tidptr, unsigned long tls_val,
34 int __user *child_tidptr, struct pt_regs *regs)
35{
36 if (!newsp)
37 newsp = regs->sp;
38 /* 16-byte aligned stack mandatory on AArch64 */
39 if (newsp & 15)
40 return -EINVAL;
41 return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
42}
43
44/*
45 * sys_execve() executes a new program.
46 */
47asmlinkage long sys_execve(const char __user *filenamei,
48 const char __user *const __user *argv,
49 const char __user *const __user *envp,
50 struct pt_regs *regs)
51{
52 long error;
53 char *filename;
54
55 filename = getname(filenamei);
56 error = PTR_ERR(filename);
57 if (IS_ERR(filename))
58 goto out;
59 error = do_execve(filename, argv, envp, regs);
60 putname(filename);
61out:
62 return error;
63}
64
65int kernel_execve(const char *filename,
66 const char *const argv[],
67 const char *const envp[])
68{
69 struct pt_regs regs;
70 int ret;
71
72 memset(&regs, 0, sizeof(struct pt_regs));
73 ret = do_execve(filename,
74 (const char __user *const __user *)argv,
75 (const char __user *const __user *)envp, &regs);
76 if (ret < 0)
77 goto out;
78
79 /*
80 * Save argc to the register structure for userspace.
81 */
82 regs.regs[0] = ret;
83
84 /*
85 * We were successful. We won't be returning to our caller, but
86 * instead to user space by manipulating the kernel stack.
87 */
88 asm( "add x0, %0, %1\n\t"
89 "mov x1, %2\n\t"
90 "mov x2, %3\n\t"
91 "bl memmove\n\t" /* copy regs to top of stack */
92 "mov x27, #0\n\t" /* not a syscall */
93 "mov x28, %0\n\t" /* thread structure */
94 "mov sp, x0\n\t" /* reposition stack pointer */
95 "b ret_to_user"
96 :
97 : "r" (current_thread_info()),
98 "Ir" (THREAD_START_SP - sizeof(regs)),
99 "r" (&regs),
100 "Ir" (sizeof(regs))
101 : "x0", "x1", "x2", "x27", "x28", "x30", "memory");
102
103 out:
104 return ret;
105}
106EXPORT_SYMBOL(kernel_execve);
107
108asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
109 unsigned long prot, unsigned long flags,
110 unsigned long fd, off_t off)
111{
112 if (offset_in_page(off) != 0)
113 return -EINVAL;
114
115 return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
116}
117
118/*
119 * Wrappers to pass the pt_regs argument.
120 */
121#define sys_execve sys_execve_wrapper
122#define sys_clone sys_clone_wrapper
123#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
124#define sys_sigaltstack sys_sigaltstack_wrapper
125
126#include <asm/syscalls.h>
127
128#undef __SYSCALL
129#define __SYSCALL(nr, sym) [nr] = sym,
130
131/*
132 * The sys_call_table array must be 4K aligned to be accessible from
133 * kernel/entry.S.
134 */
135void *sys_call_table[__NR_syscalls] __aligned(4096) = {
136 [0 ... __NR_syscalls - 1] = sys_ni_syscall,
137#include <asm/unistd.h>
138};
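
The table above is filled in with an X-macro trick: every slot first defaults to sys_ni_syscall via a GCC range designator, then __SYSCALL is redefined to an array-designator initializer and asm/unistd.h is expanded inside the initializer so each implemented call overwrites its slot. A self-contained miniature of the same pattern follows; all names in it are made up and it is not part of this patch.

#include <stdio.h>

/* A made-up call list playing the role of <asm/unistd.h>. */
#define CALL_LIST			\
	CALL(0, demo_read)		\
	CALL(1, demo_write)		\
	CALL(3, demo_close)		/* slot 2 deliberately left unimplemented */

#define NR_CALLS 4

static long demo_read(void)  { return 100; }
static long demo_write(void) { return 200; }
static long demo_close(void) { return 300; }
static long demo_ni(void)    { return -38; }	/* like sys_ni_syscall returning -ENOSYS */

/* Redefine the per-entry macro to an array designator, as sys.c does with
 * __SYSCALL(nr, sym), then expand the list inside the initializer. */
#define CALL(nr, sym) [nr] = sym,

static long (*call_table[NR_CALLS])(void) = {
	[0 ... NR_CALLS - 1] = demo_ni,		/* GCC range designator, as in sys_call_table */
	CALL_LIST
};

int main(void)
{
	for (int nr = 0; nr < NR_CALLS; nr++)
		printf("call %d -> %ld\n", nr, call_table[nr]());
	return 0;
}
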
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
new file mode 100644
index 000000000000..5e4dc93cc31f
--- /dev/null
+++ b/arch/arm64/kernel/sys32.S
@@ -0,0 +1,282 @@
1/*
2 * Compat system call wrappers
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Authors: Will Deacon <will.deacon@arm.com>
6 * Catalin Marinas <catalin.marinas@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/linkage.h>
22
23#include <asm/assembler.h>
24#include <asm/asm-offsets.h>
25
26/*
27 * System call wrappers for the AArch32 compatibility layer.
28 */
29compat_sys_fork_wrapper:
30 mov x0, sp
31 b compat_sys_fork
32ENDPROC(compat_sys_fork_wrapper)
33
34compat_sys_vfork_wrapper:
35 mov x0, sp
36 b compat_sys_vfork
37ENDPROC(compat_sys_vfork_wrapper)
38
39compat_sys_execve_wrapper:
40 mov x3, sp
41 b compat_sys_execve
42ENDPROC(compat_sys_execve_wrapper)
43
44compat_sys_clone_wrapper:
45 mov x5, sp
46 b compat_sys_clone
47ENDPROC(compat_sys_clone_wrapper)
48
49compat_sys_sigreturn_wrapper:
50 mov x0, sp
51 mov x27, #0 // prevent syscall restart handling (why)
52 b compat_sys_sigreturn
53ENDPROC(compat_sys_sigreturn_wrapper)
54
55compat_sys_rt_sigreturn_wrapper:
56 mov x0, sp
57 mov x27, #0 // prevent syscall restart handling (why)
58 b compat_sys_rt_sigreturn
59ENDPROC(compat_sys_rt_sigreturn_wrapper)
60
61compat_sys_sigaltstack_wrapper:
62 ldr x2, [sp, #S_COMPAT_SP]
63 b compat_do_sigaltstack
64ENDPROC(compat_sys_sigaltstack_wrapper)
65
66compat_sys_statfs64_wrapper:
67 mov w3, #84
68 cmp w1, #88
69 csel w1, w3, w1, eq
70 b compat_sys_statfs64
71ENDPROC(compat_sys_statfs64_wrapper)
72
73compat_sys_fstatfs64_wrapper:
74 mov w3, #84
75 cmp w1, #88
76 csel w1, w3, w1, eq
77 b compat_sys_fstatfs64
78ENDPROC(compat_sys_fstatfs64_wrapper)
79
80/*
81 * Wrappers for AArch32 syscalls that either take 64-bit parameters
82 * in registers or that take 32-bit parameters which require sign
83 * extension.
84 */
85compat_sys_lseek_wrapper:
86 sxtw x1, w1
87 b sys_lseek
88ENDPROC(compat_sys_lseek_wrapper)
89
90compat_sys_pread64_wrapper:
91 orr x3, x4, x5, lsl #32
92 b sys_pread64
93ENDPROC(compat_sys_pread64_wrapper)
94
95compat_sys_pwrite64_wrapper:
96 orr x3, x4, x5, lsl #32
97 b sys_pwrite64
98ENDPROC(compat_sys_pwrite64_wrapper)
99
100compat_sys_truncate64_wrapper:
101 orr x1, x2, x3, lsl #32
102 b sys_truncate
103ENDPROC(compat_sys_truncate64_wrapper)
104
105compat_sys_ftruncate64_wrapper:
106 orr x1, x2, x3, lsl #32
107 b sys_ftruncate
108ENDPROC(compat_sys_ftruncate64_wrapper)
109
110compat_sys_readahead_wrapper:
111 orr x1, x2, x3, lsl #32
112 mov w2, w4
113 b sys_readahead
114ENDPROC(compat_sys_readahead_wrapper)
115
116compat_sys_lookup_dcookie:
117 orr x0, x0, x1, lsl #32
118 mov w1, w2
119 mov w2, w3
120 b sys_lookup_dcookie
121ENDPROC(compat_sys_lookup_dcookie)
122
123compat_sys_fadvise64_64_wrapper:
124 mov w6, w1
125 orr x1, x2, x3, lsl #32
126 orr x2, x4, x5, lsl #32
127 mov w3, w6
128 b sys_fadvise64_64
129ENDPROC(compat_sys_fadvise64_64_wrapper)
130
131compat_sys_sync_file_range2_wrapper:
132 orr x2, x2, x3, lsl #32
133 orr x3, x4, x5, lsl #32
134 b sys_sync_file_range2
135ENDPROC(compat_sys_sync_file_range2_wrapper)
136
137compat_sys_fallocate_wrapper:
138 orr x2, x2, x3, lsl #32
139 orr x3, x4, x5, lsl #32
140 b sys_fallocate
141ENDPROC(compat_sys_fallocate_wrapper)
142
143compat_sys_fanotify_mark_wrapper:
144 orr x2, x2, x3, lsl #32
145 mov w3, w4
146 mov w4, w5
147 b sys_fanotify_mark
148ENDPROC(compat_sys_fanotify_mark_wrapper)
149
150/*
151 * Use the compat system call wrappers.
152 */
153#define sys_fork compat_sys_fork_wrapper
154#define sys_open compat_sys_open
155#define sys_execve compat_sys_execve_wrapper
156#define sys_lseek compat_sys_lseek_wrapper
157#define sys_mount compat_sys_mount
158#define sys_ptrace compat_sys_ptrace
159#define sys_times compat_sys_times
160#define sys_ioctl compat_sys_ioctl
161#define sys_fcntl compat_sys_fcntl
162#define sys_ustat compat_sys_ustat
163#define sys_sigaction compat_sys_sigaction
164#define sys_sigsuspend compat_sys_sigsuspend
165#define sys_sigpending compat_sys_sigpending
166#define sys_setrlimit compat_sys_setrlimit
167#define sys_getrusage compat_sys_getrusage
168#define sys_gettimeofday compat_sys_gettimeofday
169#define sys_settimeofday compat_sys_settimeofday
170#define sys_statfs compat_sys_statfs
171#define sys_fstatfs compat_sys_fstatfs
172#define sys_setitimer compat_sys_setitimer
173#define sys_getitimer compat_sys_getitimer
174#define sys_newstat compat_sys_newstat
175#define sys_newlstat compat_sys_newlstat
176#define sys_newfstat compat_sys_newfstat
177#define sys_wait4 compat_sys_wait4
178#define sys_sysinfo compat_sys_sysinfo
179#define sys_sigreturn compat_sys_sigreturn_wrapper
180#define sys_clone compat_sys_clone_wrapper
181#define sys_adjtimex compat_sys_adjtimex
182#define sys_sigprocmask compat_sys_sigprocmask
183#define sys_getdents compat_sys_getdents
184#define sys_select compat_sys_select
185#define sys_readv compat_sys_readv
186#define sys_writev compat_sys_writev
187#define sys_sysctl compat_sys_sysctl
188#define sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
189#define sys_nanosleep compat_sys_nanosleep
190#define sys_rt_sigreturn compat_sys_rt_sigreturn_wrapper
191#define sys_rt_sigaction compat_sys_rt_sigaction
192#define sys_rt_sigprocmask compat_sys_rt_sigprocmask
193#define sys_rt_sigpending compat_sys_rt_sigpending
194#define sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
195#define sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
196#define sys_rt_sigsuspend compat_sys_rt_sigsuspend
197#define sys_pread64 compat_sys_pread64_wrapper
198#define sys_pwrite64 compat_sys_pwrite64_wrapper
199#define sys_sigaltstack compat_sys_sigaltstack_wrapper
200#define sys_sendfile compat_sys_sendfile
201#define sys_vfork compat_sys_vfork_wrapper
202#define sys_getrlimit compat_sys_getrlimit
203#define sys_mmap2 sys_mmap_pgoff
204#define sys_truncate64 compat_sys_truncate64_wrapper
205#define sys_ftruncate64 compat_sys_ftruncate64_wrapper
206#define sys_getdents64 compat_sys_getdents64
207#define sys_fcntl64 compat_sys_fcntl64
208#define sys_readahead compat_sys_readahead_wrapper
209#define sys_futex compat_sys_futex
210#define sys_sched_setaffinity compat_sys_sched_setaffinity
211#define sys_sched_getaffinity compat_sys_sched_getaffinity
212#define sys_io_setup compat_sys_io_setup
213#define sys_io_getevents compat_sys_io_getevents
214#define sys_io_submit compat_sys_io_submit
215#define sys_lookup_dcookie compat_sys_lookup_dcookie
216#define sys_timer_create compat_sys_timer_create
217#define sys_timer_settime compat_sys_timer_settime
218#define sys_timer_gettime compat_sys_timer_gettime
219#define sys_clock_settime compat_sys_clock_settime
220#define sys_clock_gettime compat_sys_clock_gettime
221#define sys_clock_getres compat_sys_clock_getres
222#define sys_clock_nanosleep compat_sys_clock_nanosleep
223#define sys_statfs64 compat_sys_statfs64_wrapper
224#define sys_fstatfs64 compat_sys_fstatfs64_wrapper
225#define sys_utimes compat_sys_utimes
226#define sys_fadvise64_64 compat_sys_fadvise64_64_wrapper
227#define sys_mq_open compat_sys_mq_open
228#define sys_mq_timedsend compat_sys_mq_timedsend
229#define sys_mq_timedreceive compat_sys_mq_timedreceive
230#define sys_mq_notify compat_sys_mq_notify
231#define sys_mq_getsetattr compat_sys_mq_getsetattr
232#define sys_waitid compat_sys_waitid
233#define sys_recv compat_sys_recv
234#define sys_recvfrom compat_sys_recvfrom
235#define sys_setsockopt compat_sys_setsockopt
236#define sys_getsockopt compat_sys_getsockopt
237#define sys_sendmsg compat_sys_sendmsg
238#define sys_recvmsg compat_sys_recvmsg
239#define sys_semctl compat_sys_semctl
240#define sys_msgsnd compat_sys_msgsnd
241#define sys_msgrcv compat_sys_msgrcv
242#define sys_msgctl compat_sys_msgctl
243#define sys_shmat compat_sys_shmat
244#define sys_shmctl compat_sys_shmctl
245#define sys_keyctl compat_sys_keyctl
246#define sys_semtimedop compat_sys_semtimedop
247#define sys_mbind compat_sys_mbind
248#define sys_get_mempolicy compat_sys_get_mempolicy
249#define sys_set_mempolicy compat_sys_set_mempolicy
250#define sys_openat compat_sys_openat
251#define sys_futimesat compat_sys_futimesat
252#define sys_pselect6 compat_sys_pselect6
253#define sys_ppoll compat_sys_ppoll
254#define sys_set_robust_list compat_sys_set_robust_list
255#define sys_get_robust_list compat_sys_get_robust_list
256#define sys_sync_file_range2 compat_sys_sync_file_range2_wrapper
257#define sys_vmsplice compat_sys_vmsplice
258#define sys_move_pages compat_sys_move_pages
259#define sys_epoll_pwait compat_sys_epoll_pwait
260#define sys_kexec_load compat_sys_kexec_load
261#define sys_utimensat compat_sys_utimensat
262#define sys_signalfd compat_sys_signalfd
263#define sys_fallocate compat_sys_fallocate_wrapper
264#define sys_timerfd_settime compat_sys_timerfd_settime
265#define sys_timerfd_gettime compat_sys_timerfd_gettime
266#define sys_signalfd4 compat_sys_signalfd4
267#define sys_preadv compat_sys_preadv
268#define sys_pwritev compat_sys_pwritev
269#define sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
270#define sys_recvmmsg compat_sys_recvmmsg
271#define sys_fanotify_mark compat_sys_fanotify_mark_wrapper
272
273#undef __SYSCALL
274#define __SYSCALL(x, y) .quad y // x
275#define __SYSCALL_COMPAT
276
277/*
278 * The system calls table must be 4KB aligned.
279 */
280 .align 12
281ENTRY(compat_sys_call_table)
282#include <asm/unistd.h>
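/*
 * Illustration of how the table is generated: with the redefinitions
 * above and __SYSCALL defined as ".quad y", every __SYSCALL(nr, entry)
 * line pulled in from <asm/unistd.h> under __SYSCALL_COMPAT emits one
 * 64-bit slot; an entry along the lines of
 *
 *	__SYSCALL(__NR_open, sys_open)
 *
 * therefore assembles to ".quad compat_sys_open", and
 * compat_sys_call_table becomes an array of entry points indexed by the
 * AArch32 syscall number.
 */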
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
new file mode 100644
index 000000000000..967e92fdff01
--- /dev/null
+++ b/arch/arm64/kernel/sys_compat.c
@@ -0,0 +1,164 @@
1/*
2 * Based on arch/arm/kernel/sys_arm.c
3 *
4 * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
5 * Copyright (C) 1995, 1996 Russell King.
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#define __SYSCALL_COMPAT
22
23#include <linux/compat.h>
24#include <linux/personality.h>
25#include <linux/sched.h>
26#include <linux/slab.h>
27#include <linux/syscalls.h>
28#include <linux/uaccess.h>
29
30#include <asm/cacheflush.h>
31#include <asm/unistd.h>
32
33asmlinkage int compat_sys_fork(struct pt_regs *regs)
34{
35 return do_fork(SIGCHLD, regs->compat_sp, regs, 0, NULL, NULL);
36}
37
38asmlinkage int compat_sys_clone(unsigned long clone_flags, unsigned long newsp,
39 int __user *parent_tidptr, int tls_val,
40 int __user *child_tidptr, struct pt_regs *regs)
41{
42 if (!newsp)
43 newsp = regs->compat_sp;
44
45 return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
46}
47
48asmlinkage int compat_sys_vfork(struct pt_regs *regs)
49{
50 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->compat_sp,
51 regs, 0, NULL, NULL);
52}
53
54asmlinkage int compat_sys_execve(const char __user *filenamei,
55 compat_uptr_t argv, compat_uptr_t envp,
56 struct pt_regs *regs)
57{
58 int error;
59 char * filename;
60
61 filename = getname(filenamei);
62 error = PTR_ERR(filename);
63 if (IS_ERR(filename))
64 goto out;
65 error = compat_do_execve(filename, compat_ptr(argv), compat_ptr(envp),
66 regs);
67 putname(filename);
68out:
69 return error;
70}
71
72asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid,
73 struct compat_timespec __user *interval)
74{
75 struct timespec t;
76 int ret;
77 mm_segment_t old_fs = get_fs();
78
79 set_fs(KERNEL_DS);
80 ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
81 set_fs(old_fs);
82 if (put_compat_timespec(&t, interval))
83 return -EFAULT;
84 return ret;
85}
86
87asmlinkage int compat_sys_sendfile(int out_fd, int in_fd,
88 compat_off_t __user *offset, s32 count)
89{
90 mm_segment_t old_fs = get_fs();
91 int ret;
92 off_t of;
93
94 if (offset && get_user(of, offset))
95 return -EFAULT;
96
97 set_fs(KERNEL_DS);
98 ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
99 count);
100 set_fs(old_fs);
101
102 if (offset && put_user(of, offset))
103 return -EFAULT;
104 return ret;
105}
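/*
 * Both wrappers above follow the usual compat pattern: the native
 * syscall only accepts __user pointers and 64-bit types, so the wrapper
 * stages a native-sized value on the kernel stack, temporarily lifts the
 * address limit so that pointer is accepted, and converts the result
 * back to the 32-bit layout afterwards. Schematically (sys_foo being a
 * placeholder):
 *
 *	old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	ret = sys_foo(..., (type __user *)&native_local);
 *	set_fs(old_fs);
 *	copy/convert native_local back out to the compat user pointer
 */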
106
107static inline void
108do_compat_cache_op(unsigned long start, unsigned long end, int flags)
109{
110 struct mm_struct *mm = current->active_mm;
111 struct vm_area_struct *vma;
112
113 if (end < start || flags)
114 return;
115
116 down_read(&mm->mmap_sem);
117 vma = find_vma(mm, start);
118 if (vma && vma->vm_start < end) {
119 if (start < vma->vm_start)
120 start = vma->vm_start;
121 if (end > vma->vm_end)
122 end = vma->vm_end;
123 up_read(&mm->mmap_sem);
124 __flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end));
125 return;
126 }
127 up_read(&mm->mmap_sem);
128}
129
130/*
131 * Handle all unrecognised system calls.
132 */
133long compat_arm_syscall(struct pt_regs *regs)
134{
135 unsigned int no = regs->regs[7];
136
137 switch (no) {
138 /*
139 * Flush a region from virtual address 'r0' to virtual address 'r1'
140 * _exclusive_. There is no alignment requirement on either address;
141 * user space does not need to know the hardware cache layout.
142 *
143 * r2 contains flags. It should ALWAYS be passed as ZERO until it
144 * is defined to be something else. For now we ignore it, but may
145 * the fires of hell burn in your belly if you break this rule. ;)
146 *
147 * (at a later date, we may want to allow this call to not flush
148 * various aspects of the cache. Passing '0' will guarantee that
149 * everything necessary gets flushed to maintain consistency in
150 * the specified region).
151 */
152 case __ARM_NR_compat_cacheflush:
153 do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
154 return 0;
155
156 case __ARM_NR_compat_set_tls:
157 current->thread.tp_value = regs->regs[0];
158 asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
159 return 0;
160
161 default:
162 return -ENOSYS;
163 }
164}
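/*
 * For reference, an AArch32 process reaches the cacheflush call above
 * via the standard EABI trap convention: r0 = start, r1 = end, r2 = 0
 * and r7 = __ARM_NR_cacheflush (nominally 0x0f0002), followed by
 * "svc #0". This is how JITs and dynamic linkers make freshly written
 * instructions visible after modifying code at run time.
 */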
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
new file mode 100644
index 000000000000..3b4b7258f492
--- /dev/null
+++ b/arch/arm64/kernel/time.c
@@ -0,0 +1,65 @@
1/*
2 * Based on arch/arm/kernel/time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 * Modifications for ARM (C) 1994-2001 Russell King
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/export.h>
22#include <linux/kernel.h>
23#include <linux/interrupt.h>
24#include <linux/time.h>
25#include <linux/init.h>
26#include <linux/sched.h>
27#include <linux/smp.h>
28#include <linux/timex.h>
29#include <linux/errno.h>
30#include <linux/profile.h>
31#include <linux/syscore_ops.h>
32#include <linux/timer.h>
33#include <linux/irq.h>
34
35#include <clocksource/arm_generic.h>
36
37#include <asm/thread_info.h>
38#include <asm/stacktrace.h>
39
40#ifdef CONFIG_SMP
41unsigned long profile_pc(struct pt_regs *regs)
42{
43 struct stackframe frame;
44
45 if (!in_lock_functions(regs->pc))
46 return regs->pc;
47
48 frame.fp = regs->regs[29];
49 frame.sp = regs->sp;
50 frame.pc = regs->pc;
51 do {
52 int ret = unwind_frame(&frame);
53 if (ret < 0)
54 return 0;
55 } while (in_lock_functions(frame.pc));
56
57 return frame.pc;
58}
59EXPORT_SYMBOL(profile_pc);
60#endif
61
62void __init time_init(void)
63{
64 arm_generic_timer_init();
65}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
new file mode 100644
index 000000000000..3883f842434f
--- /dev/null
+++ b/arch/arm64/kernel/traps.c
@@ -0,0 +1,348 @@
1/*
2 * Based on arch/arm/kernel/traps.c
3 *
4 * Copyright (C) 1995-2009 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/signal.h>
21#include <linux/personality.h>
22#include <linux/kallsyms.h>
23#include <linux/spinlock.h>
24#include <linux/uaccess.h>
25#include <linux/hardirq.h>
26#include <linux/kdebug.h>
27#include <linux/module.h>
28#include <linux/kexec.h>
29#include <linux/delay.h>
30#include <linux/init.h>
31#include <linux/sched.h>
32#include <linux/syscalls.h>
33
34#include <asm/atomic.h>
35#include <asm/traps.h>
36#include <asm/stacktrace.h>
37#include <asm/exception.h>
38#include <asm/system_misc.h>
39
40static const char *handler[]= {
41 "Synchronous Abort",
42 "IRQ",
43 "FIQ",
44 "Error"
45};
46
47int show_unhandled_signals = 1;
48
49/*
50 * Dump out the contents of some memory nicely...
51 */
52static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
53 unsigned long top)
54{
55 unsigned long first;
56 mm_segment_t fs;
57 int i;
58
59 /*
60 * We need to switch to kernel mode so that we can use __get_user
61 * to safely read from kernel space. Note that we now dump the
62 * code first, just in case the backtrace kills us.
63 */
64 fs = get_fs();
65 set_fs(KERNEL_DS);
66
67 printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);
68
69 for (first = bottom & ~31; first < top; first += 32) {
70 unsigned long p;
71 char str[sizeof(" 12345678") * 8 + 1];
72
73 memset(str, ' ', sizeof(str));
74 str[sizeof(str) - 1] = '\0';
75
76 for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
77 if (p >= bottom && p < top) {
78 unsigned int val;
79 if (__get_user(val, (unsigned int *)p) == 0)
80 sprintf(str + i * 9, " %08x", val);
81 else
82 sprintf(str + i * 9, " ????????");
83 }
84 }
85 printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
86 }
87
88 set_fs(fs);
89}
90
91static void dump_backtrace_entry(unsigned long where, unsigned long stack)
92{
93 print_ip_sym(where);
94 if (in_exception_text(where))
95 dump_mem("", "Exception stack", stack,
96 stack + sizeof(struct pt_regs));
97}
98
99static void dump_instr(const char *lvl, struct pt_regs *regs)
100{
101 unsigned long addr = instruction_pointer(regs);
102 mm_segment_t fs;
103 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
104 int i;
105
106 /*
107 * We need to switch to kernel mode so that we can use __get_user
108 * to safely read from kernel space. Note that we now dump the
109 * code first, just in case the backtrace kills us.
110 */
111 fs = get_fs();
112 set_fs(KERNEL_DS);
113
114 for (i = -4; i < 1; i++) {
115 unsigned int val, bad;
116
117 bad = __get_user(val, &((u32 *)addr)[i]);
118
119 if (!bad)
120 p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
121 else {
122 p += sprintf(p, "bad PC value");
123 break;
124 }
125 }
126 printk("%sCode: %s\n", lvl, str);
127
128 set_fs(fs);
129}
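/*
 * For illustration, a line emitted by dump_instr() looks like
 *
 *	Code: d503201f d503201f d2800000 d503201f (00000000)
 *
 * i.e. the four words before the faulting PC followed by the offending
 * word in parentheses (the values here are made up).
 */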
130
131static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
132{
133 struct stackframe frame;
134 const register unsigned long current_sp asm ("sp");
135
136 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
137
138 if (!tsk)
139 tsk = current;
140
141 if (regs) {
142 frame.fp = regs->regs[29];
143 frame.sp = regs->sp;
144 frame.pc = regs->pc;
145 } else if (tsk == current) {
146 frame.fp = (unsigned long)__builtin_frame_address(0);
147 frame.sp = current_sp;
148 frame.pc = (unsigned long)dump_backtrace;
149 } else {
150 /*
151 * task blocked in __switch_to
152 */
153 frame.fp = thread_saved_fp(tsk);
154 frame.sp = thread_saved_sp(tsk);
155 frame.pc = thread_saved_pc(tsk);
156 }
157
158 printk("Call trace:\n");
159 while (1) {
160 unsigned long where = frame.pc;
161 int ret;
162
163 ret = unwind_frame(&frame);
164 if (ret < 0)
165 break;
166 dump_backtrace_entry(where, frame.sp);
167 }
168}
169
170void dump_stack(void)
171{
172 dump_backtrace(NULL, NULL);
173}
174
175EXPORT_SYMBOL(dump_stack);
176
177void show_stack(struct task_struct *tsk, unsigned long *sp)
178{
179 dump_backtrace(NULL, tsk);
180 barrier();
181}
182
183#ifdef CONFIG_PREEMPT
184#define S_PREEMPT " PREEMPT"
185#else
186#define S_PREEMPT ""
187#endif
188#ifdef CONFIG_SMP
189#define S_SMP " SMP"
190#else
191#define S_SMP ""
192#endif
193
194static int __die(const char *str, int err, struct thread_info *thread,
195 struct pt_regs *regs)
196{
197 struct task_struct *tsk = thread->task;
198 static int die_counter;
199 int ret;
200
201 pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
202 str, err, ++die_counter);
203
204 /* trap and error numbers are mostly meaningless on ARM */
205 ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
206 if (ret == NOTIFY_STOP)
207 return ret;
208
209 print_modules();
210 __show_regs(regs);
211 pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
212 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
213
214 if (!user_mode(regs) || in_interrupt()) {
215 dump_mem(KERN_EMERG, "Stack: ", regs->sp,
216 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
217 dump_backtrace(regs, tsk);
218 dump_instr(KERN_EMERG, regs);
219 }
220
221 return ret;
222}
223
224static DEFINE_RAW_SPINLOCK(die_lock);
225
226/*
227 * This function is protected against re-entrancy.
228 */
229void die(const char *str, struct pt_regs *regs, int err)
230{
231 struct thread_info *thread = current_thread_info();
232 int ret;
233
234 oops_enter();
235
236 raw_spin_lock_irq(&die_lock);
237 console_verbose();
238 bust_spinlocks(1);
239 ret = __die(str, err, thread, regs);
240
241 if (regs && kexec_should_crash(thread->task))
242 crash_kexec(regs);
243
244 bust_spinlocks(0);
245 add_taint(TAINT_DIE);
246 raw_spin_unlock_irq(&die_lock);
247 oops_exit();
248
249 if (in_interrupt())
250 panic("Fatal exception in interrupt");
251 if (panic_on_oops)
252 panic("Fatal exception");
253 if (ret != NOTIFY_STOP)
254 do_exit(SIGSEGV);
255}
256
257void arm64_notify_die(const char *str, struct pt_regs *regs,
258 struct siginfo *info, int err)
259{
260 if (user_mode(regs))
261 force_sig_info(info->si_signo, info, current);
262 else
263 die(str, regs, err);
264}
265
266asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
267{
268 siginfo_t info;
269 void __user *pc = (void __user *)instruction_pointer(regs);
270
271#ifdef CONFIG_COMPAT
272 /* check for AArch32 breakpoint instructions */
273 if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0)
274 return;
275#endif
276
277 if (show_unhandled_signals) {
278 pr_info("%s[%d]: undefined instruction: pc=%p\n",
279 current->comm, task_pid_nr(current), pc);
280 dump_instr(KERN_INFO, regs);
281 }
282
283 info.si_signo = SIGILL;
284 info.si_errno = 0;
285 info.si_code = ILL_ILLOPC;
286 info.si_addr = pc;
287
288 arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
289}
290
291long compat_arm_syscall(struct pt_regs *regs);
292
293asmlinkage long do_ni_syscall(struct pt_regs *regs)
294{
295#ifdef CONFIG_COMPAT
296 long ret;
297 if (is_compat_task()) {
298 ret = compat_arm_syscall(regs);
299 if (ret != -ENOSYS)
300 return ret;
301 }
302#endif
303
304 if (show_unhandled_signals) {
305 pr_info("%s[%d]: syscall %d\n", current->comm,
306 task_pid_nr(current), (int)regs->syscallno);
307 dump_instr("", regs);
308 if (user_mode(regs))
309 __show_regs(regs);
310 }
311
312 return sys_ni_syscall();
313}
314
315/*
316 * bad_mode handles the impossible case in the exception vector.
317 */
318asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
319{
320 console_verbose();
321
322 pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
323 handler[reason], esr);
324
325 die("Oops - bad mode", regs, 0);
326 local_irq_disable();
327 panic("bad mode");
328}
329
330void __pte_error(const char *file, int line, unsigned long val)
331{
332 printk("%s:%d: bad pte %016lx.\n", file, line, val);
333}
334
335void __pmd_error(const char *file, int line, unsigned long val)
336{
337 printk("%s:%d: bad pmd %016lx.\n", file, line, val);
338}
339
340void __pgd_error(const char *file, int line, unsigned long val)
341{
342 printk("%s:%d: bad pgd %016lx.\n", file, line, val);
343}
344
345void __init trap_init(void)
346{
347 return;
348}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
new file mode 100644
index 000000000000..17948fc7d663
--- /dev/null
+++ b/arch/arm64/kernel/vdso.c
@@ -0,0 +1,261 @@
1/*
2 * VDSO implementation for AArch64 and vector page setup for AArch32.
3 *
4 * Copyright (C) 2012 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#include <linux/kernel.h>
22#include <linux/clocksource.h>
23#include <linux/elf.h>
24#include <linux/err.h>
25#include <linux/errno.h>
26#include <linux/gfp.h>
27#include <linux/mm.h>
28#include <linux/sched.h>
29#include <linux/signal.h>
30#include <linux/slab.h>
31#include <linux/vmalloc.h>
32
33#include <asm/cacheflush.h>
34#include <asm/signal32.h>
35#include <asm/vdso.h>
36#include <asm/vdso_datapage.h>
37
38extern char vdso_start, vdso_end;
39static unsigned long vdso_pages;
40static struct page **vdso_pagelist;
41
42/*
43 * The vDSO data page.
44 */
45static union {
46 struct vdso_data data;
47 u8 page[PAGE_SIZE];
48} vdso_data_store __page_aligned_data;
49struct vdso_data *vdso_data = &vdso_data_store.data;
50
51#ifdef CONFIG_COMPAT
52/*
53 * Create and map the vectors page for AArch32 tasks.
54 */
55static struct page *vectors_page[1];
56
57static int alloc_vectors_page(void)
58{
59 extern char __kuser_helper_start[], __kuser_helper_end[];
60 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
61 unsigned long vpage;
62
63 vpage = get_zeroed_page(GFP_ATOMIC);
64
65 if (!vpage)
66 return -ENOMEM;
67
68 /* kuser helpers */
69 memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
70 kuser_sz);
71
72 /* sigreturn code */
73 memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
74 aarch32_sigret_code, sizeof(aarch32_sigret_code));
75
76 flush_icache_range(vpage, vpage + PAGE_SIZE);
77 vectors_page[0] = virt_to_page(vpage);
78
79 return 0;
80}
81arch_initcall(alloc_vectors_page);
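/*
 * The helpers are copied to the very top of the page so that, once it
 * is mapped at AARCH32_VECTORS_BASE (traditionally 0xffff0000), they
 * show up at the fixed addresses 32-bit ARM user space has always used,
 * e.g. the get-TLS helper near 0xffff0fe0; the exact offsets are owned
 * by the kuser helper code itself, not by this function.
 */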
82
83int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
84{
85 struct mm_struct *mm = current->mm;
86 unsigned long addr = AARCH32_VECTORS_BASE;
87 int ret;
88
89 down_write(&mm->mmap_sem);
90 current->mm->context.vdso = (void *)addr;
91
92 /* Map vectors page at the high address. */
93 ret = install_special_mapping(mm, addr, PAGE_SIZE,
94 VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
95 vectors_page);
96
97 up_write(&mm->mmap_sem);
98
99 return ret;
100}
101#endif /* CONFIG_COMPAT */
102
103static int __init vdso_init(void)
104{
105 struct page *pg;
106 char *vbase;
107 int i, ret = 0;
108
109 vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
110 pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
111 vdso_pages + 1, vdso_pages, 1L, &vdso_start);
112
113 /* Allocate the vDSO pagelist, plus a page for the data. */
114 vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1),
115 GFP_KERNEL);
116 if (vdso_pagelist == NULL) {
117 pr_err("Failed to allocate vDSO pagelist!\n");
118 return -ENOMEM;
119 }
120
121 /* Grab the vDSO code pages. */
122 for (i = 0; i < vdso_pages; i++) {
123 pg = virt_to_page(&vdso_start + i*PAGE_SIZE);
124 ClearPageReserved(pg);
125 get_page(pg);
126 vdso_pagelist[i] = pg;
127 }
128
129 /* Sanity check the shared object header. */
130 vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL);
131 if (vbase == NULL) {
132 pr_err("Failed to map vDSO pagelist!\n");
133 return -ENOMEM;
134 } else if (memcmp(vbase, "\177ELF", 4)) {
135 pr_err("vDSO is not a valid ELF object!\n");
136 ret = -EINVAL;
137 goto unmap;
138 }
139
140 /* Grab the vDSO data page. */
141 pg = virt_to_page(vdso_data);
142 get_page(pg);
143 vdso_pagelist[i] = pg;
144
145unmap:
146 vunmap(vbase);
147 return ret;
148}
149arch_initcall(vdso_init);
150
151int arch_setup_additional_pages(struct linux_binprm *bprm,
152 int uses_interp)
153{
154 struct mm_struct *mm = current->mm;
155 unsigned long vdso_base, vdso_mapping_len;
156 int ret;
157
158 /* Be sure to map the data page */
159 vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
160
161 down_write(&mm->mmap_sem);
162 vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
163 if (IS_ERR_VALUE(vdso_base)) {
164 ret = vdso_base;
165 goto up_fail;
166 }
167 mm->context.vdso = (void *)vdso_base;
168
169 ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
170 VM_READ|VM_EXEC|
171 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
172 vdso_pagelist);
173 if (ret) {
174 mm->context.vdso = NULL;
175 goto up_fail;
176 }
177
178up_fail:
179 up_write(&mm->mmap_sem);
180
181 return ret;
182}
183
184const char *arch_vma_name(struct vm_area_struct *vma)
185{
186 /*
187 * We can re-use the vdso pointer in mm_context_t for identifying
188 * the vectors page for compat applications. The vDSO will always
189 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
190 * it conflicting with the vectors base.
191 */
192 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
193#ifdef CONFIG_COMPAT
194 if (vma->vm_start == AARCH32_VECTORS_BASE)
195 return "[vectors]";
196#endif
197 return "[vdso]";
198 }
199
200 return NULL;
201}
202
203/*
204 * We define AT_SYSINFO_EHDR, so we need these function stubs to keep
205 * Linux happy.
206 */
207int in_gate_area_no_mm(unsigned long addr)
208{
209 return 0;
210}
211
212int in_gate_area(struct mm_struct *mm, unsigned long addr)
213{
214 return 0;
215}
216
217struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
218{
219 return NULL;
220}
221
222/*
223 * Update the vDSO data page to keep in sync with kernel timekeeping.
224 */
225void update_vsyscall(struct timespec *ts, struct timespec *wtm,
226 struct clocksource *clock, u32 mult)
227{
228 struct timespec xtime_coarse;
229 u32 use_syscall = strcmp(clock->name, "arch_sys_counter");
230
231 ++vdso_data->tb_seq_count;
232 smp_wmb();
233
234 xtime_coarse = __current_kernel_time();
235 vdso_data->use_syscall = use_syscall;
236 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
237 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
238
239 if (!use_syscall) {
240 vdso_data->cs_cycle_last = clock->cycle_last;
241 vdso_data->xtime_clock_sec = ts->tv_sec;
242 vdso_data->xtime_clock_nsec = ts->tv_nsec;
243 vdso_data->cs_mult = mult;
244 vdso_data->cs_shift = clock->shift;
245 vdso_data->wtm_clock_sec = wtm->tv_sec;
246 vdso_data->wtm_clock_nsec = wtm->tv_nsec;
247 }
248
249 smp_wmb();
250 ++vdso_data->tb_seq_count;
251}
252
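/*
 * The tb_seq_count manipulation above is the write side of a simple
 * seqlock: the count is odd while an update is in progress and even
 * once it has completed. A reader of the data page (the vDSO code, or
 * roughly in C):
 *
 *	do {
 *		seq = data->tb_seq_count;
 *		smp_rmb();
 *		... copy the timekeeping fields ...
 *		smp_rmb();
 *	} while ((seq & 1) || seq != data->tb_seq_count);
 *
 * retries until it observes the same even value on both sides of the
 * copy.
 */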
253void update_vsyscall_tz(void)
254{
255 ++vdso_data->tb_seq_count;
256 smp_wmb();
257 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
258 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
259 smp_wmb();
260 ++vdso_data->tb_seq_count;
261}
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..b8cc94e9698b
--- /dev/null
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -0,0 +1,2 @@
1vdso.lds
2vdso-offsets.h
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
new file mode 100644
index 000000000000..d8064af42e62
--- /dev/null
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -0,0 +1,63 @@
1#
2# Building a vDSO image for AArch64.
3#
4# Author: Will Deacon <will.deacon@arm.com>
5# Heavily based on the vDSO Makefiles for other archs.
6#
7
8obj-vdso := gettimeofday.o note.o sigreturn.o
9
10# Build rules
11targets := $(obj-vdso) vdso.so vdso.so.dbg
12obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
13
14ccflags-y := -shared -fno-common -fno-builtin
15ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
16 $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
17
18obj-y += vdso.o
19extra-y += vdso.lds vdso-offsets.h
20CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
21
22# Force dependency (incbin is bad)
23$(obj)/vdso.o : $(obj)/vdso.so
24
25# Link rule for the .so file, .lds has to be first
26$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
27 $(call if_changed,vdsold)
28
29# Strip rule for the .so file
30$(obj)/%.so: OBJCOPYFLAGS := -S
31$(obj)/%.so: $(obj)/%.so.dbg FORCE
32 $(call if_changed,objcopy)
33
34# Generate VDSO offsets using helper script
35gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
36quiet_cmd_vdsosym = VDSOSYM $@
37define cmd_vdsosym
38 $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ && \
39 cp $@ include/generated/
40endef
41
42$(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
43 $(call if_changed,vdsosym)
44
45# Assembly rules for the .S files
46$(obj-vdso): %.o: %.S
47 $(call if_changed_dep,vdsoas)
48
49# Actual build commands
50quiet_cmd_vdsold = VDSOL $@
51 cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
52quiet_cmd_vdsoas = VDSOA $@
53 cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
54
55# Install commands for the unstripped file
56quiet_cmd_vdso_install = INSTALL $@
57 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
58
59vdso.so: $(obj)/vdso.so.dbg
60 @mkdir -p $(MODLIB)/vdso
61 $(call cmd,vdso_install)
62
63vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
new file mode 100755
index 000000000000..01924ff071ad
--- /dev/null
+++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
@@ -0,0 +1,15 @@
1#!/bin/sh
2
3#
4# Match symbols in the DSO that look like VDSO_*; produce a header file
5# of constant offsets into the shared object.
6#
7# Doing this inside the Makefile will break the $(filter-out) function,
8# causing Kbuild to rebuild the vdso-offsets header file every time.
9#
10# Author: Will Deacon <will.deacon@arm.com>
11#
12
13LC_ALL=C
14sed -n -e 's/^00*/0/' -e \
15's/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
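#
# Example (offset made up): an $(NM) line such as
#
#   0000000000000440 T VDSO_sigtramp
#
# has its run of leading zeros collapsed by the first expression and is
# then rewritten by the second one as
#
#   #define vdso_offset_sigtramp	0x0440
#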
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
new file mode 100644
index 000000000000..dcb8c203a3b2
--- /dev/null
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -0,0 +1,242 @@
1/*
2 * Userspace implementations of gettimeofday() and friends.
3 *
4 * Copyright (C) 2012 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#include <linux/linkage.h>
22#include <asm/asm-offsets.h>
23#include <asm/unistd.h>
24
25#define NSEC_PER_SEC_LO16 0xca00
26#define NSEC_PER_SEC_HI16 0x3b9a
27
28vdso_data .req x6
29use_syscall .req w7
30seqcnt .req w8
31
32 .macro seqcnt_acquire
339999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
34 tbnz seqcnt, #0, 9999b
35 dmb ishld
36 ldr use_syscall, [vdso_data, #VDSO_USE_SYSCALL]
37 .endm
38
39 .macro seqcnt_read, cnt
40 dmb ishld
41 ldr \cnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
42 .endm
43
44 .macro seqcnt_check, cnt, fail
45 cmp \cnt, seqcnt
46 b.ne \fail
47 .endm
48
49 .text
50
51/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
52ENTRY(__kernel_gettimeofday)
53 .cfi_startproc
54 mov x2, x30
55 .cfi_register x30, x2
56
57 /* Acquire the sequence counter and get the timespec. */
58 adr vdso_data, _vdso_data
591: seqcnt_acquire
60 cbnz use_syscall, 4f
61
62 /* If tv is NULL, skip to the timezone code. */
63 cbz x0, 2f
64 bl __do_get_tspec
65 seqcnt_check w13, 1b
66
67 /* Convert ns to us. */
68 mov x11, #1000
69 udiv x10, x10, x11
70 stp x9, x10, [x0, #TVAL_TV_SEC]
712:
72 /* If tz is NULL, return 0. */
73 cbz x1, 3f
74 ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
75 seqcnt_read w13
76 seqcnt_check w13, 1b
77 stp w4, w5, [x1, #TZ_MINWEST]
783:
79 mov x0, xzr
80 ret x2
814:
82 /* Syscall fallback. */
83 mov x8, #__NR_gettimeofday
84 svc #0
85 ret x2
86 .cfi_endproc
87ENDPROC(__kernel_gettimeofday)
88
89/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
90ENTRY(__kernel_clock_gettime)
91 .cfi_startproc
92 cmp w0, #CLOCK_REALTIME
93 ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
94 b.ne 2f
95
96 mov x2, x30
97 .cfi_register x30, x2
98
99 /* Get kernel timespec. */
100 adr vdso_data, _vdso_data
1011: seqcnt_acquire
102 cbnz use_syscall, 7f
103
104 bl __do_get_tspec
105 seqcnt_check w13, 1b
106
107 cmp w0, #CLOCK_MONOTONIC
108 b.ne 6f
109
110 /* Get wtm timespec. */
111 ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC]
112
113 /* Check the sequence counter. */
114 seqcnt_read w13
115 seqcnt_check w13, 1b
116 b 4f
1172:
118 cmp w0, #CLOCK_REALTIME_COARSE
119 ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
120 b.ne 8f
121
122 /* Get coarse timespec. */
123 adr vdso_data, _vdso_data
1243: seqcnt_acquire
125 ldp x9, x10, [vdso_data, #VDSO_XTIME_CRS_SEC]
126
127 cmp w0, #CLOCK_MONOTONIC_COARSE
128 b.ne 6f
129
130 /* Get wtm timespec. */
131 ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC]
132
133 /* Check the sequence counter. */
134 seqcnt_read w13
135 seqcnt_check w13, 3b
1364:
137 /* Add on wtm timespec. */
138 add x9, x9, x14
139 add x10, x10, x15
140
141 /* Normalise the new timespec. */
142 mov x14, #NSEC_PER_SEC_LO16
143 movk x14, #NSEC_PER_SEC_HI16, lsl #16
144 cmp x10, x14
145 b.lt 5f
146 sub x10, x10, x14
147 add x9, x9, #1
1485:
149 cmp x10, #0
150 b.ge 6f
151 add x10, x10, x14
152 sub x9, x9, #1
153
1546: /* Store to the user timespec. */
155 stp x9, x10, [x1, #TSPEC_TV_SEC]
156 mov x0, xzr
157 ret x2
1587:
159 mov x30, x2
1608: /* Syscall fallback. */
161 mov x8, #__NR_clock_gettime
162 svc #0
163 ret
164 .cfi_endproc
165ENDPROC(__kernel_clock_gettime)
166
167/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
168ENTRY(__kernel_clock_getres)
169 .cfi_startproc
170 cbz w1, 3f
171
172 cmp w0, #CLOCK_REALTIME
173 ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
174 b.ne 1f
175
176 ldr x2, 5f
177 b 2f
1781:
179 cmp w0, #CLOCK_REALTIME_COARSE
180 ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
181 b.ne 4f
182 ldr x2, 6f
1832:
184 stp xzr, x2, [x1]
185
1863: /* res == NULL. */
187 mov w0, wzr
188 ret
189
1904: /* Syscall fallback. */
191 mov x8, #__NR_clock_getres
192 svc #0
193 ret
1945:
195 .quad CLOCK_REALTIME_RES
1966:
197 .quad CLOCK_COARSE_RES
198 .cfi_endproc
199ENDPROC(__kernel_clock_getres)
200
201/*
202 * Read the current time from the architected counter.
203 * Expects vdso_data to be initialised.
204 * Clobbers the temporary registers (x9 - x15).
205 * Returns:
206 * - (x9, x10) = (ts->tv_sec, ts->tv_nsec)
207 * - (x11, x12) = (xtime->tv_sec, xtime->tv_nsec)
208 * - w13 = vDSO sequence counter
209 */
210ENTRY(__do_get_tspec)
211 .cfi_startproc
212
213 /* Read from the vDSO data page. */
214 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
215 ldp x11, x12, [vdso_data, #VDSO_XTIME_CLK_SEC]
216 ldp w14, w15, [vdso_data, #VDSO_CS_MULT]
217 seqcnt_read w13
218
219 /* Read the physical counter. */
220 isb
221 mrs x9, cntpct_el0
222
223 /* Calculate cycle delta and convert to ns. */
224 sub x10, x9, x10
225 /* We can only guarantee 56 bits of precision. */
226 movn x9, #0xff0, lsl #48
227 and x10, x9, x10
228 mul x10, x10, x14
229 lsr x10, x10, x15
230
231 /* Use the kernel time to calculate the new timespec. */
232 add x10, x12, x10
233 mov x14, #NSEC_PER_SEC_LO16
234 movk x14, #NSEC_PER_SEC_HI16, lsl #16
235 udiv x15, x10, x14
236 add x9, x15, x11
237 mul x14, x14, x15
238 sub x10, x10, x14
239
240 ret
241 .cfi_endproc
242ENDPROC(__do_get_tspec)
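/*
 * Rough derivation of the arithmetic above, using the vDSO data page
 * field names:
 *
 *	delta   = (cntpct_el0 - cs_cycle_last) & mask	// 56-bit mask
 *	ns      = xtime_clock_nsec + ((delta * cs_mult) >> cs_shift)
 *	tv_sec  = xtime_clock_sec + ns / NSEC_PER_SEC
 *	tv_nsec = ns % NSEC_PER_SEC
 *
 * which mirrors the kernel's clocksource conversion, done here purely
 * from user-visible data.
 */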
diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S
new file mode 100644
index 000000000000..b82c85e5d972
--- /dev/null
+++ b/arch/arm64/kernel/vdso/note.S
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2012 ARM Limited
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Author: Will Deacon <will.deacon@arm.com>
17 *
18 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
19 * Here we can supply some information useful to userland.
20 */
21
22#include <linux/uts.h>
23#include <linux/version.h>
24#include <linux/elfnote.h>
25
26ELFNOTE_START(Linux, 0, "a")
27 .long LINUX_VERSION_CODE
28ELFNOTE_END
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
new file mode 100644
index 000000000000..20d98effa7dd
--- /dev/null
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -0,0 +1,37 @@
1/*
2 * Sigreturn trampoline for returning from a signal when the SA_RESTORER
3 * flag is not set.
4 *
5 * Copyright (C) 2012 ARM Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 */
21
22#include <linux/linkage.h>
23#include <asm/unistd.h>
24
25 .text
26
27 nop
28ENTRY(__kernel_rt_sigreturn)
29 .cfi_startproc
30 .cfi_signal_frame
31 .cfi_def_cfa x29, 0
32 .cfi_offset x29, 0 * 8
33 .cfi_offset x30, 1 * 8
34 mov x8, #__NR_rt_sigreturn
35 svc #0
36 .cfi_endproc
37ENDPROC(__kernel_rt_sigreturn)
diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..60c1db54b41a
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vdso.S
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2012 ARM Limited
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Author: Will Deacon <will.deacon@arm.com>
17 */
18
19#include <linux/init.h>
20#include <linux/linkage.h>
21#include <linux/const.h>
22#include <asm/page.h>
23
24 __PAGE_ALIGNED_DATA
25
26 .globl vdso_start, vdso_end
27 .balign PAGE_SIZE
28vdso_start:
29 .incbin "arch/arm64/kernel/vdso/vdso.so"
30 .balign PAGE_SIZE
31vdso_end:
32
33 .previous
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..8154b8d1c826
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -0,0 +1,100 @@
1/*
2 * GNU linker script for the VDSO library.
3 *
4 * Copyright (C) 2012 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 * Heavily based on the vDSO linker scripts for other archs.
20 */
21
22#include <linux/const.h>
23#include <asm/page.h>
24#include <asm/vdso.h>
25
26OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
27OUTPUT_ARCH(aarch64)
28
29SECTIONS
30{
31 . = VDSO_LBASE + SIZEOF_HEADERS;
32
33 .hash : { *(.hash) } :text
34 .gnu.hash : { *(.gnu.hash) }
35 .dynsym : { *(.dynsym) }
36 .dynstr : { *(.dynstr) }
37 .gnu.version : { *(.gnu.version) }
38 .gnu.version_d : { *(.gnu.version_d) }
39 .gnu.version_r : { *(.gnu.version_r) }
40
41 .note : { *(.note.*) } :text :note
42
43 . = ALIGN(16);
44
45 .text : { *(.text*) } :text =0xd503201f
46 PROVIDE (__etext = .);
47 PROVIDE (_etext = .);
48 PROVIDE (etext = .);
49
50 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
51 .eh_frame : { KEEP (*(.eh_frame)) } :text
52
53 .dynamic : { *(.dynamic) } :text :dynamic
54
55 .rodata : { *(.rodata*) } :text
56
57 _end = .;
58 PROVIDE(end = .);
59
60 . = ALIGN(PAGE_SIZE);
61 PROVIDE(_vdso_data = .);
62
63 /DISCARD/ : {
64 *(.note.GNU-stack)
65 *(.data .data.* .gnu.linkonce.d.* .sdata*)
66 *(.bss .sbss .dynbss .dynsbss)
67 }
68}
69
70/*
71 * We must supply the ELF program headers explicitly to get just one
72 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
73 */
74PHDRS
75{
76 text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
77 dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
78 note PT_NOTE FLAGS(4); /* PF_R */
79 eh_frame_hdr PT_GNU_EH_FRAME;
80}
81
82/*
83 * This controls what symbols we export from the DSO.
84 */
85VERSION
86{
87 LINUX_2.6.39 {
88 global:
89 __kernel_rt_sigreturn;
90 __kernel_gettimeofday;
91 __kernel_clock_gettime;
92 __kernel_clock_getres;
93 local: *;
94 };
95}
96
97/*
98 * Make the sigreturn code visible to the kernel.
99 */
100VDSO_sigtramp = __kernel_rt_sigreturn;
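/*
 * Background: user space never opens this DSO from the filesystem. The
 * kernel maps it into each process and advertises its base address via
 * the AT_SYSINFO_EHDR auxiliary vector entry, from which the C library
 * resolves the LINUX_2.6.39-versioned symbols exported above.
 */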
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..3fae2be8b016
--- /dev/null
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -0,0 +1,126 @@
1/*
2 * ld script to make ARM Linux kernel
3 * taken from the i386 version by Russell King
4 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
5 */
6
7#include <asm-generic/vmlinux.lds.h>
8#include <asm/thread_info.h>
9#include <asm/memory.h>
10#include <asm/page.h>
11
12#define ARM_EXIT_KEEP(x)
13#define ARM_EXIT_DISCARD(x) x
14
15OUTPUT_ARCH(aarch64)
16ENTRY(stext)
17
18jiffies = jiffies_64;
19
20SECTIONS
21{
22 /*
23 * XXX: The linker does not define how output sections are
24 * assigned to input sections when there are multiple statements
25 * matching the same input section name. There is no documented
26 * order of matching.
27 */
28 /DISCARD/ : {
29 ARM_EXIT_DISCARD(EXIT_TEXT)
30 ARM_EXIT_DISCARD(EXIT_DATA)
31 EXIT_CALL
32 *(.discard)
33 *(.discard.*)
34 }
35
36 . = PAGE_OFFSET + TEXT_OFFSET;
37
38 .head.text : {
39 _text = .;
40 HEAD_TEXT
41 }
42 .text : { /* Real text segment */
43 _stext = .; /* Text and read-only data */
44 *(.smp.pen.text)
45 __exception_text_start = .;
46 *(.exception.text)
47 __exception_text_end = .;
48 IRQENTRY_TEXT
49 TEXT_TEXT
50 SCHED_TEXT
51 LOCK_TEXT
52 *(.fixup)
53 *(.gnu.warning)
54 . = ALIGN(16);
55 *(.got) /* Global offset table */
56 }
57
58 RO_DATA(PAGE_SIZE)
59
60 _etext = .; /* End of text and rodata section */
61
62 . = ALIGN(PAGE_SIZE);
63 __init_begin = .;
64
65 INIT_TEXT_SECTION(8)
66 .exit.text : {
67 ARM_EXIT_KEEP(EXIT_TEXT)
68 }
69 . = ALIGN(16);
70 .init.data : {
71 INIT_DATA
72 INIT_SETUP(16)
73 INIT_CALLS
74 CON_INITCALL
75 SECURITY_INITCALL
76 INIT_RAM_FS
77 }
78 .exit.data : {
79 ARM_EXIT_KEEP(EXIT_DATA)
80 }
81
82 PERCPU_SECTION(64)
83
84 __init_end = .;
85 . = ALIGN(THREAD_SIZE);
86 __data_loc = .;
87
88 .data : AT(__data_loc) {
89 _data = .; /* address in memory */
90 _sdata = .;
91
92 /*
93 * first, the init task union, aligned
94 * to an 8192 byte boundary.
95 */
96 INIT_TASK_DATA(THREAD_SIZE)
97 NOSAVE_DATA
98 CACHELINE_ALIGNED_DATA(64)
99 READ_MOSTLY_DATA(64)
100
101 /*
102 * The exception fixup table (might need resorting at runtime)
103 */
104 . = ALIGN(32);
105 __start___ex_table = .;
106 *(__ex_table)
107 __stop___ex_table = .;
108
109 /*
110 * and the usual data section
111 */
112 DATA_DATA
113 CONSTRUCTORS
114
115 _edata = .;
116 }
117 _edata_loc = __data_loc + SIZEOF(.data);
118
119 NOTES
120
121 BSS_SECTION(0, 0, 0)
122 _end = .;
123
124 STABS_DEBUG
125 .comment 0 : { *(.comment) }
126}
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
new file mode 100644
index 000000000000..2fb7f6092aae
--- /dev/null
+++ b/arch/arm64/lib/Makefile
@@ -0,0 +1,4 @@
1lib-y := bitops.o delay.o \
2 strncpy_from_user.o strnlen_user.o clear_user.o \
3 copy_from_user.o copy_to_user.o copy_in_user.o \
4 copy_page.o clear_page.o
diff --git a/arch/arm64/lib/bitops.c b/arch/arm64/lib/bitops.c
new file mode 100644
index 000000000000..aa4965e60acc
--- /dev/null
+++ b/arch/arm64/lib/bitops.c
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2012 ARM Limited
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/kernel.h>
18#include <linux/spinlock.h>
19#include <linux/atomic.h>
20
21#ifdef CONFIG_SMP
22arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
23 [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
24};
25#endif
diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
new file mode 100644
index 000000000000..ef08e905e35b
--- /dev/null
+++ b/arch/arm64/lib/clear_page.S
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/linkage.h>
18#include <linux/const.h>
19#include <asm/assembler.h>
20#include <asm/page.h>
21
22/*
23 * Clear page @dest
24 *
25 * Parameters:
26 * x0 - dest
27 */
28ENTRY(clear_page)
29 mrs x1, dczid_el0
30 and w1, w1, #0xf
31 mov x2, #4
32 lsl x1, x2, x1
33
341: dc zva, x0
35 add x0, x0, x1
36 tst x0, #(PAGE_SIZE - 1)
37 b.ne 1b
38 ret
39ENDPROC(clear_page)
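/*
 * The block size computed above comes from DCZID_EL0[3:0], which holds
 * log2 of the "dc zva" block size in words; the size in bytes is
 * therefore 4 << DCZID_EL0[3:0], e.g. a field value of 4 gives 64-byte
 * blocks, and the loop issues one "dc zva" per block until the address
 * wraps to the next page boundary.
 */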
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
new file mode 100644
index 000000000000..6e0ed93d51fe
--- /dev/null
+++ b/arch/arm64/lib/clear_user.S
@@ -0,0 +1,58 @@
1/*
2 * Based on arch/arm/lib/clear_user.S
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/linkage.h>
19#include <asm/assembler.h>
20
21 .text
22
23/* Prototype: int __clear_user(void *addr, size_t sz)
24 * Purpose : clear some user memory
25 * Params : addr - user memory address to clear
26 * : sz - number of bytes to clear
27 * Returns : number of bytes NOT cleared
28 *
29 * Alignment fixed up by hardware.
30 */
31ENTRY(__clear_user)
32 mov x2, x1 // save the size for fixup return
33 subs x1, x1, #8
34 b.mi 2f
351:
36USER(9f, str xzr, [x0], #8 )
37 subs x1, x1, #8
38 b.pl 1b
392: adds x1, x1, #4
40 b.mi 3f
41USER(9f, str wzr, [x0], #4 )
42 sub x1, x1, #4
433: adds x1, x1, #2
44 b.mi 4f
45USER(9f, strh wzr, [x0], #2 )
46 sub x1, x1, #2
474: adds x1, x1, #1
48 b.mi 5f
49 strb wzr, [x0]
505: mov x0, #0
51 ret
52ENDPROC(__clear_user)
53
54 .section .fixup,"ax"
55 .align 2
569: mov x0, x2 // return the original size
57 ret
58 .previous
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
new file mode 100644
index 000000000000..5e27add9d362
--- /dev/null
+++ b/arch/arm64/lib/copy_from_user.S
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/linkage.h>
18#include <asm/assembler.h>
19
20/*
21 * Copy from user space to a kernel buffer (alignment handled by the hardware)
22 *
23 * Parameters:
24 * x0 - to
25 * x1 - from
26 * x2 - n
27 * Returns:
28 * x0 - bytes not copied
29 */
30ENTRY(__copy_from_user)
31 add x4, x1, x2 // upper user buffer boundary
32 subs x2, x2, #8
33 b.mi 2f
341:
35USER(9f, ldr x3, [x1], #8 )
36 subs x2, x2, #8
37 str x3, [x0], #8
38 b.pl 1b
392: adds x2, x2, #4
40 b.mi 3f
41USER(9f, ldr w3, [x1], #4 )
42 sub x2, x2, #4
43 str w3, [x0], #4
443: adds x2, x2, #2
45 b.mi 4f
46USER(9f, ldrh w3, [x1], #2 )
47 sub x2, x2, #2
48 strh w3, [x0], #2
494: adds x2, x2, #1
50 b.mi 5f
51USER(9f, ldrb w3, [x1] )
52 strb w3, [x0]
535: mov x0, #0
54 ret
55ENDPROC(__copy_from_user)
56
57 .section .fixup,"ax"
58 .align 2
599: sub x2, x4, x1
60 mov x3, x2
6110: strb wzr, [x0], #1 // zero remaining buffer space
62 subs x3, x3, #1
63 b.ne 10b
64 mov x0, x2 // bytes not copied
65 ret
66 .previous
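/*
 * Note on the fixup path above: copy_from_user() callers rely on the
 * destination buffer being zero-padded when the source faults, so the
 * handler clears the remaining kernel-buffer bytes before returning the
 * count of bytes that were not copied.
 */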
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
new file mode 100644
index 000000000000..84b6c9bb9b93
--- /dev/null
+++ b/arch/arm64/lib/copy_in_user.S
@@ -0,0 +1,63 @@
1/*
2 * Copy from user space to user space
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/linkage.h>
20#include <asm/assembler.h>
21
22/*
23 * Copy from user space to user space (alignment handled by the hardware)
24 *
25 * Parameters:
26 * x0 - to
27 * x1 - from
28 * x2 - n
29 * Returns:
30 * x0 - bytes not copied
31 */
32ENTRY(__copy_in_user)
33 add x4, x0, x2 // upper user buffer boundary
34 subs x2, x2, #8
35 b.mi 2f
361:
37USER(9f, ldr x3, [x1], #8 )
38 subs x2, x2, #8
39USER(9f, str x3, [x0], #8 )
40 b.pl 1b
412: adds x2, x2, #4
42 b.mi 3f
43USER(9f, ldr w3, [x1], #4 )
44 sub x2, x2, #4
45USER(9f, str w3, [x0], #4 )
463: adds x2, x2, #2
47 b.mi 4f
48USER(9f, ldrh w3, [x1], #2 )
49 sub x2, x2, #2
50USER(9f, strh w3, [x0], #2 )
514: adds x2, x2, #1
52 b.mi 5f
53USER(9f, ldrb w3, [x1] )
54USER(9f, strb w3, [x0] )
555: mov x0, #0
56 ret
57ENDPROC(__copy_in_user)
58
59 .section .fixup,"ax"
60 .align 2
619: sub x0, x4, x0 // bytes not copied
62 ret
63 .previous
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
new file mode 100644
index 000000000000..512b9a7b980e
--- /dev/null
+++ b/arch/arm64/lib/copy_page.S
@@ -0,0 +1,46 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/linkage.h>
18#include <linux/const.h>
19#include <asm/assembler.h>
20#include <asm/page.h>
21
22/*
23 * Copy a page from src to dest (both are page aligned)
24 *
25 * Parameters:
26 * x0 - dest
27 * x1 - src
28 */
29ENTRY(copy_page)
30 /* Assume cache line size is 64 bytes. */
31 prfm pldl1strm, [x1, #64]
321: ldp x2, x3, [x1]
33 ldp x4, x5, [x1, #16]
34 ldp x6, x7, [x1, #32]
35 ldp x8, x9, [x1, #48]
36 add x1, x1, #64
37 prfm pldl1strm, [x1, #64]
38 stnp x2, x3, [x0]
39 stnp x4, x5, [x0, #16]
40 stnp x6, x7, [x0, #32]
41 stnp x8, x9, [x0, #48]
42 add x0, x0, #64
43 tst x1, #(PAGE_SIZE - 1)
44 b.ne 1b
45 ret
46ENDPROC(copy_page)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
new file mode 100644
index 000000000000..a0aeeb9b7a28
--- /dev/null
+++ b/arch/arm64/lib/copy_to_user.S
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/linkage.h>
18#include <asm/assembler.h>
19
20/*
21 * Copy to user space from a kernel buffer (alignment handled by the hardware)
22 *
23 * Parameters:
24 * x0 - to
25 * x1 - from
26 * x2 - n
27 * Returns:
28 * x0 - bytes not copied
29 */
30ENTRY(__copy_to_user)
31 add x4, x0, x2 // upper user buffer boundary
32 subs x2, x2, #8
33 b.mi 2f
341:
35 ldr x3, [x1], #8
36 subs x2, x2, #8
37USER(9f, str x3, [x0], #8 )
38 b.pl 1b
392: adds x2, x2, #4
40 b.mi 3f
41 ldr w3, [x1], #4
42 sub x2, x2, #4
43USER(9f, str w3, [x0], #4 )
443: adds x2, x2, #2
45 b.mi 4f
46 ldrh w3, [x1], #2
47 sub x2, x2, #2
48USER(9f, strh w3, [x0], #2 )
494: adds x2, x2, #1
50 b.mi 5f
51 ldrb w3, [x1]
52USER(9f, strb w3, [x0] )
535: mov x0, #0
54 ret
55ENDPROC(__copy_to_user)
56
57 .section .fixup,"ax"
58 .align 2
599: sub x0, x4, x0 // bytes not copied
60 ret
61 .previous
diff --git a/arch/arm64/lib/delay.c b/arch/arm64/lib/delay.c
new file mode 100644
index 000000000000..dad4ec9bbfd1
--- /dev/null
+++ b/arch/arm64/lib/delay.c
@@ -0,0 +1,55 @@
1/*
2 * Delay loops based on the OpenRISC implementation.
3 *
4 * Copyright (C) 2012 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#include <linux/delay.h>
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/timex.h>
26
27void __delay(unsigned long cycles)
28{
29 cycles_t start = get_cycles();
30
31 while ((get_cycles() - start) < cycles)
32 cpu_relax();
33}
34EXPORT_SYMBOL(__delay);
35
36inline void __const_udelay(unsigned long xloops)
37{
38 unsigned long loops;
39
40 loops = xloops * loops_per_jiffy * HZ;
41 __delay(loops >> 32);
42}
43EXPORT_SYMBOL(__const_udelay);
44
45void __udelay(unsigned long usecs)
46{
47 __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
48}
49EXPORT_SYMBOL(__udelay);
50
51void __ndelay(unsigned long nsecs)
52{
53 __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
54}
55EXPORT_SYMBOL(__ndelay);
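
The scaling here is 32.32 fixed point: __udelay() converts microseconds into xloops = usecs * 2^32 / 10^6 (the 0x10C7 constant), __const_udelay() multiplies by loops_per_jiffy * HZ (delay-loop units per second, here timer cycles), and the final >> 32 drops the fixed-point fraction. As a worked example, with loops_per_jiffy = 100000 and HZ = 100 (10^7 cycles per second), __udelay(50) computes xloops = 50 * 0x10C7 = 214750, and (214750 * 10^7) >> 32 is roughly 500 cycles, which is exactly 50 microseconds at that rate.
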
diff --git a/arch/arm64/lib/strncpy_from_user.S b/arch/arm64/lib/strncpy_from_user.S
new file mode 100644
index 000000000000..56e448a831a0
--- /dev/null
+++ b/arch/arm64/lib/strncpy_from_user.S
@@ -0,0 +1,50 @@
1/*
2 * Based on arch/arm/lib/strncpy_from_user.S
3 *
4 * Copyright (C) 1995-2000 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/linkage.h>
21#include <asm/assembler.h>
22#include <asm/errno.h>
23
24 .text
25 .align 5
26
27/*
28 * Copy a string from user space to kernel space.
29 * x0 = dst, x1 = src, x2 = byte length
30 * returns the number of characters copied (strlen of copied string),
31 * -EFAULT on exception, or "len" if we fill the whole buffer
32 */
33ENTRY(__strncpy_from_user)
34 mov x4, x1
351: subs x2, x2, #1
36 bmi 2f
37USER(9f, ldrb w3, [x1], #1 )
38 strb w3, [x0], #1
39 cbnz w3, 1b
40 sub x1, x1, #1 // take NUL character out of count
412: sub x0, x1, x4
42 ret
43ENDPROC(__strncpy_from_user)
44
45 .section .fixup,"ax"
46 .align 0
479: strb wzr, [x0] // null terminate
48 mov x0, #-EFAULT
49 ret
50 .previous
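
The return convention mirrors the generic kernel helper: the number of bytes copied excluding the NUL, "len" if no NUL was found within len bytes, or -EFAULT if the user access faulted (the fixup also NUL-terminates the partial copy). A caller-side sketch, with a hypothetical command buffer:

	/* ubuf is an untrusted user pointer; kbuf has room for count bytes. */
	static long fetch_cmd(char *kbuf, const char __user *ubuf, long count)
	{
		long len = strncpy_from_user(kbuf, ubuf, count);

		if (len < 0)
			return len;		/* -EFAULT */
		if (len == count)
			return -EINVAL;		/* no NUL within count bytes */
		return len;
	}
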
diff --git a/arch/arm64/lib/strnlen_user.S b/arch/arm64/lib/strnlen_user.S
new file mode 100644
index 000000000000..7f7b176a5646
--- /dev/null
+++ b/arch/arm64/lib/strnlen_user.S
@@ -0,0 +1,47 @@
1/*
2 * Based on arch/arm/lib/strnlen_user.S
3 *
4 * Copyright (C) 1995-2000 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/linkage.h>
21#include <asm/assembler.h>
22#include <asm/errno.h>
23
24 .text
25 .align 5
26
27/* Prototype: unsigned long __strnlen_user(const char *str, long n)
28 * Purpose : get length of a string in user memory
29 * Params : str - address of string in user memory
30 * Returns : length of string *including terminator*
31 * or zero on exception, or n if too long
32 */
33ENTRY(__strnlen_user)
34 mov x2, x0
351: subs x1, x1, #1
36 b.mi 2f
37USER(9f, ldrb w3, [x0], #1 )
38 cbnz w3, 1b
392: sub x0, x0, x2
40 ret
41ENDPROC(__strnlen_user)
42
43 .section .fixup,"ax"
44 .align 0
459: mov x0, #0
46 ret
47 .previous
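
Note the asymmetry with strncpy_from_user(): a faulting access returns 0 rather than -EFAULT, the count includes the terminating NUL, and a return value equal to n cannot distinguish a string whose NUL lands exactly on the last byte from one that is too long, so callers typically treat both 0 and n as needing special handling before trusting the length.
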
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
new file mode 100644
index 000000000000..3140a2abcdc2
--- /dev/null
+++ b/arch/arm64/mm/Makefile
@@ -0,0 +1,4 @@
1obj-y := dma-mapping.o extable.o fault.o init.o \
2 cache.o copypage.o flush.o \
3 ioremap.o mmap.o pgd.o mmu.o \
4 context.o tlb.o proc.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
new file mode 100644
index 000000000000..abe69b80cf7f
--- /dev/null
+++ b/arch/arm64/mm/cache.S
@@ -0,0 +1,168 @@
1/*
2 * Cache maintenance
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/linkage.h>
21#include <linux/init.h>
22#include <asm/assembler.h>
23
24#include "proc-macros.S"
25
26/*
27 * __flush_dcache_all()
28 *
29 * Flush the whole D-cache.
30 *
31 * Corrupted registers: x0-x7, x9-x11
32 */
33ENTRY(__flush_dcache_all)
34 dsb sy // ensure ordering with previous memory accesses
35 mrs x0, clidr_el1 // read clidr
36 and x3, x0, #0x7000000 // extract loc from clidr
37 lsr x3, x3, #23 // left align loc bit field
38 cbz x3, finished // if loc is 0, then no need to clean
39 mov x10, #0 // start clean at cache level 0
40loop1:
41 add x2, x10, x10, lsr #1 // work out 3x current cache level
42 lsr x1, x0, x2 // extract cache type bits from clidr
43	and	x1, x1, #7			// mask off the bits for current cache only
44 cmp x1, #2 // see what cache we have at this level
45 b.lt skip // skip if no cache, or just i-cache
46 save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
47 msr csselr_el1, x10 // select current cache level in csselr
48	isb					// isb to sync the new csselr & ccsidr
49 mrs x1, ccsidr_el1 // read the new ccsidr
50 restore_irqs x9
51 and x2, x1, #7 // extract the length of the cache lines
52 add x2, x2, #4 // add 4 (line length offset)
53 mov x4, #0x3ff
54	and	x4, x4, x1, lsr #3		// find the maximum way number (associativity - 1)
55 clz x5, x4 // find bit position of way size increment
56 mov x7, #0x7fff
57	and	x7, x7, x1, lsr #13		// extract the maximum set number (index size - 1)
58loop2:
59 mov x9, x4 // create working copy of max way size
60loop3:
61 lsl x6, x9, x5
62 orr x11, x10, x6 // factor way and cache number into x11
63 lsl x6, x7, x2
64 orr x11, x11, x6 // factor index number into x11
65 dc cisw, x11 // clean & invalidate by set/way
66 subs x9, x9, #1 // decrement the way
67 b.ge loop3
68 subs x7, x7, #1 // decrement the index
69 b.ge loop2
70skip:
71 add x10, x10, #2 // increment cache number
72 cmp x3, x10
73 b.gt loop1
74finished:
75	mov	x10, #0				// switch back to cache level 0
76 msr csselr_el1, x10 // select current cache level in csselr
77 dsb sy
78 isb
79 ret
80ENDPROC(__flush_dcache_all)
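
The loop nest above is the architected set/way clean-and-invalidate walk: for every data or unified cache level up to the Level of Coherency it reads the geometry from CCSIDR_EL1 and issues one DC CISW per set/way pair. A C-level sketch of the same walk, purely for orientation (read_clidr(), select_cache_level(), read_ccsidr() and dc_cisw() are hypothetical helpers standing in for the MRS/MSR/DC instructions used above, and fls() is the kernel's find-last-set helper):

	static void flush_dcache_all_sketch(void)
	{
		unsigned long clidr = read_clidr();		/* CLIDR_EL1 */
		unsigned int loc = (clidr >> 24) & 0x7;		/* Level of Coherency */
		unsigned int level, way, set;

		for (level = 0; level < loc; level++) {
			unsigned int ctype = (clidr >> (level * 3)) & 0x7;
			unsigned long ccsidr;
			unsigned int line_shift, ways, sets, way_shift;

			if (ctype < 2)			/* no cache, or I-cache only */
				continue;

			select_cache_level(level << 1);	/* CSSELR_EL1: data/unified */
			ccsidr = read_ccsidr();		/* CCSIDR_EL1 */
			line_shift = (ccsidr & 0x7) + 4;	/* log2(line size in bytes) */
			ways = ((ccsidr >> 3) & 0x3ff) + 1;
			sets = ((ccsidr >> 13) & 0x7fff) + 1;
			way_shift = 32 - fls(ways - 1);	/* way field position (ways > 1 assumed) */

			for (way = 0; way < ways; way++)
				for (set = 0; set < sets; set++)
					dc_cisw(((unsigned long)way << way_shift) |
						((unsigned long)set << line_shift) |
						(level << 1));
		}
	}
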
81
82/*
83 * flush_cache_all()
84 *
85 * Flush the entire cache system. The data cache flush is now achieved
86 * using atomic clean / invalidates working outwards from L1 cache. This
87 * is done using Set/Way based cache maintenance instructions. The
88 * instruction cache can still be invalidated back to the point of
89 * unification in a single instruction.
90 */
91ENTRY(flush_cache_all)
92 mov x12, lr
93 bl __flush_dcache_all
94 mov x0, #0
95 ic ialluis // I+BTB cache invalidate
96 ret x12
97ENDPROC(flush_cache_all)
98
99/*
100 * flush_icache_range(start,end)
101 *
102 * Ensure that the I and D caches are coherent within specified region.
103 * This is typically used when code has been written to a memory region,
104 * and will be executed.
105 *
106 * - start - virtual start address of region
107 * - end - virtual end address of region
108 */
109ENTRY(flush_icache_range)
110 /* FALLTHROUGH */
111
112/*
113 * __flush_cache_user_range(start,end)
114 *
115 * Ensure that the I and D caches are coherent within specified region.
116 * This is typically used when code has been written to a memory region,
117 * and will be executed.
118 *
119 * - start - virtual start address of region
120 * - end - virtual end address of region
121 */
122ENTRY(__flush_cache_user_range)
123 dcache_line_size x2, x3
124 sub x3, x2, #1
125 bic x4, x0, x3
1261:
127USER(9f, dc cvau, x4 ) // clean D line to PoU
128 add x4, x4, x2
129 cmp x4, x1
130 b.lo 1b
131 dsb sy
132
133 icache_line_size x2, x3
134 sub x3, x2, #1
135 bic x4, x0, x3
1361:
137USER(9f, ic ivau, x4 ) // invalidate I line PoU
138 add x4, x4, x2
139 cmp x4, x1
140 b.lo 1b
1419: // ignore any faulting cache operation
142 dsb sy
143 isb
144 ret
145ENDPROC(flush_icache_range)
146ENDPROC(__flush_cache_user_range)
147
148/*
149 * __flush_dcache_area(kaddr, size)
150 *
151 * Ensure that the data held in the region [kaddr, kaddr + size) is
152 * written back to the point of coherency and invalidated.
153 *
154 * - kaddr - kernel address
155 * - size - size of the region
156 */
157ENTRY(__flush_dcache_area)
158 dcache_line_size x2, x3
159 add x1, x0, x1
160 sub x3, x2, #1
161 bic x0, x0, x3
1621: dc civac, x0 // clean & invalidate D line / unified line
163 add x0, x0, x2
164 cmp x0, x1
165 b.lo 1b
166 dsb sy
167 ret
168ENDPROC(__flush_dcache_area)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
new file mode 100644
index 000000000000..baa758d37021
--- /dev/null
+++ b/arch/arm64/mm/context.c
@@ -0,0 +1,159 @@
1/*
2 * Based on arch/arm/mm/context.c
3 *
4 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/mm.h>
23#include <linux/smp.h>
24#include <linux/percpu.h>
25
26#include <asm/mmu_context.h>
27#include <asm/tlbflush.h>
28#include <asm/cachetype.h>
29
30#define asid_bits(reg) \
31 (((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8)
32
33#define ASID_FIRST_VERSION (1 << MAX_ASID_BITS)
34
35static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
36unsigned int cpu_last_asid = ASID_FIRST_VERSION;
37
38/*
39 * We fork()ed a process, and we need a new context for the child to run in.
40 */
41void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
42{
43 mm->context.id = 0;
44 raw_spin_lock_init(&mm->context.id_lock);
45}
46
47static void flush_context(void)
48{
49 /* set the reserved TTBR0 before flushing the TLB */
50 cpu_set_reserved_ttbr0();
51 flush_tlb_all();
52 if (icache_is_aivivt())
53 __flush_icache_all();
54}
55
56#ifdef CONFIG_SMP
57
58static void set_mm_context(struct mm_struct *mm, unsigned int asid)
59{
60 unsigned long flags;
61
62 /*
63 * Locking needed for multi-threaded applications where the same
64 * mm->context.id could be set from different CPUs during the
65 * broadcast. This function is also called via IPI so the
66 * mm->context.id_lock has to be IRQ-safe.
67 */
68 raw_spin_lock_irqsave(&mm->context.id_lock, flags);
69 if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
70 /*
71 * Old version of ASID found. Set the new one and reset
72 * mm_cpumask(mm).
73 */
74 mm->context.id = asid;
75 cpumask_clear(mm_cpumask(mm));
76 }
77 raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
78
79 /*
80 * Set the mm_cpumask(mm) bit for the current CPU.
81 */
82 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
83}
84
85/*
86 * Reset the ASID on the current CPU. This function call is broadcast from the
87 * CPU handling the ASID rollover and holding cpu_asid_lock.
88 */
89static void reset_context(void *info)
90{
91 unsigned int asid;
92 unsigned int cpu = smp_processor_id();
93 struct mm_struct *mm = current->active_mm;
94
95 smp_rmb();
96 asid = cpu_last_asid + cpu;
97
98 flush_context();
99 set_mm_context(mm, asid);
100
101 /* set the new ASID */
102 cpu_switch_mm(mm->pgd, mm);
103}
104
105#else
106
107static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
108{
109 mm->context.id = asid;
110 cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
111}
112
113#endif
114
115void __new_context(struct mm_struct *mm)
116{
117 unsigned int asid;
118 unsigned int bits = asid_bits();
119
120 raw_spin_lock(&cpu_asid_lock);
121#ifdef CONFIG_SMP
122 /*
123 * Check the ASID again, in case the change was broadcast from another
124 * CPU before we acquired the lock.
125 */
126 if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
127 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
128 raw_spin_unlock(&cpu_asid_lock);
129 return;
130 }
131#endif
132 /*
133 * At this point, it is guaranteed that the current mm (with an old
134 * ASID) isn't active on any other CPU since the ASIDs are changed
135 * simultaneously via IPI.
136 */
137 asid = ++cpu_last_asid;
138
139 /*
140 * If we've used up all our ASIDs, we need to start a new version and
141 * flush the TLB.
142 */
143 if (unlikely((asid & ((1 << bits) - 1)) == 0)) {
144 /* increment the ASID version */
145 cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits);
146 if (cpu_last_asid == 0)
147 cpu_last_asid = ASID_FIRST_VERSION;
148 asid = cpu_last_asid + smp_processor_id();
149 flush_context();
150#ifdef CONFIG_SMP
151 smp_wmb();
152 smp_call_function(reset_context, NULL, 1);
153#endif
154 cpu_last_asid += NR_CPUS - 1;
155 }
156
157 set_mm_context(mm, asid);
158 raw_spin_unlock(&cpu_asid_lock);
159}
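
The cpu_last_asid counter packs a generation number above MAX_ASID_BITS and the hardware ASID below it, which is what the (id ^ cpu_last_asid) >> MAX_ASID_BITS tests in set_mm_context() and __new_context() rely on. A minimal illustration of that check, assuming MAX_ASID_BITS were 16 (the real value comes from asm/mmu_context.h):

	#define EXAMPLE_ASID_BITS	16

	/* Non-zero when "id" was allocated in an older generation than
	 * "last", i.e. before the most recent ASID rollover. */
	static int asid_is_stale(unsigned int id, unsigned int last)
	{
		return (id ^ last) >> EXAMPLE_ASID_BITS;
	}

After a rollover bumps the generation bits, every mm still carrying an ASID from the previous generation compares as stale and is forced through the new-ASID path, while mms already tagged with the current generation are left untouched.
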
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
new file mode 100644
index 000000000000..9aecbace4128
--- /dev/null
+++ b/arch/arm64/mm/copypage.c
@@ -0,0 +1,34 @@
1/*
2 * Based on arch/arm/mm/copypage.c
3 *
4 * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/mm.h>
21
22#include <asm/page.h>
23#include <asm/cacheflush.h>
24
25void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
26{
27 copy_page(kto, kfrom);
28 __flush_dcache_area(kto, PAGE_SIZE);
29}
30
31void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
32{
33 clear_page(kaddr);
34}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
new file mode 100644
index 000000000000..5eb244453a5b
--- /dev/null
+++ b/arch/arm64/mm/dma-mapping.c
@@ -0,0 +1,79 @@
1/*
2 * SWIOTLB-based DMA API implementation
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/gfp.h>
21#include <linux/export.h>
22#include <linux/slab.h>
23#include <linux/dma-mapping.h>
24#include <linux/vmalloc.h>
25#include <linux/swiotlb.h>
26
27#include <asm/cacheflush.h>
28
29struct dma_map_ops *dma_ops;
30EXPORT_SYMBOL(dma_ops);
31
32static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
33 dma_addr_t *dma_handle, gfp_t flags,
34 struct dma_attrs *attrs)
35{
36 if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
37 dev->coherent_dma_mask <= DMA_BIT_MASK(32))
38 flags |= GFP_DMA32;
39 return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
40}
41
42static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
43 void *vaddr, dma_addr_t dma_handle,
44 struct dma_attrs *attrs)
45{
46 swiotlb_free_coherent(dev, size, vaddr, dma_handle);
47}
48
49static struct dma_map_ops arm64_swiotlb_dma_ops = {
50 .alloc = arm64_swiotlb_alloc_coherent,
51 .free = arm64_swiotlb_free_coherent,
52 .map_page = swiotlb_map_page,
53 .unmap_page = swiotlb_unmap_page,
54 .map_sg = swiotlb_map_sg_attrs,
55 .unmap_sg = swiotlb_unmap_sg_attrs,
56 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
57 .sync_single_for_device = swiotlb_sync_single_for_device,
58 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
59 .sync_sg_for_device = swiotlb_sync_sg_for_device,
60 .dma_supported = swiotlb_dma_supported,
61 .mapping_error = swiotlb_dma_mapping_error,
62};
63
64void __init swiotlb_init_with_default_size(size_t default_size, int verbose);
65
66void __init arm64_swiotlb_init(size_t max_size)
67{
68 dma_ops = &arm64_swiotlb_dma_ops;
69 swiotlb_init_with_default_size(min((size_t)SZ_64M, max_size), 1);
70}
71
72#define PREALLOC_DMA_DEBUG_ENTRIES 4096
73
74static int __init dma_debug_do_init(void)
75{
76 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
77 return 0;
78}
79fs_initcall(dma_debug_do_init);
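
From a driver's point of view these ops sit behind the usual DMA API; once dma_ops points at arm64_swiotlb_dma_ops, a coherent allocation funnels into arm64_swiotlb_alloc_coherent() above. A hedged usage sketch with a hypothetical device and buffer size:

	/* dev is assumed to be a properly registered struct device. */
	static int example_dma_setup(struct device *dev)
	{
		dma_addr_t bus_addr;
		void *cpu_addr = dma_alloc_coherent(dev, SZ_4K, &bus_addr, GFP_KERNEL);

		if (!cpu_addr)
			return -ENOMEM;
		/* ... hand bus_addr to the device, access the buffer via cpu_addr ... */
		dma_free_coherent(dev, SZ_4K, cpu_addr, bus_addr);
		return 0;
	}
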
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
new file mode 100644
index 000000000000..79444279ba8c
--- /dev/null
+++ b/arch/arm64/mm/extable.c
@@ -0,0 +1,17 @@
1/*
2 * Based on arch/arm/mm/extable.c
3 */
4
5#include <linux/module.h>
6#include <linux/uaccess.h>
7
8int fixup_exception(struct pt_regs *regs)
9{
10 const struct exception_table_entry *fixup;
11
12 fixup = search_exception_tables(instruction_pointer(regs));
13 if (fixup)
14 regs->pc = fixup->fixup;
15
16 return fixup != NULL;
17}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
new file mode 100644
index 000000000000..1909a69983ca
--- /dev/null
+++ b/arch/arm64/mm/fault.c
@@ -0,0 +1,534 @@
1/*
2 * Based on arch/arm/mm/fault.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Copyright (C) 1995-2004 Russell King
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/module.h>
22#include <linux/signal.h>
23#include <linux/mm.h>
24#include <linux/hardirq.h>
25#include <linux/init.h>
26#include <linux/kprobes.h>
27#include <linux/uaccess.h>
28#include <linux/page-flags.h>
29#include <linux/sched.h>
30#include <linux/highmem.h>
31#include <linux/perf_event.h>
32
33#include <asm/exception.h>
34#include <asm/debug-monitors.h>
35#include <asm/system_misc.h>
36#include <asm/pgtable.h>
37#include <asm/tlbflush.h>
38
39/*
40 * Dump out the page tables associated with 'addr' in mm 'mm'.
41 */
42void show_pte(struct mm_struct *mm, unsigned long addr)
43{
44 pgd_t *pgd;
45
46 if (!mm)
47 mm = &init_mm;
48
49 pr_alert("pgd = %p\n", mm->pgd);
50 pgd = pgd_offset(mm, addr);
51 pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));
52
53 do {
54 pud_t *pud;
55 pmd_t *pmd;
56 pte_t *pte;
57
58 if (pgd_none_or_clear_bad(pgd))
59 break;
60
61 pud = pud_offset(pgd, addr);
62 if (pud_none_or_clear_bad(pud))
63 break;
64
65 pmd = pmd_offset(pud, addr);
66 printk(", *pmd=%016llx", pmd_val(*pmd));
67 if (pmd_none_or_clear_bad(pmd))
68 break;
69
70 pte = pte_offset_map(pmd, addr);
71 printk(", *pte=%016llx", pte_val(*pte));
72 pte_unmap(pte);
73 } while(0);
74
75 printk("\n");
76}
77
78/*
79 * The kernel tried to access some page that wasn't present.
80 */
81static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
82 unsigned int esr, struct pt_regs *regs)
83{
84 /*
85 * Are we prepared to handle this kernel fault?
86 */
87 if (fixup_exception(regs))
88 return;
89
90 /*
91 * No handler, we'll have to terminate things with extreme prejudice.
92 */
93 bust_spinlocks(1);
94 pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
95 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
96 "paging request", addr);
97
98 show_pte(mm, addr);
99 die("Oops", regs, esr);
100 bust_spinlocks(0);
101 do_exit(SIGKILL);
102}
103
104/*
105 * Something tried to access memory that isn't in our memory map. User mode
106 * accesses just cause a SIGSEGV
107 */
108static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
109 unsigned int esr, unsigned int sig, int code,
110 struct pt_regs *regs)
111{
112 struct siginfo si;
113
114 if (show_unhandled_signals) {
115 pr_info("%s[%d]: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
116 tsk->comm, task_pid_nr(tsk), sig, addr, esr);
117 show_pte(tsk->mm, addr);
118 show_regs(regs);
119 }
120
121 tsk->thread.fault_address = addr;
122 si.si_signo = sig;
123 si.si_errno = 0;
124 si.si_code = code;
125 si.si_addr = (void __user *)addr;
126 force_sig_info(sig, &si, tsk);
127}
128
129void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
130{
131 struct task_struct *tsk = current;
132 struct mm_struct *mm = tsk->active_mm;
133
134 /*
135 * If we are in kernel mode at this point, we have no context to
136 * handle this fault with.
137 */
138 if (user_mode(regs))
139 __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
140 else
141 __do_kernel_fault(mm, addr, esr, regs);
142}
143
144#define VM_FAULT_BADMAP 0x010000
145#define VM_FAULT_BADACCESS 0x020000
146
147#define ESR_WRITE (1 << 6)
148#define ESR_LNX_EXEC (1 << 24)
149
150/*
151 * Check that the permissions on the VMA allow for the fault which occurred.
152 * If we encountered a write fault, we must have write permission, otherwise
153 * we allow any permission.
154 */
155static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
156{
157 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
158
159 if (esr & ESR_WRITE)
160 mask = VM_WRITE;
161 if (esr & ESR_LNX_EXEC)
162 mask = VM_EXEC;
163
164 return vma->vm_flags & mask ? false : true;
165}
166
167static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
168 unsigned int esr, unsigned int flags,
169 struct task_struct *tsk)
170{
171 struct vm_area_struct *vma;
172 int fault;
173
174 vma = find_vma(mm, addr);
175 fault = VM_FAULT_BADMAP;
176 if (unlikely(!vma))
177 goto out;
178 if (unlikely(vma->vm_start > addr))
179 goto check_stack;
180
181 /*
182 * Ok, we have a good vm_area for this memory access, so we can handle
183 * it.
184 */
185good_area:
186 if (access_error(esr, vma)) {
187 fault = VM_FAULT_BADACCESS;
188 goto out;
189 }
190
191 return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
192
193check_stack:
194 if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
195 goto good_area;
196out:
197 return fault;
198}
199
200static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
201 struct pt_regs *regs)
202{
203 struct task_struct *tsk;
204 struct mm_struct *mm;
205 int fault, sig, code;
206 int write = esr & ESR_WRITE;
207 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
208 (write ? FAULT_FLAG_WRITE : 0);
209
210 tsk = current;
211 mm = tsk->mm;
212
213 /* Enable interrupts if they were enabled in the parent context. */
214 if (interrupts_enabled(regs))
215 local_irq_enable();
216
217 /*
218 * If we're in an interrupt or have no user context, we must not take
219 * the fault.
220 */
221 if (in_atomic() || !mm)
222 goto no_context;
223
224 /*
225 * As per x86, we may deadlock here. However, since the kernel only
226 * validly references user space from well defined areas of the code,
227 * we can bug out early if this is from code which shouldn't.
228 */
229 if (!down_read_trylock(&mm->mmap_sem)) {
230 if (!user_mode(regs) && !search_exception_tables(regs->pc))
231 goto no_context;
232retry:
233 down_read(&mm->mmap_sem);
234 } else {
235 /*
236 * The above down_read_trylock() might have succeeded in which
237 * case, we'll have missed the might_sleep() from down_read().
238 */
239 might_sleep();
240#ifdef CONFIG_DEBUG_VM
241 if (!user_mode(regs) && !search_exception_tables(regs->pc))
242 goto no_context;
243#endif
244 }
245
246 fault = __do_page_fault(mm, addr, esr, flags, tsk);
247
248 /*
249 * If we need to retry but a fatal signal is pending, handle the
250 * signal first. We do not need to release the mmap_sem because it
251 * would already be released in __lock_page_or_retry in mm/filemap.c.
252 */
253 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
254 return 0;
255
256 /*
257 * Major/minor page fault accounting is only done on the initial
258 * attempt. If we go through a retry, it is extremely likely that the
259 * page will be found in page cache at that point.
260 */
261
262 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
263 if (flags & FAULT_FLAG_ALLOW_RETRY) {
264 if (fault & VM_FAULT_MAJOR) {
265 tsk->maj_flt++;
266 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
267 addr);
268 } else {
269 tsk->min_flt++;
270 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
271 addr);
272 }
273 if (fault & VM_FAULT_RETRY) {
274 /*
275 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
276 * starvation.
277 */
278 flags &= ~FAULT_FLAG_ALLOW_RETRY;
279 goto retry;
280 }
281 }
282
283 up_read(&mm->mmap_sem);
284
285 /*
286 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
287 */
288 if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
289 VM_FAULT_BADACCESS))))
290 return 0;
291
292 if (fault & VM_FAULT_OOM) {
293 /*
294 * We ran out of memory, call the OOM killer, and return to
295 * userspace (which will retry the fault, or kill us if we got
296 * oom-killed).
297 */
298 pagefault_out_of_memory();
299 return 0;
300 }
301
302 /*
303 * If we are in kernel mode at this point, we have no context to
304 * handle this fault with.
305 */
306 if (!user_mode(regs))
307 goto no_context;
308
309 if (fault & VM_FAULT_SIGBUS) {
310 /*
311 * We had some memory, but were unable to successfully fix up
312 * this page fault.
313 */
314 sig = SIGBUS;
315 code = BUS_ADRERR;
316 } else {
317 /*
318 * Something tried to access memory that isn't in our memory
319 * map.
320 */
321 sig = SIGSEGV;
322 code = fault == VM_FAULT_BADACCESS ?
323 SEGV_ACCERR : SEGV_MAPERR;
324 }
325
326 __do_user_fault(tsk, addr, esr, sig, code, regs);
327 return 0;
328
329no_context:
330 __do_kernel_fault(mm, addr, esr, regs);
331 return 0;
332}
333
334/*
335 * First Level Translation Fault Handler
336 *
337 * We enter here because the first level page table doesn't contain a valid
338 * entry for the address.
339 *
340 * If the address is in kernel space (>= TASK_SIZE), then we are probably
341 * faulting in the vmalloc() area.
342 *
343 * If the init_task's first level page tables contain the relevant entry, we
344 * copy it to this task. If not, we send the process a signal, fix up the
345 * exception, or oops the kernel.
346 *
347 * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
348 * or a critical region, and should only copy the information from the master
349 * page table, nothing more.
350 */
351static int __kprobes do_translation_fault(unsigned long addr,
352 unsigned int esr,
353 struct pt_regs *regs)
354{
355 if (addr < TASK_SIZE)
356 return do_page_fault(addr, esr, regs);
357
358 do_bad_area(addr, esr, regs);
359 return 0;
360}
361
362/*
363 * Some section permission faults need to be handled gracefully. They can
364 * happen due to a __{get,put}_user during an oops.
365 */
366static int do_sect_fault(unsigned long addr, unsigned int esr,
367 struct pt_regs *regs)
368{
369 do_bad_area(addr, esr, regs);
370 return 0;
371}
372
373/*
374 * This abort handler always returns "fault".
375 */
376static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
377{
378 return 1;
379}
380
381static struct fault_info {
382 int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
383 int sig;
384 int code;
385 const char *name;
386} fault_info[] = {
387 { do_bad, SIGBUS, 0, "ttbr address size fault" },
388 { do_bad, SIGBUS, 0, "level 1 address size fault" },
389 { do_bad, SIGBUS, 0, "level 2 address size fault" },
390 { do_bad, SIGBUS, 0, "level 3 address size fault" },
391 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "input address range fault" },
392 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
393 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
394 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
395 { do_bad, SIGBUS, 0, "reserved access flag fault" },
396 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
397 { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
398 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
399 { do_bad, SIGBUS, 0, "reserved permission fault" },
400 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
401 { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
402 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
403 { do_bad, SIGBUS, 0, "synchronous external abort" },
404 { do_bad, SIGBUS, 0, "asynchronous external abort" },
405 { do_bad, SIGBUS, 0, "unknown 18" },
406 { do_bad, SIGBUS, 0, "unknown 19" },
407 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
408 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
409 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
410 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
411 { do_bad, SIGBUS, 0, "synchronous parity error" },
412 { do_bad, SIGBUS, 0, "asynchronous parity error" },
413 { do_bad, SIGBUS, 0, "unknown 26" },
414 { do_bad, SIGBUS, 0, "unknown 27" },
415	{ do_bad,		SIGBUS,	 0,		"synchronous parity error (translation table walk)" },
416	{ do_bad,		SIGBUS,	 0,		"synchronous parity error (translation table walk)" },
417	{ do_bad,		SIGBUS,	 0,		"synchronous parity error (translation table walk)" },
418	{ do_bad,		SIGBUS,	 0,		"synchronous parity error (translation table walk)" },
419 { do_bad, SIGBUS, 0, "unknown 32" },
420 { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
421 { do_bad, SIGBUS, 0, "debug event" },
422 { do_bad, SIGBUS, 0, "unknown 35" },
423 { do_bad, SIGBUS, 0, "unknown 36" },
424 { do_bad, SIGBUS, 0, "unknown 37" },
425 { do_bad, SIGBUS, 0, "unknown 38" },
426 { do_bad, SIGBUS, 0, "unknown 39" },
427 { do_bad, SIGBUS, 0, "unknown 40" },
428 { do_bad, SIGBUS, 0, "unknown 41" },
429 { do_bad, SIGBUS, 0, "unknown 42" },
430 { do_bad, SIGBUS, 0, "unknown 43" },
431 { do_bad, SIGBUS, 0, "unknown 44" },
432 { do_bad, SIGBUS, 0, "unknown 45" },
433 { do_bad, SIGBUS, 0, "unknown 46" },
434 { do_bad, SIGBUS, 0, "unknown 47" },
435 { do_bad, SIGBUS, 0, "unknown 48" },
436 { do_bad, SIGBUS, 0, "unknown 49" },
437 { do_bad, SIGBUS, 0, "unknown 50" },
438 { do_bad, SIGBUS, 0, "unknown 51" },
439 { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
440 { do_bad, SIGBUS, 0, "unknown 53" },
441 { do_bad, SIGBUS, 0, "unknown 54" },
442 { do_bad, SIGBUS, 0, "unknown 55" },
443 { do_bad, SIGBUS, 0, "unknown 56" },
444 { do_bad, SIGBUS, 0, "unknown 57" },
445 { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
446 { do_bad, SIGBUS, 0, "unknown 59" },
447 { do_bad, SIGBUS, 0, "unknown 60" },
448 { do_bad, SIGBUS, 0, "unknown 61" },
449 { do_bad, SIGBUS, 0, "unknown 62" },
450 { do_bad, SIGBUS, 0, "unknown 63" },
451};
452
453/*
454 * Dispatch a data abort to the relevant handler.
455 */
456asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
457 struct pt_regs *regs)
458{
459 const struct fault_info *inf = fault_info + (esr & 63);
460 struct siginfo info;
461
462 if (!inf->fn(addr, esr, regs))
463 return;
464
465 pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
466 inf->name, esr, addr);
467
468 info.si_signo = inf->sig;
469 info.si_errno = 0;
470 info.si_code = inf->code;
471 info.si_addr = (void __user *)addr;
472 arm64_notify_die("", regs, &info, esr);
473}
474
475/*
476 * Handle stack alignment exceptions.
477 */
478asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
479 unsigned int esr,
480 struct pt_regs *regs)
481{
482 struct siginfo info;
483
484 info.si_signo = SIGBUS;
485 info.si_errno = 0;
486 info.si_code = BUS_ADRALN;
487 info.si_addr = (void __user *)addr;
488 arm64_notify_die("", regs, &info, esr);
489}
490
491static struct fault_info debug_fault_info[] = {
492 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
493 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
494 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
495 { do_bad, SIGBUS, 0, "unknown 3" },
496 { do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
497 { do_bad, SIGTRAP, 0, "aarch32 vector catch" },
498 { do_bad, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
499 { do_bad, SIGBUS, 0, "unknown 7" },
500};
501
502void __init hook_debug_fault_code(int nr,
503 int (*fn)(unsigned long, unsigned int, struct pt_regs *),
504 int sig, int code, const char *name)
505{
506 BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
507
508 debug_fault_info[nr].fn = fn;
509 debug_fault_info[nr].sig = sig;
510 debug_fault_info[nr].code = code;
511 debug_fault_info[nr].name = name;
512}
513
514asmlinkage int __exception do_debug_exception(unsigned long addr,
515 unsigned int esr,
516 struct pt_regs *regs)
517{
518 const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
519 struct siginfo info;
520
521 if (!inf->fn(addr, esr, regs))
522 return 1;
523
524 pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
525 inf->name, esr, addr);
526
527 info.si_signo = inf->sig;
528 info.si_errno = 0;
529 info.si_code = inf->code;
530 info.si_addr = (void __user *)addr;
531 arm64_notify_die("", regs, &info, esr);
532
533 return 0;
534}
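
As a concrete example of the dispatch above: a data abort whose fault status code (ESR bits [5:0]) is 0b000111 indexes entry 7 of fault_info, "level 3 translation fault", and is handled by do_page_fault(); code 0b100001 (33) selects the "alignment fault" entry, whose do_bad() handler returns 1, so do_mem_abort() falls through to arm64_notify_die() with SIGBUS/BUS_ADRALN.
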
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
new file mode 100644
index 000000000000..c144adb1682f
--- /dev/null
+++ b/arch/arm64/mm/flush.c
@@ -0,0 +1,135 @@
1/*
2 * Based on arch/arm/mm/flush.c
3 *
4 * Copyright (C) 1995-2002 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/export.h>
21#include <linux/mm.h>
22#include <linux/pagemap.h>
23
24#include <asm/cacheflush.h>
25#include <asm/cachetype.h>
26#include <asm/tlbflush.h>
27
28#include "mm.h"
29
30void flush_cache_mm(struct mm_struct *mm)
31{
32}
33
34void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
35 unsigned long end)
36{
37 if (vma->vm_flags & VM_EXEC)
38 __flush_icache_all();
39}
40
41void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
42 unsigned long pfn)
43{
44}
45
46static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
47 unsigned long uaddr, void *kaddr,
48 unsigned long len)
49{
50 if (vma->vm_flags & VM_EXEC) {
51 unsigned long addr = (unsigned long)kaddr;
52 if (icache_is_aliasing()) {
53 __flush_dcache_area(kaddr, len);
54 __flush_icache_all();
55 } else {
56 flush_icache_range(addr, addr + len);
57 }
58 }
59}
60
61/*
62 * Copy user data from/to a page which is mapped into a different process's
63 * address space. Really, we want to allow our "user space" model to handle
64 * this.
65 *
66 * Note that this code needs to run on the current CPU.
67 */
68void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
69 unsigned long uaddr, void *dst, const void *src,
70 unsigned long len)
71{
72#ifdef CONFIG_SMP
73 preempt_disable();
74#endif
75 memcpy(dst, src, len);
76 flush_ptrace_access(vma, page, uaddr, dst, len);
77#ifdef CONFIG_SMP
78 preempt_enable();
79#endif
80}
81
82void __flush_dcache_page(struct page *page)
83{
84 __flush_dcache_area(page_address(page), PAGE_SIZE);
85}
86
87void __sync_icache_dcache(pte_t pte, unsigned long addr)
88{
89 unsigned long pfn;
90 struct page *page;
91
92 pfn = pte_pfn(pte);
93 if (!pfn_valid(pfn))
94 return;
95
96 page = pfn_to_page(pfn);
97 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
98 __flush_dcache_page(page);
99 __flush_icache_all();
100 } else if (icache_is_aivivt()) {
101 __flush_icache_all();
102 }
103}
104
105/*
106 * Ensure cache coherency between kernel mapping and userspace mapping of this
107 * page.
108 */
109void flush_dcache_page(struct page *page)
110{
111 struct address_space *mapping;
112
113 /*
114 * The zero page is never written to, so never has any dirty cache
115 * lines, and therefore never needs to be flushed.
116 */
117 if (page == ZERO_PAGE(0))
118 return;
119
120 mapping = page_mapping(page);
121 if (mapping && mapping_mapped(mapping)) {
122 __flush_dcache_page(page);
123 __flush_icache_all();
124 set_bit(PG_dcache_clean, &page->flags);
125 } else {
126 clear_bit(PG_dcache_clean, &page->flags);
127 }
128}
129EXPORT_SYMBOL(flush_dcache_page);
130
131/*
132 * Additional functions defined in assembly.
133 */
134EXPORT_SYMBOL(flush_cache_all);
135EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
new file mode 100644
index 000000000000..5f719ba949bc
--- /dev/null
+++ b/arch/arm64/mm/init.c
@@ -0,0 +1,437 @@
1/*
2 * Based on arch/arm/mm/init.c
3 *
4 * Copyright (C) 1995-2005 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/kernel.h>
21#include <linux/export.h>
22#include <linux/errno.h>
23#include <linux/swap.h>
24#include <linux/init.h>
25#include <linux/bootmem.h>
26#include <linux/mman.h>
27#include <linux/nodemask.h>
28#include <linux/initrd.h>
29#include <linux/gfp.h>
30#include <linux/memblock.h>
31#include <linux/sort.h>
32#include <linux/of_fdt.h>
33
34#include <asm/prom.h>
35#include <asm/sections.h>
36#include <asm/setup.h>
37#include <asm/sizes.h>
38#include <asm/tlb.h>
39
40#include "mm.h"
41
42static unsigned long phys_initrd_start __initdata = 0;
43static unsigned long phys_initrd_size __initdata = 0;
44
45phys_addr_t memstart_addr __read_mostly = 0;
46
47void __init early_init_dt_setup_initrd_arch(unsigned long start,
48 unsigned long end)
49{
50 phys_initrd_start = start;
51 phys_initrd_size = end - start;
52}
53
54static int __init early_initrd(char *p)
55{
56 unsigned long start, size;
57 char *endp;
58
59 start = memparse(p, &endp);
60 if (*endp == ',') {
61 size = memparse(endp + 1, NULL);
62
63 phys_initrd_start = start;
64 phys_initrd_size = size;
65 }
66 return 0;
67}
68early_param("initrd", early_initrd);
69
70#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
71
72static void __init zone_sizes_init(unsigned long min, unsigned long max)
73{
74 struct memblock_region *reg;
75 unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
76 unsigned long max_dma32 = min;
77
78 memset(zone_size, 0, sizeof(zone_size));
79
80#ifdef CONFIG_ZONE_DMA32
81 /* 4GB maximum for 32-bit only capable devices */
82 max_dma32 = min(max, MAX_DMA32_PFN);
83 zone_size[ZONE_DMA32] = max_dma32 - min;
84#endif
85 zone_size[ZONE_NORMAL] = max - max_dma32;
86
87 memcpy(zhole_size, zone_size, sizeof(zhole_size));
88
89 for_each_memblock(memory, reg) {
90 unsigned long start = memblock_region_memory_base_pfn(reg);
91 unsigned long end = memblock_region_memory_end_pfn(reg);
92
93 if (start >= max)
94 continue;
95#ifdef CONFIG_ZONE_DMA32
96 if (start < max_dma32) {
97 unsigned long dma_end = min(end, max_dma32);
98 zhole_size[ZONE_DMA32] -= dma_end - start;
99 }
100#endif
101 if (end > max_dma32) {
102 unsigned long normal_end = min(end, max);
103 unsigned long normal_start = max(start, max_dma32);
104 zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
105 }
106 }
107
108 free_area_init_node(0, zone_size, min, zhole_size);
109}
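
As a worked example, with 4KB pages (PAGE_SHIFT == 12) MAX_DMA32_PFN is 4GiB >> 12 = 0x100000 page frames, so a hypothetical machine with 4GiB of RAM starting at physical address 0x80000000 ends up with roughly 2GiB in ZONE_DMA32 (the part addressable below 4GiB) and the remaining 2GiB in ZONE_NORMAL, with zhole_size accounting for any gaps between memblock regions.
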
110
111#ifdef CONFIG_HAVE_ARCH_PFN_VALID
112int pfn_valid(unsigned long pfn)
113{
114 return memblock_is_memory(pfn << PAGE_SHIFT);
115}
116EXPORT_SYMBOL(pfn_valid);
117#endif
118
119#ifndef CONFIG_SPARSEMEM
120static void arm64_memory_present(void)
121{
122}
123#else
124static void arm64_memory_present(void)
125{
126 struct memblock_region *reg;
127
128 for_each_memblock(memory, reg)
129 memory_present(0, memblock_region_memory_base_pfn(reg),
130 memblock_region_memory_end_pfn(reg));
131}
132#endif
133
134void __init arm64_memblock_init(void)
135{
136 u64 *reserve_map, base, size;
137
138 /* Register the kernel text, kernel data and initrd with memblock */
139 memblock_reserve(__pa(_text), _end - _text);
140#ifdef CONFIG_BLK_DEV_INITRD
141 if (phys_initrd_size) {
142 memblock_reserve(phys_initrd_start, phys_initrd_size);
143
144 /* Now convert initrd to virtual addresses */
145 initrd_start = __phys_to_virt(phys_initrd_start);
146 initrd_end = initrd_start + phys_initrd_size;
147 }
148#endif
149
150 /*
151 * Reserve the page tables. These are already in use,
152 * and can only be in node 0.
153 */
154 memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
155 memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
156
157 /* Reserve the dtb region */
158 memblock_reserve(virt_to_phys(initial_boot_params),
159 be32_to_cpu(initial_boot_params->totalsize));
160
161 /*
162 * Process the reserve map. This will probably overlap the initrd
163 * and dtb locations which are already reserved, but overlapping
164 * doesn't hurt anything
165 */
166 reserve_map = ((void*)initial_boot_params) +
167 be32_to_cpu(initial_boot_params->off_mem_rsvmap);
168 while (1) {
169 base = be64_to_cpup(reserve_map++);
170 size = be64_to_cpup(reserve_map++);
171 if (!size)
172 break;
173 memblock_reserve(base, size);
174 }
175
176 memblock_allow_resize();
177 memblock_dump_all();
178}
179
180void __init bootmem_init(void)
181{
182 unsigned long min, max;
183
184 min = PFN_UP(memblock_start_of_DRAM());
185 max = PFN_DOWN(memblock_end_of_DRAM());
186
187 /*
188 * Sparsemem tries to allocate bootmem in memory_present(), so must be
189 * done after the fixed reservations.
190 */
191 arm64_memory_present();
192
193 sparse_init();
194 zone_sizes_init(min, max);
195
196 high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
197 max_pfn = max_low_pfn = max;
198}
199
200static inline int free_area(unsigned long pfn, unsigned long end, char *s)
201{
202 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
203
204 for (; pfn < end; pfn++) {
205 struct page *page = pfn_to_page(pfn);
206 ClearPageReserved(page);
207 init_page_count(page);
208 __free_page(page);
209 pages++;
210 }
211
212 if (size && s)
213 pr_info("Freeing %s memory: %dK\n", s, size);
214
215 return pages;
216}
217
218/*
219 * Poison init memory with an undefined instruction (0x0).
220 */
221static inline void poison_init_mem(void *s, size_t count)
222{
223 memset(s, 0, count);
224}
225
226#ifndef CONFIG_SPARSEMEM_VMEMMAP
227static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
228{
229 struct page *start_pg, *end_pg;
230 unsigned long pg, pgend;
231
232 /*
233 * Convert start_pfn/end_pfn to a struct page pointer.
234 */
235 start_pg = pfn_to_page(start_pfn - 1) + 1;
236 end_pg = pfn_to_page(end_pfn - 1) + 1;
237
238 /*
239 * Convert to physical addresses, and round start upwards and end
240 * downwards.
241 */
242 pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
243 pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
244
245 /*
246 * If there are free pages between these, free the section of the
247 * memmap array.
248 */
249 if (pg < pgend)
250 free_bootmem(pg, pgend - pg);
251}
252
253/*
254 * The mem_map array can get very big. Free the unused area of the memory map.
255 */
256static void __init free_unused_memmap(void)
257{
258 unsigned long start, prev_end = 0;
259 struct memblock_region *reg;
260
261 for_each_memblock(memory, reg) {
262 start = __phys_to_pfn(reg->base);
263
264#ifdef CONFIG_SPARSEMEM
265 /*
266 * Take care not to free memmap entries that don't exist due
267 * to SPARSEMEM sections which aren't present.
268 */
269 start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
270#endif
271 /*
272 * If we had a previous bank, and there is a space between the
273 * current bank and the previous, free it.
274 */
275 if (prev_end && prev_end < start)
276 free_memmap(prev_end, start);
277
278 /*
279 * Align up here since the VM subsystem insists that the
280 * memmap entries are valid from the bank end aligned to
281 * MAX_ORDER_NR_PAGES.
282 */
283 prev_end = ALIGN(start + __phys_to_pfn(reg->size),
284 MAX_ORDER_NR_PAGES);
285 }
286
287#ifdef CONFIG_SPARSEMEM
288 if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
289 free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
290#endif
291}
292#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
293
294/*
295 * mem_init() marks the free areas in the mem_map and tells us how much memory
296 * is free. This is done after various parts of the system have claimed their
297 * memory after the kernel image.
298 */
299void __init mem_init(void)
300{
301 unsigned long reserved_pages, free_pages;
302 struct memblock_region *reg;
303
304#if CONFIG_SWIOTLB
305 extern void __init arm64_swiotlb_init(size_t max_size);
306 arm64_swiotlb_init(max_pfn << (PAGE_SHIFT - 1));
307#endif
308
309 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
310
311#ifndef CONFIG_SPARSEMEM_VMEMMAP
312 /* this will put all unused low memory onto the freelists */
313 free_unused_memmap();
314#endif
315
316 totalram_pages += free_all_bootmem();
317
318 reserved_pages = free_pages = 0;
319
320 for_each_memblock(memory, reg) {
321 unsigned int pfn1, pfn2;
322 struct page *page, *end;
323
324 pfn1 = __phys_to_pfn(reg->base);
325 pfn2 = pfn1 + __phys_to_pfn(reg->size);
326
327 page = pfn_to_page(pfn1);
328 end = pfn_to_page(pfn2 - 1) + 1;
329
330 do {
331 if (PageReserved(page))
332 reserved_pages++;
333 else if (!page_count(page))
334 free_pages++;
335 page++;
336 } while (page < end);
337 }
338
339 /*
340 * Since our memory may not be contiguous, calculate the real number
341 * of pages we have in this system.
342 */
343 pr_info("Memory:");
344 num_physpages = 0;
345 for_each_memblock(memory, reg) {
346 unsigned long pages = memblock_region_memory_end_pfn(reg) -
347 memblock_region_memory_base_pfn(reg);
348 num_physpages += pages;
349 printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
350 }
351 printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
352
353 pr_notice("Memory: %luk/%luk available, %luk reserved\n",
354 nr_free_pages() << (PAGE_SHIFT-10),
355 free_pages << (PAGE_SHIFT-10),
356 reserved_pages << (PAGE_SHIFT-10));
357
358#define MLK(b, t) b, t, ((t) - (b)) >> 10
359#define MLM(b, t) b, t, ((t) - (b)) >> 20
360#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
361
362 pr_notice("Virtual kernel memory layout:\n"
363 " vmalloc : 0x%16lx - 0x%16lx (%6ld MB)\n"
364#ifdef CONFIG_SPARSEMEM_VMEMMAP
365 " vmemmap : 0x%16lx - 0x%16lx (%6ld MB)\n"
366#endif
367 " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
368 " memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
369 " .init : 0x%p" " - 0x%p" " (%6ld kB)\n"
370 " .text : 0x%p" " - 0x%p" " (%6ld kB)\n"
371 " .data : 0x%p" " - 0x%p" " (%6ld kB)\n",
372 MLM(VMALLOC_START, VMALLOC_END),
373#ifdef CONFIG_SPARSEMEM_VMEMMAP
374 MLM((unsigned long)virt_to_page(PAGE_OFFSET),
375 (unsigned long)virt_to_page(high_memory)),
376#endif
377 MLM(MODULES_VADDR, MODULES_END),
378 MLM(PAGE_OFFSET, (unsigned long)high_memory),
379
380 MLK_ROUNDUP(__init_begin, __init_end),
381 MLK_ROUNDUP(_text, _etext),
382 MLK_ROUNDUP(_sdata, _edata));
383
384#undef MLK
385#undef MLM
386#undef MLK_ROUNDUP
387
388 /*
389 * Check boundaries twice: Some fundamental inconsistencies can be
390 * detected at build time already.
391 */
392#ifdef CONFIG_COMPAT
393 BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
394#endif
395 BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
396 BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
397
398 if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
399 extern int sysctl_overcommit_memory;
400 /*
401 * On a machine this small we won't get anywhere without
402 * overcommit, so turn it on by default.
403 */
404 sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
405 }
406}
407
408void free_initmem(void)
409{
410 poison_init_mem(__init_begin, __init_end - __init_begin);
411 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
412 __phys_to_pfn(__pa(__init_end)),
413 "init");
414}
415
416#ifdef CONFIG_BLK_DEV_INITRD
417
418static int keep_initrd;
419
420void free_initrd_mem(unsigned long start, unsigned long end)
421{
422 if (!keep_initrd) {
423 poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
424 totalram_pages += free_area(__phys_to_pfn(__pa(start)),
425 __phys_to_pfn(__pa(end)),
426 "initrd");
427 }
428}
429
430static int __init keepinitrd_setup(char *__unused)
431{
432 keep_initrd = 1;
433 return 1;
434}
435
436__setup("keepinitrd", keepinitrd_setup);
437#endif
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
new file mode 100644
index 000000000000..1725cd6db37a
--- /dev/null
+++ b/arch/arm64/mm/ioremap.c
@@ -0,0 +1,84 @@
1/*
2 * Based on arch/arm/mm/ioremap.c
3 *
4 * (C) Copyright 1995 1996 Linus Torvalds
5 * Hacked for ARM by Phil Blundell <philb@gnu.org>
6 * Hacked to allow all architectures to build, and various cleanups
7 * by Russell King
8 * Copyright (C) 2012 ARM Ltd.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#include <linux/export.h>
24#include <linux/mm.h>
25#include <linux/vmalloc.h>
26#include <linux/io.h>
27
28static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
29 pgprot_t prot, void *caller)
30{
31 unsigned long last_addr;
32 unsigned long offset = phys_addr & ~PAGE_MASK;
33 int err;
34 unsigned long addr;
35 struct vm_struct *area;
36
37 /*
38 * Page align the mapping address and size, taking account of any
39 * offset.
40 */
41 phys_addr &= PAGE_MASK;
42 size = PAGE_ALIGN(size + offset);
43
44 /*
45 * Don't allow wraparound, zero size or outside PHYS_MASK.
46 */
47 last_addr = phys_addr + size - 1;
48 if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
49 return NULL;
50
51 /*
52 * Don't allow RAM to be mapped.
53 */
54 if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
55 return NULL;
56
57 area = get_vm_area_caller(size, VM_IOREMAP, caller);
58 if (!area)
59 return NULL;
60 addr = (unsigned long)area->addr;
61
62 err = ioremap_page_range(addr, addr + size, phys_addr, prot);
63 if (err) {
64 vunmap((void *)addr);
65 return NULL;
66 }
67
68 return (void __iomem *)(offset + addr);
69}
70
71void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
72{
73 return __ioremap_caller(phys_addr, size, prot,
74 __builtin_return_address(0));
75}
76EXPORT_SYMBOL(__ioremap);
77
78void __iounmap(volatile void __iomem *io_addr)
79{
80 void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
81
82 vunmap(addr);
83}
84EXPORT_SYMBOL(__iounmap);
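
Drivers normally reach this code through the ioremap()/iounmap() wrappers in asm/io.h, which supply a device memory type to __ioremap(). A hedged usage sketch with a made-up peripheral base address:

	static int example_map_device(void)
	{
		void __iomem *regs = ioremap(0x09000000, SZ_4K);	/* made-up base */

		if (!regs)
			return -ENOMEM;
		writel(0x1, regs + 0x30);	/* illustrative register poke */
		iounmap(regs);
		return 0;
	}
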
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
new file mode 100644
index 000000000000..d8d6e7851c14
--- /dev/null
+++ b/arch/arm64/mm/mm.h
@@ -0,0 +1,2 @@
1extern void __flush_dcache_page(struct page *page);
2extern void __init bootmem_init(void);
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
new file mode 100644
index 000000000000..7c7be7855638
--- /dev/null
+++ b/arch/arm64/mm/mmap.c
@@ -0,0 +1,144 @@
1/*
2 * Based on arch/arm/mm/mmap.c
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/elf.h>
20#include <linux/fs.h>
21#include <linux/mm.h>
22#include <linux/mman.h>
23#include <linux/export.h>
24#include <linux/shm.h>
25#include <linux/sched.h>
26#include <linux/io.h>
27#include <linux/personality.h>
28#include <linux/random.h>
29
30#include <asm/cputype.h>
31
32/*
33 * Leave enough space between the mmap area and the stack to honour ulimit in
34 * the face of randomisation.
35 */
36#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
37#define MAX_GAP (STACK_TOP/6*5)
38
39static int mmap_is_legacy(void)
40{
41 if (current->personality & ADDR_COMPAT_LAYOUT)
42 return 1;
43
44 if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
45 return 1;
46
47 return sysctl_legacy_va_layout;
48}
49
50/*
51 * Since get_random_int() returns the same value within a 1 jiffy window, we
52 * will almost always get the same randomisation for the stack and mmap
53 * region. This will mean the relative distance between stack and mmap will be
54 * the same.
55 *
56 * To avoid this we can shift the randomness by 1 bit.
57 */
58static unsigned long mmap_rnd(void)
59{
60 unsigned long rnd = 0;
61
62 if (current->flags & PF_RANDOMIZE)
63 rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
64
65 return rnd << (PAGE_SHIFT + 1);
66}
67
68static unsigned long mmap_base(void)
69{
70 unsigned long gap = rlimit(RLIMIT_STACK);
71
72 if (gap < MIN_GAP)
73 gap = MIN_GAP;
74 else if (gap > MAX_GAP)
75 gap = MAX_GAP;
76
77 return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
78}
79
80/*
81 * This function, called very early during the creation of a new process VM
82 * image, sets up which VM layout function to use:
83 */
84void arch_pick_mmap_layout(struct mm_struct *mm)
85{
86 /*
87 * Fall back to the standard layout if the personality bit is set, or
88 * if the expected stack growth is unlimited:
89 */
90 if (mmap_is_legacy()) {
91 mm->mmap_base = TASK_UNMAPPED_BASE;
92 mm->get_unmapped_area = arch_get_unmapped_area;
93 mm->unmap_area = arch_unmap_area;
94 } else {
95 mm->mmap_base = mmap_base();
96 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
97 mm->unmap_area = arch_unmap_area_topdown;
98 }
99}
100EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
101
102
103/*
104 * You really shouldn't be using read() or write() on /dev/mem. This might go
105 * away in the future.
106 */
107int valid_phys_addr_range(unsigned long addr, size_t size)
108{
109 if (addr < PHYS_OFFSET)
110 return 0;
111 if (addr + size > __pa(high_memory - 1) + 1)
112 return 0;
113
114 return 1;
115}
116
117/*
118 * Do not allow /dev/mem mappings beyond the supported physical range.
119 */
120int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
121{
122 return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
123}
124
125#ifdef CONFIG_STRICT_DEVMEM
126
127#include <linux/ioport.h>
128
129/*
130 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
131 * is valid. The argument is a physical page number. We mimic x86 here by
132 * disallowing access to system RAM as well as device-exclusive MMIO regions.
133 * This effectively disables read()/write() on /dev/mem.
134 */
135int devmem_is_allowed(unsigned long pfn)
136{
137 if (iomem_is_exclusive(pfn << PAGE_SHIFT))
138 return 0;
139 if (!page_is_ram(pfn))
140 return 1;
141 return 0;
142}
143
144#endif
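The net effect of mmap_base() above is a clamp-and-subtract: the stack rlimit, padded to at least MIN_GAP and capped at MAX_GAP, plus the per-exec randomisation, decides how far below STACK_TOP the top-down mmap region starts. The following stand-alone sketch reproduces the same arithmetic with stand-in constants, since the real STACK_TOP and STACK_RND_MASK values come from the arch headers rather than this file.

#include <stdio.h>

/* Stand-in values for illustration only; the kernel takes STACK_TOP from
 * asm/processor.h and folds STACK_RND_MASK into MIN_GAP and mmap_rnd(). */
#define EX_STACK_TOP	0x8000000000UL		/* 2^39, assumed */
#define EX_MIN_GAP	(128UL << 20)		/* randomisation term omitted */
#define EX_MAX_GAP	(EX_STACK_TOP / 6 * 5)

static unsigned long example_mmap_base(unsigned long stack_rlimit,
				       unsigned long rnd)
{
	unsigned long gap = stack_rlimit;

	if (gap < EX_MIN_GAP)
		gap = EX_MIN_GAP;
	else if (gap > EX_MAX_GAP)
		gap = EX_MAX_GAP;

	return EX_STACK_TOP - gap - rnd;	/* the kernel also page-aligns this */
}

int main(void)
{
	/* 8MB stack rlimit, no randomisation: the base sits MIN_GAP below the top. */
	printf("%#lx\n", example_mmap_base(8UL << 20, 0));
	return 0;
}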
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
new file mode 100644
index 000000000000..a6885d896ab6
--- /dev/null
+++ b/arch/arm64/mm/mmu.c
@@ -0,0 +1,395 @@
1/*
2 * Based on arch/arm/mm/mmu.c
3 *
4 * Copyright (C) 1995-2005 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/export.h>
21#include <linux/kernel.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/mman.h>
25#include <linux/nodemask.h>
26#include <linux/memblock.h>
27#include <linux/fs.h>
28
29#include <asm/cputype.h>
30#include <asm/sections.h>
31#include <asm/setup.h>
32#include <asm/sizes.h>
33#include <asm/tlb.h>
34#include <asm/mmu_context.h>
35
36#include "mm.h"
37
38/*
39 * Empty_zero_page is a special page that is used for zero-initialized data
40 * and COW.
41 */
42struct page *empty_zero_page;
43EXPORT_SYMBOL(empty_zero_page);
44
45pgprot_t pgprot_default;
46EXPORT_SYMBOL(pgprot_default);
47
48static pmdval_t prot_sect_kernel;
49
50struct cachepolicy {
51 const char policy[16];
52 u64 mair;
53 u64 tcr;
54};
55
56static struct cachepolicy cache_policies[] __initdata = {
57 {
58 .policy = "uncached",
59 .mair = 0x44, /* inner, outer non-cacheable */
60 .tcr = TCR_IRGN_NC | TCR_ORGN_NC,
61 }, {
62 .policy = "writethrough",
63 .mair = 0xaa, /* inner, outer write-through, read-allocate */
64 .tcr = TCR_IRGN_WT | TCR_ORGN_WT,
65 }, {
66 .policy = "writeback",
67 .mair = 0xee, /* inner, outer write-back, read-allocate */
68 .tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
69 }
70};
71
72/*
73 * These are useful for identifying cache coherency problems by allowing the
74 * cache, or the cache and write buffer, to be turned off. This changes the Normal
75 * memory caching attributes in the MAIR_EL1 register.
76 */
77static int __init early_cachepolicy(char *p)
78{
79 int i;
80 u64 tmp;
81
82 for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
83 int len = strlen(cache_policies[i].policy);
84
85 if (memcmp(p, cache_policies[i].policy, len) == 0)
86 break;
87 }
88 if (i == ARRAY_SIZE(cache_policies)) {
89 pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
90 return 0;
91 }
92
93 flush_cache_all();
94
95 /*
96 * Modify MT_NORMAL attributes in MAIR_EL1.
97 */
98 asm volatile(
99 " mrs %0, mair_el1\n"
100 " bfi %0, %1, #%2, #8\n"
101 " msr mair_el1, %0\n"
102 " isb\n"
103 : "=&r" (tmp)
104 : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));
105
106 /*
107 * Modify TCR PTW cacheability attributes.
108 */
109 asm volatile(
110 " mrs %0, tcr_el1\n"
111 " bic %0, %0, %2\n"
112 " orr %0, %0, %1\n"
113 " msr tcr_el1, %0\n"
114 " isb\n"
115 : "=&r" (tmp)
116 : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));
117
118 flush_cache_all();
119
120 return 0;
121}
122early_param("cachepolicy", early_cachepolicy);
123
124/*
125 * Adjust the PMD section entries according to the CPU in use.
126 */
127static void __init init_mem_pgprot(void)
128{
129 pteval_t default_pgprot;
130 int i;
131
132 default_pgprot = PTE_ATTRINDX(MT_NORMAL);
133 prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
134
135#ifdef CONFIG_SMP
136 /*
137 * Mark memory with the "shared" attribute for SMP systems
138 */
139 default_pgprot |= PTE_SHARED;
140 prot_sect_kernel |= PMD_SECT_S;
141#endif
142
143 for (i = 0; i < 16; i++) {
144 unsigned long v = pgprot_val(protection_map[i]);
145 protection_map[i] = __pgprot(v | default_pgprot);
146 }
147
148 pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
149}
150
151pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
152 unsigned long size, pgprot_t vma_prot)
153{
154 if (!pfn_valid(pfn))
155 return pgprot_noncached(vma_prot);
156 else if (file->f_flags & O_SYNC)
157 return pgprot_writecombine(vma_prot);
158 return vma_prot;
159}
160EXPORT_SYMBOL(phys_mem_access_prot);
161
162static void __init *early_alloc(unsigned long sz)
163{
164 void *ptr = __va(memblock_alloc(sz, sz));
165 memset(ptr, 0, sz);
166 return ptr;
167}
168
169static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
170 unsigned long end, unsigned long pfn)
171{
172 pte_t *pte;
173
174 if (pmd_none(*pmd)) {
175 pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
176 __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
177 }
178 BUG_ON(pmd_bad(*pmd));
179
180 pte = pte_offset_kernel(pmd, addr);
181 do {
182 set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
183 pfn++;
184 } while (pte++, addr += PAGE_SIZE, addr != end);
185}
186
187static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
188 unsigned long end, phys_addr_t phys)
189{
190 pmd_t *pmd;
191 unsigned long next;
192
193 /*
194 * Check for initial section mappings in the pgd/pud and remove them.
195 */
196 if (pud_none(*pud) || pud_bad(*pud)) {
197 pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
198 pud_populate(&init_mm, pud, pmd);
199 }
200
201 pmd = pmd_offset(pud, addr);
202 do {
203 next = pmd_addr_end(addr, end);
204 /* try section mapping first */
205 if (((addr | next | phys) & ~SECTION_MASK) == 0)
206 set_pmd(pmd, __pmd(phys | prot_sect_kernel));
207 else
208 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
209 phys += next - addr;
210 } while (pmd++, addr = next, addr != end);
211}
212
213static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
214 unsigned long end, unsigned long phys)
215{
216 pud_t *pud = pud_offset(pgd, addr);
217 unsigned long next;
218
219 do {
220 next = pud_addr_end(addr, end);
221 alloc_init_pmd(pud, addr, next, phys);
222 phys += next - addr;
223 } while (pud++, addr = next, addr != end);
224}
225
226/*
227 * Create the page directory entries and any necessary page tables for the
228 * mapping specified by the given physical/virtual address range and size.
229 */
230static void __init create_mapping(phys_addr_t phys, unsigned long virt,
231 phys_addr_t size)
232{
233 unsigned long addr, length, end, next;
234 pgd_t *pgd;
235
236 if (virt < VMALLOC_START) {
237 pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
238 phys, virt);
239 return;
240 }
241
242 addr = virt & PAGE_MASK;
243 length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
244
245 pgd = pgd_offset_k(addr);
246 end = addr + length;
247 do {
248 next = pgd_addr_end(addr, end);
249 alloc_init_pud(pgd, addr, next, phys);
250 phys += next - addr;
251 } while (pgd++, addr = next, addr != end);
252}
253
254static void __init map_mem(void)
255{
256 struct memblock_region *reg;
257
258 /* map all the memory banks */
259 for_each_memblock(memory, reg) {
260 phys_addr_t start = reg->base;
261 phys_addr_t end = start + reg->size;
262
263 if (start >= end)
264 break;
265
266 create_mapping(start, __phys_to_virt(start), end - start);
267 }
268}
269
270/*
271 * paging_init() sets up the page tables, initialises the zone memory
272 * maps and sets up the zero page.
273 */
274void __init paging_init(void)
275{
276 void *zero_page;
277
278 /*
279 * Maximum PGDIR_SIZE addressable via the initial direct kernel
280 * mapping in swapper_pg_dir.
281 */
282 memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);
283
284 init_mem_pgprot();
285 map_mem();
286
287 /*
288 * Finally flush the caches and tlb to ensure that we're in a
289 * consistent state.
290 */
291 flush_cache_all();
292 flush_tlb_all();
293
294 /* allocate the zero page. */
295 zero_page = early_alloc(PAGE_SIZE);
296
297 bootmem_init();
298
299 empty_zero_page = virt_to_page(zero_page);
300 __flush_dcache_page(empty_zero_page);
301
302 /*
303 * TTBR0 is only used for the identity mapping at this stage. Make it
304 * point to zero page to avoid speculatively fetching new entries.
305 */
306 cpu_set_reserved_ttbr0();
307 flush_tlb_all();
308}
309
310/*
311 * Enable the identity mapping to allow the MMU disabling.
312 */
313void setup_mm_for_reboot(void)
314{
315 cpu_switch_mm(idmap_pg_dir, &init_mm);
316 flush_tlb_all();
317}
318
319/*
320 * Check whether a kernel address is valid (derived from arch/x86/).
321 */
322int kern_addr_valid(unsigned long addr)
323{
324 pgd_t *pgd;
325 pud_t *pud;
326 pmd_t *pmd;
327 pte_t *pte;
328
329 if ((((long)addr) >> VA_BITS) != -1UL)
330 return 0;
331
332 pgd = pgd_offset_k(addr);
333 if (pgd_none(*pgd))
334 return 0;
335
336 pud = pud_offset(pgd, addr);
337 if (pud_none(*pud))
338 return 0;
339
340 pmd = pmd_offset(pud, addr);
341 if (pmd_none(*pmd))
342 return 0;
343
344 pte = pte_offset_kernel(pmd, addr);
345 if (pte_none(*pte))
346 return 0;
347
348 return pfn_valid(pte_pfn(*pte));
349}
350#ifdef CONFIG_SPARSEMEM_VMEMMAP
351#ifdef CONFIG_ARM64_64K_PAGES
352int __meminit vmemmap_populate(struct page *start_page,
353 unsigned long size, int node)
354{
355 return vmemmap_populate_basepages(start_page, size, node);
356}
357#else /* !CONFIG_ARM64_64K_PAGES */
358int __meminit vmemmap_populate(struct page *start_page,
359 unsigned long size, int node)
360{
361 unsigned long addr = (unsigned long)start_page;
362 unsigned long end = (unsigned long)(start_page + size);
363 unsigned long next;
364 pgd_t *pgd;
365 pud_t *pud;
366 pmd_t *pmd;
367
368 do {
369 next = pmd_addr_end(addr, end);
370
371 pgd = vmemmap_pgd_populate(addr, node);
372 if (!pgd)
373 return -ENOMEM;
374
375 pud = vmemmap_pud_populate(pgd, addr, node);
376 if (!pud)
377 return -ENOMEM;
378
379 pmd = pmd_offset(pud, addr);
380 if (pmd_none(*pmd)) {
381 void *p = NULL;
382
383 p = vmemmap_alloc_block_buf(PMD_SIZE, node);
384 if (!p)
385 return -ENOMEM;
386
387 set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
388 } else
389 vmemmap_verify((pte_t *)pmd, node, addr, next);
390 } while (addr = next, addr != end);
391
392 return 0;
393}
394#endif /* CONFIG_ARM64_64K_PAGES */
395#endif /* CONFIG_SPARSEMEM_VMEMMAP */
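Note that early_cachepolicy() earlier in this file is registered as an early boot parameter, so the Normal-memory attributes can be downgraded from the kernel command line when chasing cache coherency problems, for example:

	cachepolicy=uncached

with writethrough and writeback accepted as the other two policies from the cache_policies[] table.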
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
new file mode 100644
index 000000000000..7083cdada657
--- /dev/null
+++ b/arch/arm64/mm/pgd.c
@@ -0,0 +1,54 @@
1/*
2 * PGD allocation/freeing
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/mm.h>
21#include <linux/gfp.h>
22#include <linux/highmem.h>
23#include <linux/slab.h>
24
25#include <asm/pgalloc.h>
26#include <asm/page.h>
27#include <asm/tlbflush.h>
28
29#include "mm.h"
30
31#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
32
33pgd_t *pgd_alloc(struct mm_struct *mm)
34{
35 pgd_t *new_pgd;
36
37 if (PGD_SIZE == PAGE_SIZE)
38 new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
39 else
40 new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
41
42 if (!new_pgd)
43 return NULL;
44
45 return new_pgd;
46}
47
48void pgd_free(struct mm_struct *mm, pgd_t *pgd)
49{
50 if (PGD_SIZE == PAGE_SIZE)
51 free_page((unsigned long)pgd);
52 else
53 kfree(pgd);
54}
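The PGD_SIZE test in pgd_alloc()/pgd_free() falls out of the 39-bit VA configuration. With 4KB pages the top level translates VA bits 38:30, so PTRS_PER_PGD = 2^9 = 512 and PGD_SIZE = 512 * sizeof(pgd_t) = 512 * 8 = 4096 bytes, exactly one page, hence the get_zeroed_page() path; with the 64KB page configuration the top level is smaller than a page (1024 entries, 8KB, assuming the same 39-bit VA), so the kzalloc() path is taken instead.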
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
new file mode 100644
index 000000000000..8957b822010b
--- /dev/null
+++ b/arch/arm64/mm/proc-macros.S
@@ -0,0 +1,55 @@
1/*
2 * Based on arch/arm/mm/proc-macros.S
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <asm/asm-offsets.h>
20#include <asm/thread_info.h>
21
22/*
23 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
24 */
25 .macro vma_vm_mm, rd, rn
26 ldr \rd, [\rn, #VMA_VM_MM]
27 .endm
28
29/*
30 * mmid - get context id from mm pointer (mm->context.id)
31 */
32 .macro mmid, rd, rn
33 ldr \rd, [\rn, #MM_CONTEXT_ID]
34 .endm
35
36/*
37 * dcache_line_size - get the minimum D-cache line size from the CTR register.
38 */
39 .macro dcache_line_size, reg, tmp
40 mrs \tmp, ctr_el0 // read CTR
41 lsr \tmp, \tmp, #16
42 and \tmp, \tmp, #0xf // cache line size encoding
43 mov \reg, #4 // bytes per word
44 lsl \reg, \reg, \tmp // actual cache line size
45 .endm
46
47/*
48 * icache_line_size - get the minimum I-cache line size from the CTR register.
49 */
50 .macro icache_line_size, reg, tmp
51 mrs \tmp, ctr_el0 // read CTR
52 and \tmp, \tmp, #0xf // cache line size encoding
53 mov \reg, #4 // bytes per word
54 lsl \reg, \reg, \tmp // actual cache line size
55 .endm
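For reference, CTR_EL0 encodes the minimum D-cache line size as a log2 word count in DminLine (bits [19:16]) and the minimum I-cache line size in IminLine (bits [3:0]), which is why both macros end by computing 4 << field:

	line size in bytes = 4 << CTR_EL0.DminLine	/* e.g. 4 << 4 = 64 */

so a DminLine value of 4 corresponds to the common 64-byte cache line.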
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
new file mode 100644
index 000000000000..f1d8b9bbfdad
--- /dev/null
+++ b/arch/arm64/mm/proc.S
@@ -0,0 +1,175 @@
1/*
2 * Based on arch/arm/mm/proc.S
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2012 ARM Ltd.
6 * Author: Catalin Marinas <catalin.marinas@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/init.h>
22#include <linux/linkage.h>
23#include <asm/assembler.h>
24#include <asm/asm-offsets.h>
25#include <asm/hwcap.h>
26#include <asm/pgtable-hwdef.h>
27#include <asm/pgtable.h>
28
29#include "proc-macros.S"
30
31#ifndef CONFIG_SMP
32/* PTWs cacheable, inner/outer WBWA not shareable */
33#define TCR_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
34#else
35/* PTWs cacheable, inner/outer WBWA shareable */
36#define TCR_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_SHARED
37#endif
38
39#define MAIR(attr, mt) ((attr) << ((mt) * 8))
40
41/*
42 * cpu_cache_off()
43 *
44 * Turn the CPU D-cache off.
45 */
46ENTRY(cpu_cache_off)
47 mrs x0, sctlr_el1
48 bic x0, x0, #1 << 2 // clear SCTLR.C
49 msr sctlr_el1, x0
50 isb
51 ret
52ENDPROC(cpu_cache_off)
53
54/*
55 * cpu_reset(loc)
56 *
57 * Perform a soft reset of the system. Put the CPU into the same state
58 * as it would be if it had been reset, and branch to what would be the
59 * reset vector. It must be executed with the flat identity mapping.
60 *
61 * - loc - location to jump to for soft reset
62 */
63 .align 5
64ENTRY(cpu_reset)
65 mrs x1, sctlr_el1
66 bic x1, x1, #1
67 msr sctlr_el1, x1 // disable the MMU
68 isb
69 ret x0
70ENDPROC(cpu_reset)
71
72/*
73 * cpu_do_idle()
74 *
75 * Idle the processor (wait for interrupt).
76 */
77ENTRY(cpu_do_idle)
78 dsb sy // WFI may enter a low-power mode
79 wfi
80 ret
81ENDPROC(cpu_do_idle)
82
83/*
84 * cpu_do_switch_mm(pgd_phys, tsk)
85 *
86 * Set the translation table base pointer to be pgd_phys.
87 *
88 * - pgd_phys - physical address of new TTB
89 */
90ENTRY(cpu_do_switch_mm)
91 mmid w1, x1 // get mm->context.id
92 bfi x0, x1, #48, #16 // set the ASID
93 msr ttbr0_el1, x0 // set TTBR0
94 isb
95 ret
96ENDPROC(cpu_do_switch_mm)
97
98cpu_name:
99 .ascii "AArch64 Processor"
100 .align
101
102 .section ".text.init", #alloc, #execinstr
103
104/*
105 * __cpu_setup
106 *
107 * Initialise the processor for turning the MMU on. Return in x0 the
108 * value of the SCTLR_EL1 register.
109 */
110ENTRY(__cpu_setup)
111 /*
112 * Preserve the link register across the function call.
113 */
114 mov x28, lr
115 bl __flush_dcache_all
116 mov lr, x28
117 ic iallu // I+BTB cache invalidate
118 dsb sy
119
120 mov x0, #3 << 20
121 msr cpacr_el1, x0 // Enable FP/ASIMD
122 mov x0, #1
123 msr oslar_el1, x0 // Set the debug OS lock
124 tlbi vmalle1is // invalidate I + D TLBs
125 /*
126 * Memory region attributes for LPAE:
127 *
128 * n = AttrIndx[2:0]
129 * n MAIR
130 * DEVICE_nGnRnE 000 00000000
131 * DEVICE_nGnRE 001 00000100
132 * DEVICE_GRE 010 00001100
133 * NORMAL_NC 011 01000100
134 * NORMAL 100 11111111
135 */
136 ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
137 MAIR(0x04, MT_DEVICE_nGnRE) | \
138 MAIR(0x0c, MT_DEVICE_GRE) | \
139 MAIR(0x44, MT_NORMAL_NC) | \
140 MAIR(0xff, MT_NORMAL)
141 msr mair_el1, x5
142 /*
143 * Prepare SCTLR
144 */
145 adr x5, crval
146 ldp w5, w6, [x5]
147 mrs x0, sctlr_el1
148 bic x0, x0, x5 // clear bits
149 orr x0, x0, x6 // set bits
150 /*
151 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
152 * both user and kernel.
153 */
154 ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
155 TCR_ASID16 | (1 << 31)
156#ifdef CONFIG_ARM64_64K_PAGES
157 orr x10, x10, TCR_TG0_64K
158 orr x10, x10, TCR_TG1_64K
159#endif
160 msr tcr_el1, x10
161 ret // return to head.S
162ENDPROC(__cpu_setup)
163
164 /*
165 * n n T
166 * U E WT T UD US IHBS
167 * CE0 XWHW CZ ME TEEA S
168 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
169 * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
170 * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
171 */
172 .type crval, #object
173crval:
174 .word 0x030802e2 // clear
175 .word 0x0405d11d // set
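Two of the constants above are easier to check once expanded. The MAIR() terms OR together one attribute byte per memory-type index from the table in the comment:

	mair_el1 = 0x00 | (0x04 << 8) | (0x0c << 16) | (0x44 << 24) | (0xffUL << 32)
	         = 0x000000ff440c0400

and TCR_TxSZ(VA_BITS) with VA_BITS = 39 gives T0SZ = T1SZ = 64 - 39 = 25, matching the 512GB (39-bit) user and kernel ranges noted in __cpu_setup().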
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
new file mode 100644
index 000000000000..8ae80a18e8ec
--- /dev/null
+++ b/arch/arm64/mm/tlb.S
@@ -0,0 +1,71 @@
1/*
2 * Based on arch/arm/mm/tlb.S
3 *
4 * Copyright (C) 1997-2002 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 * Written by Catalin Marinas <catalin.marinas@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20#include <linux/linkage.h>
21#include <asm/assembler.h>
22#include <asm/asm-offsets.h>
23#include <asm/page.h>
24#include <asm/tlbflush.h>
25#include "proc-macros.S"
26
27/*
28 * __cpu_flush_user_tlb_range(start, end, vma)
29 *
30 * Invalidate a range of TLB entries in the specified address space.
31 *
32 * - start - start address (may not be aligned)
33 * - end - end address (exclusive, may not be aligned)
34 * - vma - vma_struct describing address range
35 */
36ENTRY(__cpu_flush_user_tlb_range)
37 vma_vm_mm x3, x2 // get vma->vm_mm
38 mmid x3, x3 // get vm_mm->context.id
39 dsb sy
40 lsr x0, x0, #12 // align address
41 lsr x1, x1, #12
42 bfi x0, x3, #48, #16 // start VA and ASID
43 bfi x1, x3, #48, #16 // end VA and ASID
441: tlbi vae1is, x0 // TLB invalidate by address and ASID
45 add x0, x0, #1
46 cmp x0, x1
47 b.lo 1b
48 dsb sy
49 ret
50ENDPROC(__cpu_flush_user_tlb_range)
51
52/*
53 * __cpu_flush_kern_tlb_range(start,end)
54 *
55 * Invalidate a range of kernel TLB entries.
56 *
57 * - start - start address (may not be aligned)
58 * - end - end address (exclusive, may not be aligned)
59 */
60ENTRY(__cpu_flush_kern_tlb_range)
61 dsb sy
62 lsr x0, x0, #12 // align address
63 lsr x1, x1, #12
641: tlbi vaae1is, x0 // TLB invalidate by address
65 add x0, x0, #1
66 cmp x0, x1
67 b.lo 1b
68 dsb sy
69 isb
70 ret
71ENDPROC(__cpu_flush_kern_tlb_range)
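As a worked example of the operand packing in __cpu_flush_user_tlb_range: flushing the user range 0x400000-0x403000 (end exclusive) for a context with ASID 0x2a shifts both addresses down to page numbers 0x400 and 0x403, inserts the ASID into bits [63:48] of each operand, and the loop then issues tlbi vae1is for 0x002a000000000400, ...0401 and ...0402, i.e. one invalidate per 4KB step of the range.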
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index d53cd0afc200..6a78073c3808 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -35,3 +35,8 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
35 default y 35 default y
36 help 36 help
37 Use the always on PRCMU Timer as sched_clock 37 Use the always on PRCMU Timer as sched_clock
38
39config CLKSRC_ARM_GENERIC
40 def_bool y if ARM64
41 help
42 This option enables support for the ARM generic timer.
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index b65d0c56ab35..65919901a301 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
13obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o 13obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
14obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o 14obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
15obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o 15obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
16obj-$(CONFIG_CLKSRC_ARM_GENERIC) += arm_generic.o
diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
new file mode 100644
index 000000000000..c4d9f9566c64
--- /dev/null
+++ b/drivers/clocksource/arm_generic.c
@@ -0,0 +1,232 @@
1/*
2 * Generic timers support
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/smp.h>
25#include <linux/cpu.h>
26#include <linux/jiffies.h>
27#include <linux/interrupt.h>
28#include <linux/clockchips.h>
29#include <linux/of_irq.h>
30#include <linux/io.h>
31
32#include <clocksource/arm_generic.h>
33
34#include <asm/arm_generic.h>
35
36static u32 arch_timer_rate;
37static u64 sched_clock_mult __read_mostly;
38static DEFINE_PER_CPU(struct clock_event_device, arch_timer_evt);
39static int arch_timer_ppi;
40
41static irqreturn_t arch_timer_handle_irq(int irq, void *dev_id)
42{
43 struct clock_event_device *evt = dev_id;
44 unsigned long ctrl;
45
46 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
47 if (ctrl & ARCH_TIMER_CTRL_ISTATUS) {
48 ctrl |= ARCH_TIMER_CTRL_IMASK;
49 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
50 evt->event_handler(evt);
51 return IRQ_HANDLED;
52 }
53
54 return IRQ_NONE;
55}
56
57static void arch_timer_stop(void)
58{
59 unsigned long ctrl;
60
61 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
62 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
63 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
64}
65
66static void arch_timer_set_mode(enum clock_event_mode mode,
67 struct clock_event_device *clk)
68{
69 switch (mode) {
70 case CLOCK_EVT_MODE_UNUSED:
71 case CLOCK_EVT_MODE_SHUTDOWN:
72 arch_timer_stop();
73 break;
74 default:
75 break;
76 }
77}
78
79static int arch_timer_set_next_event(unsigned long evt,
80 struct clock_event_device *unused)
81{
82 unsigned long ctrl;
83
84 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
85 ctrl |= ARCH_TIMER_CTRL_ENABLE;
86 ctrl &= ~ARCH_TIMER_CTRL_IMASK;
87
88 arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
89 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
90
91 return 0;
92}
93
94static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
95{
96 /* Let's make sure the timer is off before doing anything else */
97 arch_timer_stop();
98
99 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
100 clk->name = "arch_sys_timer";
101 clk->rating = 400;
102 clk->set_mode = arch_timer_set_mode;
103 clk->set_next_event = arch_timer_set_next_event;
104 clk->irq = arch_timer_ppi;
105 clk->cpumask = cpumask_of(smp_processor_id());
106
107 clockevents_config_and_register(clk, arch_timer_rate,
108 0xf, 0x7fffffff);
109
110 enable_percpu_irq(clk->irq, 0);
111
112 /* Ensure the physical counter is visible to userspace for the vDSO. */
113 arch_counter_enable_user_access();
114}
115
116static void __init arch_timer_calibrate(void)
117{
118 if (arch_timer_rate == 0) {
119 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
120 arch_timer_rate = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
121
122 /* Check the timer frequency. */
123 if (arch_timer_rate == 0)
124 panic("Architected timer frequency is set to zero.\n"
125 "You must set this in your .dts file\n");
126 }
127
128 /* Cache the sched_clock multiplier to save a divide in the hot path. */
129
130 sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
131
132 pr_info("Architected local timer running at %u.%02uMHz.\n",
133 arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
134}
135
136static cycle_t arch_counter_read(struct clocksource *cs)
137{
138 return arch_counter_get_cntpct();
139}
140
141static struct clocksource clocksource_counter = {
142 .name = "arch_sys_counter",
143 .rating = 400,
144 .read = arch_counter_read,
145 .mask = CLOCKSOURCE_MASK(56),
146 .flags = (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
147};
148
149int read_current_timer(unsigned long *timer_value)
150{
151 *timer_value = arch_counter_get_cntpct();
152 return 0;
153}
154
155unsigned long long notrace sched_clock(void)
156{
157 return arch_counter_get_cntvct() * sched_clock_mult;
158}
159
160static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
161 unsigned long action, void *hcpu)
162{
163 int cpu = (long)hcpu;
164 struct clock_event_device *clk = per_cpu_ptr(&arch_timer_evt, cpu);
165
166 switch(action) {
167 case CPU_STARTING:
168 case CPU_STARTING_FROZEN:
169 arch_timer_setup(clk);
170 break;
171
172 case CPU_DYING:
173 case CPU_DYING_FROZEN:
174 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
175 clk->irq, cpu);
176 disable_percpu_irq(clk->irq);
177 arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
178 break;
179 }
180
181 return NOTIFY_OK;
182}
183
184static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
185 .notifier_call = arch_timer_cpu_notify,
186};
187
188static const struct of_device_id arch_timer_of_match[] __initconst = {
189 { .compatible = "arm,armv8-timer" },
190 {},
191};
192
193int __init arm_generic_timer_init(void)
194{
195 struct device_node *np;
196 int err;
197 u32 freq;
198
199 np = of_find_matching_node(NULL, arch_timer_of_match);
200 if (!np) {
201 pr_err("arch_timer: can't find DT node\n");
202 return -ENODEV;
203 }
204
205 /* Try to determine the frequency from the device tree or CNTFRQ */
206 if (!of_property_read_u32(np, "clock-frequency", &freq))
207 arch_timer_rate = freq;
208 arch_timer_calibrate();
209
210 arch_timer_ppi = irq_of_parse_and_map(np, 0);
211 pr_info("arch_timer: found %s irq %d\n", np->name, arch_timer_ppi);
212
213 err = request_percpu_irq(arch_timer_ppi, arch_timer_handle_irq,
214 np->name, &arch_timer_evt);
215 if (err) {
216 pr_err("arch_timer: can't register interrupt %d (%d)\n",
217 arch_timer_ppi, err);
218 return err;
219 }
220
221 clocksource_register_hz(&clocksource_counter, arch_timer_rate);
222
223 /* Calibrate the delay loop directly */
224 lpj_fine = arch_timer_rate / HZ;
225
226 /* Immediately configure the timer on the boot CPU */
227 arch_timer_setup(per_cpu_ptr(&arch_timer_evt, smp_processor_id()));
228
229 register_cpu_notifier(&arch_timer_cpu_nb);
230
231 return 0;
232}
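To make the sched_clock() fast path concrete: with an illustrative 100MHz architected timer, sched_clock_mult = NSEC_PER_SEC / arch_timer_rate = 1000000000 / 100000000 = 10, so sched_clock() simply multiplies the virtual counter by 10ns per tick; likewise lpj_fine = arch_timer_rate / HZ works out to 1000000 (assuming HZ=100), which is why no separate delay-loop calibration is needed.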
diff --git a/fs/stat.c b/fs/stat.c
index 40780229a032..208039eec6c7 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -326,7 +326,7 @@ SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
326 326
327 327
328/* ---------- LFS-64 ----------- */ 328/* ---------- LFS-64 ----------- */
329#ifdef __ARCH_WANT_STAT64 329#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
330 330
331#ifndef INIT_STRUCT_STAT64_PADDING 331#ifndef INIT_STRUCT_STAT64_PADDING
332# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st)) 332# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
@@ -415,7 +415,7 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
415 return error; 415 return error;
416 return cp_new_stat64(&stat, statbuf); 416 return cp_new_stat64(&stat, statbuf);
417} 417}
418#endif /* __ARCH_WANT_STAT64 */ 418#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
419 419
420/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ 420/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
421void __inode_add_bytes(struct inode *inode, loff_t bytes) 421void __inode_add_bytes(struct inode *inode, loff_t bytes)
diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h
new file mode 100644
index 000000000000..90041e3a41f0
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__ffs.h
@@ -0,0 +1,15 @@
1#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
2#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
3
4/**
5 * __ffs - find first bit in word.
6 * @word: The word to search
7 *
8 * Undefined if no bit exists, so code should check against 0 first.
9 */
10static __always_inline unsigned long __ffs(unsigned long word)
11{
12 return __builtin_ctzl(word);
13}
14
15#endif
diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h
new file mode 100644
index 000000000000..0248f386635f
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__fls.h
@@ -0,0 +1,15 @@
1#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
2#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
3
4/**
5 * __fls - find last (most-significant) set bit in a long word
6 * @word: the word to search
7 *
8 * Undefined if no set bit exists, so code should check against 0 first.
9 */
10static __always_inline unsigned long __fls(unsigned long word)
11{
12 return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
13}
14
15#endif
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
new file mode 100644
index 000000000000..064825829e1c
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -0,0 +1,17 @@
1#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
2#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
3
4/**
5 * ffs - find first bit set
6 * @x: the word to search
7 *
8 * This is defined the same way as
9 * the libc and compiler builtin ffs routines, therefore
10 * differs in spirit from the above ffz (man ffs).
11 */
12static __always_inline int ffs(int x)
13{
14 return __builtin_ffs(x);
15}
16
17#endif
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
new file mode 100644
index 000000000000..eda652d0ac7f
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -0,0 +1,16 @@
1#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
2#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
3
4/**
5 * fls - find last (most-significant) bit set
6 * @x: the word to search
7 *
8 * This is defined the same way as ffs.
9 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
10 */
11static __always_inline int fls(int x)
12{
13 return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
14}
15
16#endif
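With these builtin-backed definitions the documented boundary cases work out as follows: fls(0) == 0, fls(1) == 1 and fls(0x80000000) == 32, while ffs(8) == 4 (1-based, least significant bit first) and __ffs(8) == __fls(8) == 3 (0-based bit indices). __ffs(0) and __fls(0) remain undefined, which is why the kerneldoc tells callers to test for zero first.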
diff --git a/include/clocksource/arm_generic.h b/include/clocksource/arm_generic.h
new file mode 100644
index 000000000000..5b41b0d27f0f
--- /dev/null
+++ b/include/clocksource/arm_generic.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __CLKSOURCE_ARM_GENERIC_H
17#define __CLKSOURCE_ARM_GENERIC_H
18
19extern int arm_generic_timer_init(void);
20
21#endif
diff --git a/include/linux/elf.h b/include/linux/elf.h
index f930b1a390ab..0a05051a8924 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -389,6 +389,9 @@ typedef struct elf64_shdr {
389#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */ 389#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
390#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */ 390#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
391#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ 391#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
392#define NT_ARM_TLS 0x401 /* ARM TLS register */
393#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
394#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
392 395
393 396
394/* Note header in a PT_NOTE section */ 397/* Note header in a PT_NOTE section */
diff --git a/init/Kconfig b/init/Kconfig
index 3466a6e017b7..495e6e9632db 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1216,7 +1216,8 @@ menuconfig EXPERT
1216 1216
1217config UID16 1217config UID16
1218 bool "Enable 16-bit UID system calls" if EXPERT 1218 bool "Enable 16-bit UID system calls" if EXPERT
1219 depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) 1219 depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) \
1220 || AARCH32_EMULATION
1220 default y 1221 default y
1221 help 1222 help
1222 This enables the legacy 16-bit UID syscall wrappers. 1223 This enables the legacy 16-bit UID syscall wrappers.
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 81c7b1a1a307..84c76a34e41c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1544,7 +1544,7 @@ static struct ctl_table fs_table[] = {
1544 1544
1545static struct ctl_table debug_table[] = { 1545static struct ctl_table debug_table[] = {
1546#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \ 1546#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \
1547 defined(CONFIG_S390) || defined(CONFIG_TILE) 1547 defined(CONFIG_S390) || defined(CONFIG_TILE) || defined(CONFIG_ARM64)
1548 { 1548 {
1549 .procname = "exception-trace", 1549 .procname = "exception-trace",
1550 .data = &show_unhandled_signals, 1550 .data = &show_unhandled_signals,
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dacbbe4d7a80..35c4565ee8fa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -452,7 +452,8 @@ config SLUB_STATS
452config DEBUG_KMEMLEAK 452config DEBUG_KMEMLEAK
453 bool "Kernel memory leak detector" 453 bool "Kernel memory leak detector"
454 depends on DEBUG_KERNEL && EXPERIMENTAL && \ 454 depends on DEBUG_KERNEL && EXPERIMENTAL && \
455 (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) 455 (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || \
456 MICROBLAZE || TILE || ARM64)
456 457
457 select DEBUG_FS 458 select DEBUG_FS
458 select STACKTRACE if STACKTRACE_SUPPORT 459 select STACKTRACE if STACKTRACE_SUPPORT
@@ -753,7 +754,8 @@ config DEBUG_BUGVERBOSE
753 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT 754 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
754 depends on BUG 755 depends on BUG
755 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ 756 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
756 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE 757 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || \
758 TILE || ARM64
757 default y 759 default y
758 help 760 help
759 Say Y here to make BUG() panics output the file name and line number 761 Say Y here to make BUG() panics output the file name and line number
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 87f4ec6d1f36..a89cbbb61801 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -88,6 +88,12 @@ void get_term_dimensions(struct winsize *ws);
88#define CPUINFO_PROC "Processor" 88#define CPUINFO_PROC "Processor"
89#endif 89#endif
90 90
91#ifdef __aarch64__
92#include "../../arch/arm64/include/asm/unistd.h"
93#define rmb() asm volatile("dmb ld" ::: "memory")
94#define cpu_relax() asm volatile("yield" ::: "memory")
95#endif
96
91#ifdef __mips__ 97#ifdef __mips__
92#include "../../arch/mips/include/asm/unistd.h" 98#include "../../arch/mips/include/asm/unistd.h"
93#define rmb() asm volatile( \ 99#define rmb() asm volatile( \