path: root/arch/hexagon
author     Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/hexagon
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/hexagon')
-rw-r--r--  arch/hexagon/Kconfig | 195
-rw-r--r--  arch/hexagon/Makefile | 52
-rw-r--r--  arch/hexagon/configs/comet_defconfig | 85
-rw-r--r--  arch/hexagon/include/asm/Kbuild | 55
-rw-r--r--  arch/hexagon/include/asm/atomic.h | 165
-rw-r--r--  arch/hexagon/include/asm/barrier.h | 41
-rw-r--r--  arch/hexagon/include/asm/bitops.h | 300
-rw-r--r--  arch/hexagon/include/asm/cache.h | 34
-rw-r--r--  arch/hexagon/include/asm/cacheflush.h | 99
-rw-r--r--  arch/hexagon/include/asm/checksum.h | 50
-rw-r--r--  arch/hexagon/include/asm/cmpxchg.h | 90
-rw-r--r--  arch/hexagon/include/asm/delay.h | 28
-rw-r--r--  arch/hexagon/include/asm/dma-mapping.h | 107
-rw-r--r--  arch/hexagon/include/asm/dma.h | 29
-rw-r--r--  arch/hexagon/include/asm/elf.h | 230
-rw-r--r--  arch/hexagon/include/asm/exec.h | 28
-rw-r--r--  arch/hexagon/include/asm/fixmap.h | 73
-rw-r--r--  arch/hexagon/include/asm/fpu.h | 4
-rw-r--r--  arch/hexagon/include/asm/futex.h | 137
-rw-r--r--  arch/hexagon/include/asm/hexagon_vm.h | 281
-rw-r--r--  arch/hexagon/include/asm/intrinsics.h | 26
-rw-r--r--  arch/hexagon/include/asm/io.h | 326
-rw-r--r--  arch/hexagon/include/asm/irq.h | 36
-rw-r--r--  arch/hexagon/include/asm/irqflags.h | 62
-rw-r--r--  arch/hexagon/include/asm/kgdb.h | 43
-rw-r--r--  arch/hexagon/include/asm/linkage.h | 25
-rw-r--r--  arch/hexagon/include/asm/mem-layout.h | 112
-rw-r--r--  arch/hexagon/include/asm/mmu.h | 37
-rw-r--r--  arch/hexagon/include/asm/mmu_context.h | 100
-rw-r--r--  arch/hexagon/include/asm/module.h | 26
-rw-r--r--  arch/hexagon/include/asm/mutex.h | 8
-rw-r--r--  arch/hexagon/include/asm/page.h | 157
-rw-r--r--  arch/hexagon/include/asm/perf_event.h | 22
-rw-r--r--  arch/hexagon/include/asm/pgalloc.h | 146
-rw-r--r--  arch/hexagon/include/asm/pgtable.h | 518
-rw-r--r--  arch/hexagon/include/asm/processor.h | 115
-rw-r--r--  arch/hexagon/include/asm/smp.h | 44
-rw-r--r--  arch/hexagon/include/asm/spinlock.h | 186
-rw-r--r--  arch/hexagon/include/asm/spinlock_types.h | 40
-rw-r--r--  arch/hexagon/include/asm/string.h | 32
-rw-r--r--  arch/hexagon/include/asm/suspend.h | 27
-rw-r--r--  arch/hexagon/include/asm/switch_to.h | 34
-rw-r--r--  arch/hexagon/include/asm/syscall.h | 46
-rw-r--r--  arch/hexagon/include/asm/thread_info.h | 141
-rw-r--r--  arch/hexagon/include/asm/time.h | 29
-rw-r--r--  arch/hexagon/include/asm/timer-regs.h | 39
-rw-r--r--  arch/hexagon/include/asm/timex.h | 36
-rw-r--r--  arch/hexagon/include/asm/tlb.h | 39
-rw-r--r--  arch/hexagon/include/asm/tlbflush.h | 58
-rw-r--r--  arch/hexagon/include/asm/traps.h | 29
-rw-r--r--  arch/hexagon/include/asm/uaccess.h | 116
-rw-r--r--  arch/hexagon/include/asm/vdso.h | 30
-rw-r--r--  arch/hexagon/include/asm/vm_fault.h | 26
-rw-r--r--  arch/hexagon/include/asm/vm_mmu.h | 111
-rw-r--r--  arch/hexagon/include/uapi/asm/Kbuild | 15
-rw-r--r--  arch/hexagon/include/uapi/asm/bitsperlong.h | 26
-rw-r--r--  arch/hexagon/include/uapi/asm/byteorder.h | 28
-rw-r--r--  arch/hexagon/include/uapi/asm/kvm_para.h | 1
-rw-r--r--  arch/hexagon/include/uapi/asm/param.h | 26
-rw-r--r--  arch/hexagon/include/uapi/asm/ptrace.h | 39
-rw-r--r--  arch/hexagon/include/uapi/asm/registers.h | 221
-rw-r--r--  arch/hexagon/include/uapi/asm/setup.h | 29
-rw-r--r--  arch/hexagon/include/uapi/asm/sigcontext.h | 33
-rw-r--r--  arch/hexagon/include/uapi/asm/signal.h | 26
-rw-r--r--  arch/hexagon/include/uapi/asm/swab.h | 24
-rw-r--r--  arch/hexagon/include/uapi/asm/unistd.h | 32
-rw-r--r--  arch/hexagon/include/uapi/asm/user.h | 63
-rw-r--r--  arch/hexagon/kernel/Makefile | 17
-rw-r--r--  arch/hexagon/kernel/asm-offsets.c | 104
-rw-r--r--  arch/hexagon/kernel/dma.c | 222
-rw-r--r--  arch/hexagon/kernel/head.S | 162
-rw-r--r--  arch/hexagon/kernel/hexagon_ksyms.c | 40
-rw-r--r--  arch/hexagon/kernel/irq_cpu.c | 90
-rw-r--r--  arch/hexagon/kernel/kgdb.c | 254
-rw-r--r--  arch/hexagon/kernel/module.c | 162
-rw-r--r--  arch/hexagon/kernel/process.c | 204
-rw-r--r--  arch/hexagon/kernel/ptrace.c | 180
-rw-r--r--  arch/hexagon/kernel/reset.c | 38
-rw-r--r--  arch/hexagon/kernel/setup.c | 145
-rw-r--r--  arch/hexagon/kernel/signal.c | 306
-rw-r--r--  arch/hexagon/kernel/smp.c | 270
-rw-r--r--  arch/hexagon/kernel/stacktrace.c | 66
-rw-r--r--  arch/hexagon/kernel/syscalltab.c | 32
-rw-r--r--  arch/hexagon/kernel/time.c | 249
-rw-r--r--  arch/hexagon/kernel/topology.c | 52
-rw-r--r--  arch/hexagon/kernel/trampoline.S | 35
-rw-r--r--  arch/hexagon/kernel/traps.c | 454
-rw-r--r--  arch/hexagon/kernel/vdso.c | 100
-rw-r--r--  arch/hexagon/kernel/vm_entry.S | 273
-rw-r--r--  arch/hexagon/kernel/vm_events.c | 100
-rw-r--r--  arch/hexagon/kernel/vm_init_segtable.S | 442
-rw-r--r--  arch/hexagon/kernel/vm_ops.S | 102
-rw-r--r--  arch/hexagon/kernel/vm_switch.S | 95
-rw-r--r--  arch/hexagon/kernel/vm_vectors.S | 48
-rw-r--r--  arch/hexagon/kernel/vmlinux.lds.S | 88
-rw-r--r--  arch/hexagon/lib/Makefile | 4
-rw-r--r--  arch/hexagon/lib/checksum.c | 203
-rw-r--r--  arch/hexagon/lib/io.c | 91
-rw-r--r--  arch/hexagon/lib/memcpy.S | 543
-rw-r--r--  arch/hexagon/lib/memset.S | 315
-rw-r--r--  arch/hexagon/mm/Makefile | 6
-rw-r--r--  arch/hexagon/mm/cache.c | 128
-rw-r--r--  arch/hexagon/mm/copy_from_user.S | 114
-rw-r--r--  arch/hexagon/mm/copy_to_user.S | 92
-rw-r--r--  arch/hexagon/mm/copy_user_template.S | 185
-rw-r--r--  arch/hexagon/mm/init.c | 276
-rw-r--r--  arch/hexagon/mm/ioremap.c | 56
-rw-r--r--  arch/hexagon/mm/pgalloc.c | 23
-rw-r--r--  arch/hexagon/mm/strnlen_user.S | 139
-rw-r--r--  arch/hexagon/mm/uaccess.c | 59
-rw-r--r--  arch/hexagon/mm/vm_fault.c | 200
-rw-r--r--  arch/hexagon/mm/vm_tlb.c | 93
112 files changed, 0 insertions, 12325 deletions
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
deleted file mode 100644
index 0744f7d7b1f..00000000000
--- a/arch/hexagon/Kconfig
+++ /dev/null
@@ -1,195 +0,0 @@
1# Hexagon configuration
2comment "Linux Kernel Configuration for Hexagon"
3
4config HEXAGON
5 def_bool y
6 select HAVE_OPROFILE
7 select USE_GENERIC_SMP_HELPERS if SMP
8 # Other pending projects/to-do items.
9 # select HAVE_REGS_AND_STACK_ACCESS_API
10 # select HAVE_HW_BREAKPOINT if PERF_EVENTS
11 # select ARCH_HAS_CPU_IDLE_WAIT
12 # select ARCH_WANT_OPTIONAL_GPIOLIB
13 # select ARCH_REQUIRE_GPIOLIB
14 # select HAVE_CLK
15 # select IRQ_PER_CPU
16 # select GENERIC_PENDING_IRQ if SMP
17 select HAVE_IRQ_WORK
18 select GENERIC_ATOMIC64
19 select HAVE_PERF_EVENTS
20 select HAVE_GENERIC_HARDIRQS
21 # GENERIC_ALLOCATOR is used by dma_alloc_coherent()
22 select GENERIC_ALLOCATOR
23 select GENERIC_IRQ_SHOW
24 select HAVE_ARCH_KGDB
25 select HAVE_ARCH_TRACEHOOK
26 select NO_IOPORT
27 select GENERIC_IOMAP
28 select GENERIC_SMP_IDLE_THREAD
29 select STACKTRACE_SUPPORT
30 select KTIME_SCALAR
31 select GENERIC_CLOCKEVENTS
32 select GENERIC_CLOCKEVENTS_BROADCAST
33 select MODULES_USE_ELF_RELA
34 ---help---
35 Qualcomm Hexagon is a processor architecture designed for high
36 performance and low power across a wide variety of applications.
37
38config HEXAGON_ARCH_V1
39 bool
40
41config HEXAGON_ARCH_V2
42 bool
43
44config HEXAGON_ARCH_V3
45 bool
46
47config HEXAGON_ARCH_V4
48 bool
49
50config FRAME_POINTER
51 def_bool y
52
53config LOCKDEP_SUPPORT
54 def_bool y
55
56config PCI
57 def_bool n
58
59config EARLY_PRINTK
60 def_bool y
61
62config MMU
63 def_bool y
64
65config TRACE_IRQFLAGS_SUPPORT
66 def_bool y
67
68config GENERIC_CSUM
69 def_bool y
70
71#
72# Use the generic interrupt handling code in kernel/irq/:
73#
74config GENERIC_IRQ_PROBE
75 def_bool y
76
77config NEED_SG_DMA_LENGTH
78 def_bool y
79
80config RWSEM_GENERIC_SPINLOCK
81 def_bool n
82
83config RWSEM_XCHGADD_ALGORITHM
84 def_bool y
85
86config GENERIC_FIND_NEXT_BIT
87 def_bool y
88
89config GENERIC_HWEIGHT
90 def_bool y
91
92config STACKTRACE_SUPPORT
93 def_bool y
94 select STACKTRACE
95
96config GENERIC_BUG
97 def_bool y
98 depends on BUG
99
100menu "Machine selection"
101
102choice
103 prompt "System type"
104 default HEXAGON_COMET
105
106config HEXAGON_COMET
107 bool "Comet Board"
108 select HEXAGON_ARCH_V2
109 ---help---
110 Support for the Comet platform.
111
112endchoice
113
114config HEXAGON_VM
115 def_bool y
116
117config CMDLINE
118 string "Default kernel command string"
119 default ""
120 help
121 On some platforms, there is currently no way for the boot loader
122 to pass arguments to the kernel. For these, you should supply some
123 command-line options at build time by entering them here. At a
124 minimum, you should specify the memory size and the root device
125 (e.g., mem=64M root=/dev/nfs).
126
127config HEXAGON_ANGEL_TRAPS
128 bool "Use Angel Traps"
129 default n
130 ---help---
131 Enable angel debug traps (for printk's).
132
133config SMP
134 bool "Multi-Processing support"
135 ---help---
136 Enables SMP support in the kernel. If unsure, say "Y"
137
138config NR_CPUS
139 int "Maximum number of CPUs" if SMP
140 range 2 6 if SMP
141 default "1" if !SMP
142 default "6" if SMP
143 ---help---
144 This allows you to specify the maximum number of CPUs which this
145 kernel will support. The maximum supported value is 6 and the
146 minimum value which makes sense is 2.
147
148 This is purely to save memory - each supported CPU adds
149 approximately eight kilobytes to the kernel image.
150
151choice
152 prompt "Kernel page size"
153 default PAGE_SIZE_4KB
154 ---help---
155 Changes the default page size; use with caution.
156
157config PAGE_SIZE_4KB
158 bool "4KB"
159
160config PAGE_SIZE_16KB
161 bool "16KB"
162
163config PAGE_SIZE_64KB
164 bool "64KB"
165
166config PAGE_SIZE_256KB
167 bool "256KB"
168
169endchoice
170
171source "mm/Kconfig"
172
173source "kernel/Kconfig.hz"
174
175config GENERIC_GPIO
176 def_bool n
177
178endmenu
179
180source "init/Kconfig"
181source "drivers/Kconfig"
182source "fs/Kconfig"
183
184menu "Executable File Formats"
185source "fs/Kconfig.binfmt"
186endmenu
187
188source "net/Kconfig"
189source "security/Kconfig"
190source "crypto/Kconfig"
191source "lib/Kconfig"
192
193menu "Kernel hacking"
194source "lib/Kconfig.debug"
195endmenu
diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
deleted file mode 100644
index d00d900b256..00000000000
--- a/arch/hexagon/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
1# Makefile for the Hexagon arch
2
3KBUILD_DEFCONFIG = comet_defconfig
4
5# Do not use GP-relative jumps
6KBUILD_CFLAGS += -G0
7LDFLAGS_vmlinux += -G0
8
9# Do not use single-byte enums; these will overflow.
10KBUILD_CFLAGS += -fno-short-enums
11
12# Modules must use either long-calls, or use pic/plt.
13# Use long-calls for now, it's easier. And faster.
14# CFLAGS_MODULE += -fPIC
15# LDFLAGS_MODULE += -shared
16CFLAGS_MODULE += -mlong-calls
17
18cflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
19cflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
20cflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
21cflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
22
23aflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
24aflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
25aflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
26aflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
27
28ldflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
29ldflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
30ldflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
31ldflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
32
33KBUILD_CFLAGS += $(cflags-y)
34KBUILD_AFLAGS += $(aflags-y)
35
36# no KBUILD_LDFLAGS?
37LDFLAGS += $(ldflags-y)
38
39# Thread-info register will be r19. This value is not configureable;
40# it is hard-coded in several files.
41TIR_NAME := r19
42KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
43KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
44
45LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
46libs-y += $(LIBGCC)
47
48head-y := arch/hexagon/kernel/head.o
49
50core-y += arch/hexagon/kernel/ \
51 arch/hexagon/mm/ \
52 arch/hexagon/lib/
diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig
deleted file mode 100644
index e324f65f41e..00000000000
--- a/arch/hexagon/configs/comet_defconfig
+++ /dev/null
@@ -1,85 +0,0 @@
1CONFIG_SMP=y
2CONFIG_DEFAULT_MMAP_MIN_ADDR=0
3CONFIG_HZ_100=y
4CONFIG_EXPERIMENTAL=y
5CONFIG_CROSS_COMPILE="hexagon-"
6CONFIG_LOCALVERSION="-smp"
7# CONFIG_LOCALVERSION_AUTO is not set
8CONFIG_SYSVIPC=y
9CONFIG_POSIX_MQUEUE=y
10CONFIG_BSD_PROCESS_ACCT=y
11CONFIG_BSD_PROCESS_ACCT_V3=y
12CONFIG_TASKSTATS=y
13CONFIG_TASK_DELAY_ACCT=y
14CONFIG_IKCONFIG=y
15CONFIG_IKCONFIG_PROC=y
16CONFIG_LOG_BUF_SHIFT=18
17CONFIG_BLK_DEV_INITRD=y
18CONFIG_EMBEDDED=y
19# CONFIG_VM_EVENT_COUNTERS is not set
20# CONFIG_BLK_DEV_BSG is not set
21CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
22# CONFIG_STANDALONE is not set
23CONFIG_CONNECTOR=y
24CONFIG_BLK_DEV_LOOP=y
25CONFIG_BLK_DEV_NBD=y
26CONFIG_NETDEVICES=y
27CONFIG_MII=y
28CONFIG_PHYLIB=y
29CONFIG_NET_ETHERNET=y
30# CONFIG_NETDEV_1000 is not set
31# CONFIG_NETDEV_10000 is not set
32# CONFIG_INPUT_MOUSEDEV is not set
33# CONFIG_INPUT_KEYBOARD is not set
34# CONFIG_INPUT_MOUSE is not set
35# CONFIG_SERIO is not set
36# CONFIG_CONSOLE_TRANSLATIONS is not set
37CONFIG_LEGACY_PTY_COUNT=64
38# CONFIG_DEVKMEM is not set
39# CONFIG_HW_RANDOM is not set
40CONFIG_SPI=y
41CONFIG_SPI_DEBUG=y
42CONFIG_SPI_BITBANG=y
43# CONFIG_HWMON is not set
44# CONFIG_VGA_CONSOLE is not set
45# CONFIG_HID_SUPPORT is not set
46# CONFIG_USB_SUPPORT is not set
47CONFIG_EXT2_FS=y
48CONFIG_EXT2_FS_XATTR=y
49CONFIG_EXT2_FS_POSIX_ACL=y
50CONFIG_EXT2_FS_SECURITY=y
51CONFIG_EXT3_FS=y
52# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
53CONFIG_EXT3_FS_POSIX_ACL=y
54CONFIG_EXT3_FS_SECURITY=y
55CONFIG_QUOTA=y
56CONFIG_PROC_KCORE=y
57CONFIG_TMPFS=y
58CONFIG_TMPFS_POSIX_ACL=y
59# CONFIG_MISC_FILESYSTEMS is not set
60CONFIG_NFS_FS=y
61CONFIG_NFS_V3=y
62CONFIG_NFS_V3_ACL=y
63# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
64CONFIG_NET=y
65CONFIG_PACKET=y
66CONFIG_UNIX=y
67CONFIG_INET=y
68# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
69# CONFIG_INET_XFRM_MODE_TUNNEL is not set
70# CONFIG_INET_XFRM_MODE_BEET is not set
71# CONFIG_INET_LRO is not set
72# CONFIG_INET_DIAG is not set
73# CONFIG_IPV6 is not set
74CONFIG_CRYPTO_MD5=y
75# CONFIG_CRYPTO_ANSI_CPRNG is not set
76# CONFIG_CRYPTO_HW is not set
77CONFIG_CRC_CCITT=y
78CONFIG_CRC16=y
79CONFIG_CRC_T10DIF=y
80CONFIG_LIBCRC32C=y
81CONFIG_FRAME_WARN=0
82CONFIG_MAGIC_SYSRQ=y
83CONFIG_DEBUG_FS=y
84# CONFIG_SCHED_DEBUG is not set
85CONFIG_DEBUG_INFO=y
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
deleted file mode 100644
index bdb54ceb53b..00000000000
--- a/arch/hexagon/include/asm/Kbuild
+++ /dev/null
@@ -1,55 +0,0 @@
1
2header-y += ucontext.h
3
4generic-y += auxvec.h
5generic-y += bug.h
6generic-y += bugs.h
7generic-y += clkdev.h
8generic-y += cputime.h
9generic-y += current.h
10generic-y += device.h
11generic-y += div64.h
12generic-y += emergency-restart.h
13generic-y += errno.h
14generic-y += fb.h
15generic-y += fcntl.h
16generic-y += ftrace.h
17generic-y += hardirq.h
18generic-y += hw_irq.h
19generic-y += ioctl.h
20generic-y += ioctls.h
21generic-y += iomap.h
22generic-y += ipcbuf.h
23generic-y += irq_regs.h
24generic-y += kdebug.h
25generic-y += kmap_types.h
26generic-y += local64.h
27generic-y += local.h
28generic-y += local.h
29generic-y += mman.h
30generic-y += msgbuf.h
31generic-y += pci.h
32generic-y += percpu.h
33generic-y += poll.h
34generic-y += posix_types.h
35generic-y += resource.h
36generic-y += rwsem.h
37generic-y += scatterlist.h
38generic-y += sections.h
39generic-y += segment.h
40generic-y += sembuf.h
41generic-y += shmbuf.h
42generic-y += shmparam.h
43generic-y += siginfo.h
44generic-y += socket.h
45generic-y += sockios.h
46generic-y += statfs.h
47generic-y += stat.h
48generic-y += termbits.h
49generic-y += termios.h
50generic-y += topology.h
51generic-y += trace_clock.h
52generic-y += types.h
53generic-y += ucontext.h
54generic-y += unaligned.h
55generic-y += xor.h
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
deleted file mode 100644
index 468fbb0781c..00000000000
--- a/arch/hexagon/include/asm/atomic.h
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * Atomic operations for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 */
21
22#ifndef _ASM_ATOMIC_H
23#define _ASM_ATOMIC_H
24
25#include <linux/types.h>
26#include <asm/cmpxchg.h>
27
28#define ATOMIC_INIT(i) { (i) }
29#define atomic_set(v, i) ((v)->counter = (i))
30
31/**
32 * atomic_read - reads a word, atomically
33 * @v: pointer to atomic value
34 *
35 * Assumes all word reads on our architecture are atomic.
36 */
37#define atomic_read(v) ((v)->counter)
38
39/**
40 * atomic_xchg - atomic
41 * @v: pointer to memory to change
42 * @new: new value (technically passed in a register -- see xchg)
43 */
44#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
45
46
47/**
48 * atomic_cmpxchg - atomic compare-and-exchange values
49 * @v: pointer to value to change
50 * @old: desired old value to match
51 * @new: new value to put in
52 *
53 * Parameters are then pointer, value-in-register, value-in-register,
54 * and the output is the old value.
55 *
56 * Apparently this is complicated for archs that don't support
57 * the memw_locked like we do (or it's broken or whatever).
58 *
59 * Kind of the lynchpin of the rest of the generically defined routines.
60 * Remember V2 had that bug with dotnew predicate set by memw_locked.
61 *
62 * "old" is "expected" old val, __oldval is actual old value
63 */
64static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
65{
66 int __oldval;
67
68 asm volatile(
69 "1: %0 = memw_locked(%1);\n"
70 " { P0 = cmp.eq(%0,%2);\n"
71 " if (!P0.new) jump:nt 2f; }\n"
72 " memw_locked(%1,P0) = %3;\n"
73 " if (!P0) jump 1b;\n"
74 "2:\n"
75 : "=&r" (__oldval)
76 : "r" (&v->counter), "r" (old), "r" (new)
77 : "memory", "p0"
78 );
79
80 return __oldval;
81}
82
83static inline int atomic_add_return(int i, atomic_t *v)
84{
85 int output;
86
87 __asm__ __volatile__ (
88 "1: %0 = memw_locked(%1);\n"
89 " %0 = add(%0,%2);\n"
90 " memw_locked(%1,P3)=%0;\n"
91 " if !P3 jump 1b;\n"
92 : "=&r" (output)
93 : "r" (&v->counter), "r" (i)
94 : "memory", "p3"
95 );
96 return output;
97
98}
99
100#define atomic_add(i, v) atomic_add_return(i, (v))
101
102static inline int atomic_sub_return(int i, atomic_t *v)
103{
104 int output;
105 __asm__ __volatile__ (
106 "1: %0 = memw_locked(%1);\n"
107 " %0 = sub(%0,%2);\n"
108 " memw_locked(%1,P3)=%0\n"
109 " if !P3 jump 1b;\n"
110 : "=&r" (output)
111 : "r" (&v->counter), "r" (i)
112 : "memory", "p3"
113 );
114 return output;
115}
116
117#define atomic_sub(i, v) atomic_sub_return(i, (v))
118
119/**
120 * atomic_add_unless - add unless the number is a given value
121 * @v: pointer to value
122 * @a: amount to add
123 * @u: unless value is equal to u
124 *
125 * Returns 1 if the add happened, 0 if it didn't.
126 */
127static inline int __atomic_add_unless(atomic_t *v, int a, int u)
128{
129 int output, __oldval;
130 asm volatile(
131 "1: %0 = memw_locked(%2);"
132 " {"
133 " p3 = cmp.eq(%0, %4);"
134 " if (p3.new) jump:nt 2f;"
135 " %0 = add(%0, %3);"
136 " %1 = #0;"
137 " }"
138 " memw_locked(%2, p3) = %0;"
139 " {"
140 " if !p3 jump 1b;"
141 " %1 = #1;"
142 " }"
143 "2:"
144 : "=&r" (__oldval), "=&r" (output)
145 : "r" (v), "r" (a), "r" (u)
146 : "memory", "p3"
147 );
148 return output;
149}
150
151#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
152
153#define atomic_inc(v) atomic_add(1, (v))
154#define atomic_dec(v) atomic_sub(1, (v))
155
156#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
157#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
158#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
159#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
160
161
162#define atomic_inc_return(v) (atomic_add_return(1, v))
163#define atomic_dec_return(v) (atomic_sub_return(1, v))
164
165#endif
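
The comment above calls atomic_cmpxchg() the lynchpin of the generically defined routines: it returns the value actually observed in memory, and stores the new value only when that observed value matched the expected one. A minimal user-space C sketch of those semantics, using the GCC/Clang __atomic builtin purely for illustration (not the kernel API or the memw_locked sequence itself):

/* Sketch of compare-and-swap "return the old value" semantics. */
#include <stdio.h>

static int sketch_cmpxchg(int *ptr, int old, int new_val)
{
        int expected = old;

        /* On success *ptr becomes new_val; either way 'expected' ends up
         * holding the value that was actually observed in memory. */
        __atomic_compare_exchange_n(ptr, &expected, new_val, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;
}

int main(void)
{
        int counter = 5;

        /* Succeeds: counter was 5, becomes 7, and 5 is returned. */
        printf("%d\n", sketch_cmpxchg(&counter, 5, 7));
        /* Fails: counter is now 7, not 5, so it stays 7 and 7 is returned. */
        printf("%d\n", sketch_cmpxchg(&counter, 5, 9));
        return 0;
}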
diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
deleted file mode 100644
index 1041a8e70ce..00000000000
--- a/arch/hexagon/include/asm/barrier.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Memory barrier definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_BARRIER_H
22#define _ASM_BARRIER_H
23
24#define rmb() barrier()
25#define read_barrier_depends() barrier()
26#define wmb() barrier()
27#define mb() barrier()
28#define smp_rmb() barrier()
29#define smp_read_barrier_depends() barrier()
30#define smp_wmb() barrier()
31#define smp_mb() barrier()
32#define smp_mb__before_atomic_dec() barrier()
33#define smp_mb__after_atomic_dec() barrier()
34#define smp_mb__before_atomic_inc() barrier()
35#define smp_mb__after_atomic_inc() barrier()
36
37/* Set a value and use a memory barrier. Used by the scheduler somewhere. */
38#define set_mb(var, value) \
39 do { var = value; mb(); } while (0)
40
41#endif /* _ASM_BARRIER_H */
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
deleted file mode 100644
index 9b1e4afbab3..00000000000
--- a/arch/hexagon/include/asm/bitops.h
+++ /dev/null
@@ -1,300 +0,0 @@
1/*
2 * Bit operations for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 */
21
22#ifndef _ASM_BITOPS_H
23#define _ASM_BITOPS_H
24
25#include <linux/compiler.h>
26#include <asm/byteorder.h>
27#include <asm/atomic.h>
28
29#ifdef __KERNEL__
30
31#define smp_mb__before_clear_bit() barrier()
32#define smp_mb__after_clear_bit() barrier()
33
34/*
35 * The offset calculations for these are based on BITS_PER_LONG == 32
36 * (i.e. I get to shift by #5-2 (32 bits per long, 4 bytes per access),
37 * mask by 0x0000001F)
38 *
39 * Typically, R10 is clobbered for address, R11 bit nr, and R12 is temp
40 */
41
42/**
43 * test_and_clear_bit - clear a bit and return its old value
44 * @nr: bit number to clear
45 * @addr: pointer to memory
46 */
47static inline int test_and_clear_bit(int nr, volatile void *addr)
48{
49 int oldval;
50
51 __asm__ __volatile__ (
52 " {R10 = %1; R11 = asr(%2,#5); }\n"
53 " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
54 "1: R12 = memw_locked(R10);\n"
55 " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
56 " memw_locked(R10,P1) = R12;\n"
57 " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
58 : "=&r" (oldval)
59 : "r" (addr), "r" (nr)
60 : "r10", "r11", "r12", "p0", "p1", "memory"
61 );
62
63 return oldval;
64}
65
66/**
67 * test_and_set_bit - set a bit and return its old value
68 * @nr: bit number to set
69 * @addr: pointer to memory
70 */
71static inline int test_and_set_bit(int nr, volatile void *addr)
72{
73 int oldval;
74
75 __asm__ __volatile__ (
76 " {R10 = %1; R11 = asr(%2,#5); }\n"
77 " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
78 "1: R12 = memw_locked(R10);\n"
79 " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
80 " memw_locked(R10,P1) = R12;\n"
81 " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
82 : "=&r" (oldval)
83 : "r" (addr), "r" (nr)
84 : "r10", "r11", "r12", "p0", "p1", "memory"
85 );
86
87
88 return oldval;
89
90}
91
92/**
93 * test_and_change_bit - toggle a bit and return its old value
94 * @nr: bit number to set
95 * @addr: pointer to memory
96 */
97static inline int test_and_change_bit(int nr, volatile void *addr)
98{
99 int oldval;
100
101 __asm__ __volatile__ (
102 " {R10 = %1; R11 = asr(%2,#5); }\n"
103 " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
104 "1: R12 = memw_locked(R10);\n"
105 " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
106 " memw_locked(R10,P1) = R12;\n"
107 " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
108 : "=&r" (oldval)
109 : "r" (addr), "r" (nr)
110 : "r10", "r11", "r12", "p0", "p1", "memory"
111 );
112
113 return oldval;
114
115}
116
117/*
118 * Atomic, but doesn't care about the return value.
119 * Rewrite later to save a cycle or two.
120 */
121
122static inline void clear_bit(int nr, volatile void *addr)
123{
124 test_and_clear_bit(nr, addr);
125}
126
127static inline void set_bit(int nr, volatile void *addr)
128{
129 test_and_set_bit(nr, addr);
130}
131
132static inline void change_bit(int nr, volatile void *addr)
133{
134 test_and_change_bit(nr, addr);
135}
136
137
138/*
139 * These are allowed to be non-atomic. In fact the generic flavors are
140 * in non-atomic.h. Would it be better to use intrinsics for this?
141 *
142 * OK, writes in our architecture do not invalidate LL/SC, so this has to
143 * be atomic, particularly for things like slab_lock and slab_unlock.
144 *
145 */
146static inline void __clear_bit(int nr, volatile unsigned long *addr)
147{
148 test_and_clear_bit(nr, addr);
149}
150
151static inline void __set_bit(int nr, volatile unsigned long *addr)
152{
153 test_and_set_bit(nr, addr);
154}
155
156static inline void __change_bit(int nr, volatile unsigned long *addr)
157{
158 test_and_change_bit(nr, addr);
159}
160
161/* Apparently, at least some of these are allowed to be non-atomic */
162static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
163{
164 return test_and_clear_bit(nr, addr);
165}
166
167static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
168{
169 return test_and_set_bit(nr, addr);
170}
171
172static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
173{
174 return test_and_change_bit(nr, addr);
175}
176
177static inline int __test_bit(int nr, const volatile unsigned long *addr)
178{
179 int retval;
180
181 asm volatile(
182 "{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
183 : "=&r" (retval)
184 : "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
185 : "p0"
186 );
187
188 return retval;
189}
190
191#define test_bit(nr, addr) __test_bit(nr, addr)
192
193/*
194 * ffz - find first zero in word.
195 * @word: The word to search
196 *
197 * Undefined if no zero exists, so code should check against ~0UL first.
198 */
199static inline long ffz(int x)
200{
201 int r;
202
203 asm("%0 = ct1(%1);\n"
204 : "=&r" (r)
205 : "r" (x));
206 return r;
207}
208
209/*
210 * fls - find last (most-significant) bit set
211 * @x: the word to search
212 *
213 * This is defined the same way as ffs.
214 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
215 */
216static inline long fls(int x)
217{
218 int r;
219
220 asm("{ %0 = cl0(%1);}\n"
221 "%0 = sub(#32,%0);\n"
222 : "=&r" (r)
223 : "r" (x)
224 : "p0");
225
226 return r;
227}
228
229/*
230 * ffs - find first bit set
231 * @x: the word to search
232 *
233 * This is defined the same way as
234 * the libc and compiler builtin ffs routines, therefore
235 * differs in spirit from the above ffz (man ffs).
236 */
237static inline long ffs(int x)
238{
239 int r;
240
241 asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
242 "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
243 : "=&r" (r)
244 : "r" (x)
245 : "p0");
246
247 return r;
248}
249
250/*
251 * __ffs - find first bit in word.
252 * @word: The word to search
253 *
254 * Undefined if no bit exists, so code should check against 0 first.
255 *
256 * bits_per_long assumed to be 32
257 * numbering starts at 0 I think (instead of 1 like ffs)
258 */
259static inline unsigned long __ffs(unsigned long word)
260{
261 int num;
262
263 asm("%0 = ct0(%1);\n"
264 : "=&r" (num)
265 : "r" (word));
266
267 return num;
268}
269
270/*
271 * __fls - find last (most-significant) set bit in a long word
272 * @word: the word to search
273 *
274 * Undefined if no set bit exists, so code should check against 0 first.
275 * bits_per_long assumed to be 32
276 */
277static inline unsigned long __fls(unsigned long word)
278{
279 int num;
280
281 asm("%0 = cl0(%1);\n"
282 "%0 = sub(#31,%0);\n"
283 : "=&r" (num)
284 : "r" (word));
285
286 return num;
287}
288
289#include <asm-generic/bitops/lock.h>
290#include <asm-generic/bitops/find.h>
291
292#include <asm-generic/bitops/fls64.h>
293#include <asm-generic/bitops/sched.h>
294#include <asm-generic/bitops/hweight.h>
295
296#include <asm-generic/bitops/le.h>
297#include <asm-generic/bitops/ext2-atomic.h>
298
299#endif /* __KERNEL__ */
300#endif
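
The offset comment near the top of bitops.h above (shift by #5, mask by 0x1f) amounts to: with 32-bit longs, bit nr lives in word (nr >> 5) at bit position (nr & 0x1f). A standalone C sketch of that split, assuming BITS_PER_LONG == 32 as the header does:

/* Word/bit split behind the test_and_*_bit() routines above. */
#include <stdio.h>

int main(void)
{
        unsigned long bitmap[4] = { 0 };
        int nr = 70;                    /* arbitrary example bit */

        unsigned int word = nr >> 5;    /* 70 / 32 == 2 */
        unsigned int bit  = nr & 0x1f;  /* 70 % 32 == 6 */

        bitmap[word] |= 1UL << bit;     /* non-atomic set_bit equivalent */

        printf("bit %d -> word %u, bit %u, mask 0x%08lx\n",
               nr, word, bit, bitmap[word]);
        return 0;
}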
diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
deleted file mode 100644
index f4ca594fdf8..00000000000
--- a/arch/hexagon/include/asm/cache.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Cache definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef __ASM_CACHE_H
22#define __ASM_CACHE_H
23
24/* Bytes per L1 cache line */
25#define L1_CACHE_SHIFT (5)
26#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
27
28#define __cacheline_aligned __aligned(L1_CACHE_BYTES)
29#define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
30
31/* See http://kerneltrap.org/node/15100 */
32#define __read_mostly
33
34#endif
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
deleted file mode 100644
index 49e0896ec24..00000000000
--- a/arch/hexagon/include/asm/cacheflush.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Cache flush operations for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_CACHEFLUSH_H
22#define _ASM_CACHEFLUSH_H
23
24#include <linux/cache.h>
25#include <linux/mm.h>
26#include <asm/string.h>
27#include <asm-generic/cacheflush.h>
28
29/* Cache flushing:
30 *
31 * - flush_cache_all() flushes entire cache
32 * - flush_cache_mm(mm) flushes the specified mm context's cache lines
33 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
34 * - flush_cache_range(vma, start, end) flushes a range of pages
35 * - flush_icache_range(start, end) flush a range of instructions
36 * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
37 * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
38 *
39 * Need to doublecheck which one is really needed for ptrace stuff to work.
40 */
41#define LINESIZE 32
42#define LINEBITS 5
43
44/*
45 * Flush Dcache range through current map.
46 */
47extern void flush_dcache_range(unsigned long start, unsigned long end);
48
49/*
50 * Flush Icache range through current map.
51 */
52#undef flush_icache_range
53extern void flush_icache_range(unsigned long start, unsigned long end);
54
55/*
56 * Memory-management related flushes are there to ensure in non-physically
57 * indexed cache schemes that stale lines belonging to a given ASID aren't
58 * in the cache to confuse things. The prototype Hexagon Virtual Machine
59 * only uses a single ASID for all user-mode maps, which should
60 * mean that they aren't necessary. A brute-force, flush-everything
61 * implementation, with the name xxxxx_hexagon() is present in
62 * arch/hexagon/mm/cache.c, but let's not wire it up until we know
63 * it is needed.
64 */
65extern void flush_cache_all_hexagon(void);
66
67/*
68 * This may or may not ever have to be non-null, depending on the
69 * virtual machine MMU. For a native kernel, it's definitiely a no-op
70 *
71 * This is also the place where deferred cache coherency stuff seems
72 * to happen, classically... but instead we do it like ia64 and
73 * clean the cache when the PTE is set.
74 *
75 */
76static inline void update_mmu_cache(struct vm_area_struct *vma,
77 unsigned long address, pte_t *ptep)
78{
79 /* generic_ptrace_pokedata doesn't wind up here, does it? */
80}
81
82#undef copy_to_user_page
83static inline void copy_to_user_page(struct vm_area_struct *vma,
84 struct page *page,
85 unsigned long vaddr,
86 void *dst, void *src, int len)
87{
88 memcpy(dst, src, len);
89 if (vma->vm_flags & VM_EXEC) {
90 flush_icache_range((unsigned long) dst,
91 (unsigned long) dst + len);
92 }
93}
94
95
96extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
97extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
98
99#endif
diff --git a/arch/hexagon/include/asm/checksum.h b/arch/hexagon/include/asm/checksum.h
deleted file mode 100644
index 46ec8a7fd65..00000000000
--- a/arch/hexagon/include/asm/checksum.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_CHECKSUM_H
20#define _ASM_CHECKSUM_H
21
22#define do_csum do_csum
23unsigned int do_csum(const void *voidptr, int len);
24
25/*
26 * the same as csum_partial, but copies from src while it
27 * checksums
28 *
29 * here even more important to align src and dst on a 32-bit (or even
30 * better 64-bit) boundary
31 */
32#define csum_partial_copy_nocheck csum_partial_copy_nocheck
33__wsum csum_partial_copy_nocheck(const void *src, void *dst,
34 int len, __wsum sum);
35
36/*
37 * computes the checksum of the TCP/UDP pseudo-header
38 * returns a 16-bit checksum, already complemented
39 */
40#define csum_tcpudp_nofold csum_tcpudp_nofold
41__wsum csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
42 unsigned short len, unsigned short proto, __wsum sum);
43
44#define csum_tcpudp_magic csum_tcpudp_magic
45__sum16 csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
46 unsigned short len, unsigned short proto, __wsum sum);
47
48#include <asm-generic/checksum.h>
49
50#endif
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
deleted file mode 100644
index 9e7802911a5..00000000000
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * xchg/cmpxchg operations for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 */
21
22#ifndef _ASM_CMPXCHG_H
23#define _ASM_CMPXCHG_H
24
25/*
26 * __xchg - atomically exchange a register and a memory location
27 * @x: value to swap
28 * @ptr: pointer to memory
29 * @size: size of the value
30 *
31 * Only 4 bytes supported currently.
32 *
33 * Note: there was an errata for V2 about .new's and memw_locked.
34 *
35 */
36static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
37 int size)
38{
39 unsigned long retval;
40
41 /* Can't seem to use printk or panic here, so just stop */
42 if (size != 4) do { asm volatile("brkpt;\n"); } while (1);
43
44 __asm__ __volatile__ (
45 "1: %0 = memw_locked(%1);\n" /* load into retval */
46 " memw_locked(%1,P0) = %2;\n" /* store into memory */
47 " if !P0 jump 1b;\n"
48 : "=&r" (retval)
49 : "r" (ptr), "r" (x)
50 : "memory", "p0"
51 );
52 return retval;
53}
54
55/*
56 * Atomically swap the contents of a register with memory. Should be atomic
57 * between multiple CPU's and within interrupts on the same CPU.
58 */
59#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
60 sizeof(*(ptr))))
61
62/*
63 * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
64 * looks just like atomic_cmpxchg on our arch currently with a bunch of
65 * variable casting.
66 */
67#define __HAVE_ARCH_CMPXCHG 1
68
69#define cmpxchg(ptr, old, new) \
70({ \
71 __typeof__(ptr) __ptr = (ptr); \
72 __typeof__(*(ptr)) __old = (old); \
73 __typeof__(*(ptr)) __new = (new); \
74 __typeof__(*(ptr)) __oldval = 0; \
75 \
76 asm volatile( \
77 "1: %0 = memw_locked(%1);\n" \
78 " { P0 = cmp.eq(%0,%2);\n" \
79 " if (!P0.new) jump:nt 2f; }\n" \
80 " memw_locked(%1,p0) = %3;\n" \
81 " if (!P0) jump 1b;\n" \
82 "2:\n" \
83 : "=&r" (__oldval) \
84 : "r" (__ptr), "r" (__old), "r" (__new) \
85 : "memory", "p0" \
86 ); \
87 __oldval; \
88})
89
90#endif /* _ASM_CMPXCHG_H */
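
xchg() above atomically swaps a register value with memory and hands back whatever was there before; the classic use of that primitive is a test-and-set lock. A user-space sketch of the pattern with the GCC/Clang __atomic builtins, shown only to illustrate the semantics rather than the kernel macros:

/* Test-and-set lock built on an atomic exchange. */
#include <stdio.h>

int main(void)
{
        unsigned long lock = 0;

        /* Keep swapping in 1 until the old value seen was 0. */
        while (__atomic_exchange_n(&lock, 1UL, __ATOMIC_ACQUIRE) != 0)
                ;       /* spin */

        printf("acquired, lock=%lu\n", lock);

        __atomic_store_n(&lock, 0UL, __ATOMIC_RELEASE);  /* release */
        return 0;
}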
diff --git a/arch/hexagon/include/asm/delay.h b/arch/hexagon/include/asm/delay.h
deleted file mode 100644
index 53079719d66..00000000000
--- a/arch/hexagon/include/asm/delay.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_DELAY_H
20#define _ASM_DELAY_H
21
22#include <asm/param.h>
23
24extern void __udelay(unsigned long usecs);
25
26#define udelay(usecs) __udelay((usecs))
27
28#endif /* _ASM_DELAY_H */
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
deleted file mode 100644
index 85e9935660c..00000000000
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * DMA operations for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_DMA_MAPPING_H
22#define _ASM_DMA_MAPPING_H
23
24#include <linux/types.h>
25#include <linux/cache.h>
26#include <linux/mm.h>
27#include <linux/scatterlist.h>
28#include <linux/dma-mapping.h>
29#include <linux/dma-debug.h>
30#include <linux/dma-attrs.h>
31#include <asm/io.h>
32
33struct device;
34extern int bad_dma_address;
35
36extern struct dma_map_ops *dma_ops;
37
38#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
39#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
40
41static inline struct dma_map_ops *get_dma_ops(struct device *dev)
42{
43 if (unlikely(dev == NULL))
44 return NULL;
45
46 return dma_ops;
47}
48
49extern int dma_supported(struct device *dev, u64 mask);
50extern int dma_set_mask(struct device *dev, u64 mask);
51extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
52extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
53 enum dma_data_direction direction);
54
55#include <asm-generic/dma-mapping-common.h>
56
57static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
58{
59 if (!dev->dma_mask)
60 return 0;
61 return addr + size - 1 <= *dev->dma_mask;
62}
63
64static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
65{
66 struct dma_map_ops *dma_ops = get_dma_ops(dev);
67
68 if (dma_ops->mapping_error)
69 return dma_ops->mapping_error(dev, dma_addr);
70
71 return (dma_addr == bad_dma_address);
72}
73
74#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
75
76static inline void *dma_alloc_attrs(struct device *dev, size_t size,
77 dma_addr_t *dma_handle, gfp_t flag,
78 struct dma_attrs *attrs)
79{
80 void *ret;
81 struct dma_map_ops *ops = get_dma_ops(dev);
82
83 BUG_ON(!dma_ops);
84
85 ret = ops->alloc(dev, size, dma_handle, flag, attrs);
86
87 debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
88
89 return ret;
90}
91
92#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
93
94static inline void dma_free_attrs(struct device *dev, size_t size,
95 void *cpu_addr, dma_addr_t dma_handle,
96 struct dma_attrs *attrs)
97{
98 struct dma_map_ops *dma_ops = get_dma_ops(dev);
99
100 BUG_ON(!dma_ops);
101
102 dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
103
104 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
105}
106
107#endif
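
dma_capable() above accepts a transfer only if its last byte (addr + size - 1) still fits under the device's DMA mask. A small sketch of that range check, with illustrative example numbers not taken from any real device:

/* Range check equivalent to dma_capable() above. */
#include <stdio.h>
#include <stdint.h>

static int example_dma_capable(uint64_t dma_mask, uint64_t addr, uint64_t size)
{
        return addr + size - 1 <= dma_mask;
}

int main(void)
{
        uint64_t mask32 = 0xffffffffULL;        /* 32-bit capable device */

        /* Fits entirely below 4 GB: prints 1. */
        printf("%d\n", example_dma_capable(mask32, 0xfffff000ULL, 0x1000));
        /* Last byte crosses the 4 GB boundary: prints 0. */
        printf("%d\n", example_dma_capable(mask32, 0xfffff001ULL, 0x1000));
        return 0;
}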
diff --git a/arch/hexagon/include/asm/dma.h b/arch/hexagon/include/asm/dma.h
deleted file mode 100644
index 9e34ff49f3b..00000000000
--- a/arch/hexagon/include/asm/dma.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_DMA_H
20#define _ASM_DMA_H
21
22#include <asm/io.h>
23
24#define MAX_DMA_CHANNELS 1
25#define MAX_DMA_ADDRESS (PAGE_OFFSET)
26
27extern size_t hexagon_coherent_pool_size;
28
29#endif
diff --git a/arch/hexagon/include/asm/elf.h b/arch/hexagon/include/asm/elf.h
deleted file mode 100644
index 1ba4b3bff5e..00000000000
--- a/arch/hexagon/include/asm/elf.h
+++ /dev/null
@@ -1,230 +0,0 @@
1/*
2 * ELF definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef __ASM_ELF_H
22#define __ASM_ELF_H
23
24#include <asm/ptrace.h>
25#include <asm/user.h>
26
27/*
28 * This should really be in linux/elf-em.h.
29 */
30#define EM_HEXAGON 164 /* QUALCOMM Hexagon */
31
32struct elf32_hdr;
33
34/*
35 * ELF header e_flags defines.
36 */
37
38/* should have stuff like "CPU type" and maybe "ABI version", etc */
39
40/* Hexagon relocations */
41 /* V2 */
42#define R_HEXAGON_NONE 0
43#define R_HEXAGON_B22_PCREL 1
44#define R_HEXAGON_B15_PCREL 2
45#define R_HEXAGON_B7_PCREL 3
46#define R_HEXAGON_LO16 4
47#define R_HEXAGON_HI16 5
48#define R_HEXAGON_32 6
49#define R_HEXAGON_16 7
50#define R_HEXAGON_8 8
51#define R_HEXAGON_GPREL16_0 9
52#define R_HEXAGON_GPREL16_1 10
53#define R_HEXAGON_GPREL16_2 11
54#define R_HEXAGON_GPREL16_3 12
55#define R_HEXAGON_HL16 13
56 /* V3 */
57#define R_HEXAGON_B13_PCREL 14
58 /* V4 */
59#define R_HEXAGON_B9_PCREL 15
60 /* V4 (extenders) */
61#define R_HEXAGON_B32_PCREL_X 16
62#define R_HEXAGON_32_6_X 17
63 /* V4 (extended) */
64#define R_HEXAGON_B22_PCREL_X 18
65#define R_HEXAGON_B15_PCREL_X 19
66#define R_HEXAGON_B13_PCREL_X 20
67#define R_HEXAGON_B9_PCREL_X 21
68#define R_HEXAGON_B7_PCREL_X 22
69#define R_HEXAGON_16_X 23
70#define R_HEXAGON_12_X 24
71#define R_HEXAGON_11_X 25
72#define R_HEXAGON_10_X 26
73#define R_HEXAGON_9_X 27
74#define R_HEXAGON_8_X 28
75#define R_HEXAGON_7_X 29
76#define R_HEXAGON_6_X 30
77 /* V2 PIC */
78#define R_HEXAGON_32_PCREL 31
79#define R_HEXAGON_COPY 32
80#define R_HEXAGON_GLOB_DAT 33
81#define R_HEXAGON_JMP_SLOT 34
82#define R_HEXAGON_RELATIVE 35
83#define R_HEXAGON_PLT_B22_PCREL 36
84#define R_HEXAGON_GOTOFF_LO16 37
85#define R_HEXAGON_GOTOFF_HI16 38
86#define R_HEXAGON_GOTOFF_32 39
87#define R_HEXAGON_GOT_LO16 40
88#define R_HEXAGON_GOT_HI16 41
89#define R_HEXAGON_GOT_32 42
90#define R_HEXAGON_GOT_16 43
91
92/*
93 * ELF register definitions..
94 */
95typedef unsigned long elf_greg_t;
96
97typedef struct user_regs_struct elf_gregset_t;
98#define ELF_NGREG (sizeof(elf_gregset_t)/sizeof(unsigned long))
99
100/* Placeholder */
101typedef unsigned long elf_fpregset_t;
102
103/*
104 * Bypass the whole "regsets" thing for now and use the define.
105 */
106
107#define ELF_CORE_COPY_REGS(DEST, REGS) \
108do { \
109 DEST.r0 = REGS->r00; \
110 DEST.r1 = REGS->r01; \
111 DEST.r2 = REGS->r02; \
112 DEST.r3 = REGS->r03; \
113 DEST.r4 = REGS->r04; \
114 DEST.r5 = REGS->r05; \
115 DEST.r6 = REGS->r06; \
116 DEST.r7 = REGS->r07; \
117 DEST.r8 = REGS->r08; \
118 DEST.r9 = REGS->r09; \
119 DEST.r10 = REGS->r10; \
120 DEST.r11 = REGS->r11; \
121 DEST.r12 = REGS->r12; \
122 DEST.r13 = REGS->r13; \
123 DEST.r14 = REGS->r14; \
124 DEST.r15 = REGS->r15; \
125 DEST.r16 = REGS->r16; \
126 DEST.r17 = REGS->r17; \
127 DEST.r18 = REGS->r18; \
128 DEST.r19 = REGS->r19; \
129 DEST.r20 = REGS->r20; \
130 DEST.r21 = REGS->r21; \
131 DEST.r22 = REGS->r22; \
132 DEST.r23 = REGS->r23; \
133 DEST.r24 = REGS->r24; \
134 DEST.r25 = REGS->r25; \
135 DEST.r26 = REGS->r26; \
136 DEST.r27 = REGS->r27; \
137 DEST.r28 = REGS->r28; \
138 DEST.r29 = pt_psp(REGS); \
139 DEST.r30 = REGS->r30; \
140 DEST.r31 = REGS->r31; \
141 DEST.sa0 = REGS->sa0; \
142 DEST.lc0 = REGS->lc0; \
143 DEST.sa1 = REGS->sa1; \
144 DEST.lc1 = REGS->lc1; \
145 DEST.m0 = REGS->m0; \
146 DEST.m1 = REGS->m1; \
147 DEST.usr = REGS->usr; \
148 DEST.p3_0 = REGS->preds; \
149 DEST.gp = REGS->gp; \
150 DEST.ugp = REGS->ugp; \
151 DEST.pc = pt_elr(REGS); \
152 DEST.cause = pt_cause(REGS); \
153 DEST.badva = pt_badva(REGS); \
154} while (0);
155
156
157
158/*
159 * This is used to ensure we don't load something for the wrong architecture.
160 * Checks the machine and ABI type.
161 */
162#define elf_check_arch(hdr) ((hdr)->e_machine == EM_HEXAGON)
163
164/*
165 * These are used to set parameters in the core dumps.
166 */
167#define ELF_CLASS ELFCLASS32
168#define ELF_DATA ELFDATA2LSB
169#define ELF_ARCH EM_HEXAGON
170
171#ifdef CONFIG_HEXAGON_ARCH_V2
172#define ELF_CORE_EFLAGS 0x1
173#endif
174
175#ifdef CONFIG_HEXAGON_ARCH_V3
176#define ELF_CORE_EFLAGS 0x2
177#endif
178
179#ifdef CONFIG_HEXAGON_ARCH_V4
180#define ELF_CORE_EFLAGS 0x3
181#endif
182
183/*
184 * Some architectures have ld.so set up a pointer to a function
185 * to be registered using atexit, to facilitate cleanup. So that
186 * static executables will be well-behaved, we would null the register
187 * in question here, in the pt_regs structure passed. For now,
188 * leave it a null macro.
189 */
190#define ELF_PLAT_INIT(regs, load_addr) do { } while (0)
191
192#define USE_ELF_CORE_DUMP
193#define CORE_DUMP_USE_REGSET
194
195/* Hrm is this going to cause problems for changing PAGE_SIZE? */
196#define ELF_EXEC_PAGESIZE 4096
197
198/*
199 * This is the location that an ET_DYN program is loaded if exec'ed. Typical
200 * use of this is to invoke "./ld.so someprog" to test out a new version of
201 * the loader. We need to make sure that it is out of the way of the program
202 * that it will "exec", and that there is sufficient room for the brk.
203 */
204#define ELF_ET_DYN_BASE 0x08000000UL
205
206/*
207 * This yields a mask that user programs can use to figure out what
208 * instruction set this cpu supports.
209 */
210#define ELF_HWCAP (0)
211
212/*
213 * This yields a string that ld.so will use to load implementation
214 * specific libraries for optimization. This is more specific in
215 * intent than poking at uname or /proc/cpuinfo.
216 */
217#define ELF_PLATFORM (NULL)
218
219#ifdef __KERNEL__
220#define SET_PERSONALITY(ex) \
221 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
222#endif
223
224#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
225struct linux_binprm;
226extern int arch_setup_additional_pages(struct linux_binprm *bprm,
227 int uses_interp);
228
229
230#endif
diff --git a/arch/hexagon/include/asm/exec.h b/arch/hexagon/include/asm/exec.h
deleted file mode 100644
index c32b2132614..00000000000
--- a/arch/hexagon/include/asm/exec.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Process execution related definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_EXEC_H
22#define _ASM_EXEC_H
23
24/* Should probably shoot for an 8-byte aligned stack pointer */
25#define STACK_MASK (~7)
26#define arch_align_stack(x) (x & STACK_MASK)
27
28#endif /* _ASM_EXEC_H */
diff --git a/arch/hexagon/include/asm/fixmap.h b/arch/hexagon/include/asm/fixmap.h
deleted file mode 100644
index b75b6bf4269..00000000000
--- a/arch/hexagon/include/asm/fixmap.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * Fixmap support for Hexagon - enough to support highmem features
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_FIXMAP_H
22#define _ASM_FIXMAP_H
23
24/*
25 * A lot of the fixmap info is already in mem-layout.h
26 */
27#include <asm/mem-layout.h>
28
29/*
30 * Full fixmap support involves set_fixmap() functions, but
31 * these may not be needed if all we're after is an area for
32 * highmem kernel mappings.
33 */
34#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
35#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
36
37extern void __this_fixmap_does_not_exist(void);
38
39/**
40 * fix_to_virt -- "index to address" translation.
41 *
42 * If anyone tries to use the idx directly without translation,
43 * we catch the bug with a NULL-deference kernel oops. Illegal
44 * ranges of incoming indices are caught too.
45 */
46static inline unsigned long fix_to_virt(const unsigned int idx)
47{
48 /*
49 * This branch gets completely eliminated after inlining,
50 * except when someone tries to use fixaddr indices in an
51 * illegal way. (such as mixing up address types or using
52 * out-of-range indices).
53 *
54 * If it doesn't get removed, the linker will complain
55 * loudly with a reasonably clear error message..
56 */
57 if (idx >= __end_of_fixed_addresses)
58 __this_fixmap_does_not_exist();
59
60 return __fix_to_virt(idx);
61}
62
63static inline unsigned long virt_to_fix(const unsigned long vaddr)
64{
65 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
66 return __virt_to_fix(vaddr);
67}
68
69#define kmap_get_fixmap_pte(vaddr) \
70 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
71 (vaddr)), (vaddr)), (vaddr))
72
73#endif
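
__fix_to_virt() above places fixmap slot N exactly N pages below FIXADDR_TOP. A sketch of that translation; the FIXADDR_TOP value and 4 KB page size here are made-up example numbers, not Hexagon's actual layout (which comes from mem-layout.h):

/* Index-to-address translation in the style of __fix_to_virt(). */
#include <stdio.h>

#define EXAMPLE_FIXADDR_TOP  0xfe000000UL   /* hypothetical top of fixmap */
#define EXAMPLE_PAGE_SHIFT   12             /* 4 KB pages */

static unsigned long example_fix_to_virt(unsigned int idx)
{
        return EXAMPLE_FIXADDR_TOP - ((unsigned long)idx << EXAMPLE_PAGE_SHIFT);
}

int main(void)
{
        /* Slot 0 maps at the top; slot 3 maps three pages lower. */
        printf("slot 0 -> %#lx\n", example_fix_to_virt(0));
        printf("slot 3 -> %#lx\n", example_fix_to_virt(3));
        return 0;
}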
diff --git a/arch/hexagon/include/asm/fpu.h b/arch/hexagon/include/asm/fpu.h
deleted file mode 100644
index 0e135ea8c45..00000000000
--- a/arch/hexagon/include/asm/fpu.h
+++ /dev/null
@@ -1,4 +0,0 @@
1/*
2 * If the FPU is used inside the kernel,
3 * kernel_fpu_end() will be defined here.
4 */
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
deleted file mode 100644
index 7e597f8434d..00000000000
--- a/arch/hexagon/include/asm/futex.h
+++ /dev/null
@@ -1,137 +0,0 @@
1#ifndef _ASM_HEXAGON_FUTEX_H
2#define _ASM_HEXAGON_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <linux/uaccess.h>
8#include <asm/errno.h>
9
10/* XXX TODO-- need to add sync barriers! */
11
12#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
13 __asm__ __volatile( \
14 "1: %0 = memw_locked(%3);\n" \
15 /* For example: %1 = %4 */ \
16 insn \
17 "2: memw_locked(%3,p2) = %1;\n" \
18 " if !p2 jump 1b;\n" \
19 " %1 = #0;\n" \
20 "3:\n" \
21 ".section .fixup,\"ax\"\n" \
22 "4: %1 = #%5;\n" \
23 " jump 3b\n" \
24 ".previous\n" \
25 ".section __ex_table,\"a\"\n" \
26 ".long 1b,4b,2b,4b\n" \
27 ".previous\n" \
28 : "=&r" (oldval), "=&r" (ret), "+m" (*uaddr) \
29 : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
30 : "p2", "memory")
31
32
33static inline int
34futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
35{
36 int op = (encoded_op >> 28) & 7;
37 int cmp = (encoded_op >> 24) & 15;
38 int oparg = (encoded_op << 8) >> 20;
39 int cmparg = (encoded_op << 20) >> 20;
40 int oldval = 0, ret;
41 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
42 oparg = 1 << oparg;
43
44 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
45 return -EFAULT;
46
47 pagefault_disable();
48
49 switch (op) {
50 case FUTEX_OP_SET:
51 __futex_atomic_op("%1 = %4\n", ret, oldval, uaddr, oparg);
52 break;
53 case FUTEX_OP_ADD:
54 __futex_atomic_op("%1 = add(%0,%4)\n", ret, oldval, uaddr,
55 oparg);
56 break;
57 case FUTEX_OP_OR:
58 __futex_atomic_op("%1 = or(%0,%4)\n", ret, oldval, uaddr,
59 oparg);
60 break;
61 case FUTEX_OP_ANDN:
62 __futex_atomic_op("%1 = not(%4); %1 = and(%0,%1)\n", ret,
63 oldval, uaddr, oparg);
64 break;
65 case FUTEX_OP_XOR:
66 __futex_atomic_op("%1 = xor(%0,%4)\n", ret, oldval, uaddr,
67 oparg);
68 break;
69 default:
70 ret = -ENOSYS;
71 }
72
73 pagefault_enable();
74
75 if (!ret) {
76 switch (cmp) {
77 case FUTEX_OP_CMP_EQ:
78 ret = (oldval == cmparg);
79 break;
80 case FUTEX_OP_CMP_NE:
81 ret = (oldval != cmparg);
82 break;
83 case FUTEX_OP_CMP_LT:
84 ret = (oldval < cmparg);
85 break;
86 case FUTEX_OP_CMP_GE:
87 ret = (oldval >= cmparg);
88 break;
89 case FUTEX_OP_CMP_LE:
90 ret = (oldval <= cmparg);
91 break;
92 case FUTEX_OP_CMP_GT:
93 ret = (oldval > cmparg);
94 break;
95 default:
96 ret = -ENOSYS;
97 }
98 }
99 return ret;
100}
101
102static inline int
103futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
104 u32 newval)
105{
106 int prev;
107 int ret = 0;
108
109 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
110 return -EFAULT;
111
112 __asm__ __volatile__ (
113 "1: %1 = memw_locked(%3)\n"
114 " {\n"
115 " p2 = cmp.eq(%1,%4)\n"
116 " if !p2.new jump:NT 3f\n"
117 " }\n"
118 "2: memw_locked(%3,p2) = %5\n"
119 " if !p2 jump 1b\n"
120 "3:\n"
121 ".section .fixup,\"ax\"\n"
122 "4: %0 = #%6\n"
123 " jump 3b\n"
124 ".previous\n"
125 ".section __ex_table,\"a\"\n"
126 ".long 1b,4b,2b,4b\n"
127 ".previous\n"
128 : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
129 : "r" (uaddr), "r" (oldval), "r" (newval), "i"(-EFAULT)
130 : "p2", "memory");
131
132 *uval = prev;
133 return ret;
134}
135
136#endif /* __KERNEL__ */
137#endif /* _ASM_HEXAGON_FUTEX_H */
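A minimal, user-space sketch of how futex_atomic_op_inuser() above unpacks encoded_op; the sample value is invented, and the explicit sign-extension here stands in for the (encoded_op << 8) >> 20 shift trick the kernel code uses.

#include <stdio.h>

int main(void)
{
	unsigned int encoded_op = (1u << 28) | (3u << 24) | (5u << 12) | 7u; /* hypothetical */

	unsigned int op  = (encoded_op >> 28) & 7;	/* operation selector */
	unsigned int cmp = (encoded_op >> 24) & 15;	/* comparison selector */

	/* oparg and cmparg are signed 12-bit fields */
	int oparg  = (int)((encoded_op >> 12) & 0xfff);
	int cmparg = (int)(encoded_op & 0xfff);

	if (oparg & 0x800)
		oparg -= 0x1000;
	if (cmparg & 0x800)
		cmparg -= 0x1000;

	printf("op=%u cmp=%u oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}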
diff --git a/arch/hexagon/include/asm/hexagon_vm.h b/arch/hexagon/include/asm/hexagon_vm.h
deleted file mode 100644
index c144bee6cab..00000000000
--- a/arch/hexagon/include/asm/hexagon_vm.h
+++ /dev/null
@@ -1,281 +0,0 @@
1/*
2 * Declarations for the Hexagon Virtual Machine.
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef ASM_HEXAGON_VM_H
22#define ASM_HEXAGON_VM_H
23
24/*
25 * In principle, a Linux kernel for the VM could
26 * selectively define the virtual instructions
27 * as inline assembler macros, but for a first pass,
28 * we'll use subroutines for both the VM and the native
29 * kernels. It's costing a subroutine call/return,
30 * but it makes for a single set of entry points
31 * for tracing/debugging.
32 */
33
34/*
35 * Let's make this stuff visible only if configured,
36 * so we can unconditionally include the file.
37 */
38
39#ifndef __ASSEMBLY__
40
41enum VM_CACHE_OPS {
42 ickill,
43 dckill,
44 l2kill,
45 dccleaninva,
46 icinva,
47 idsync,
48 fetch_cfg
49};
50
51enum VM_INT_OPS {
52 nop,
53 globen,
54 globdis,
55 locen,
56 locdis,
57 affinity,
58 get,
59 peek,
60 status,
61 post,
62 clear
63};
64
65extern void _K_VM_event_vector(void);
66
67void __vmrte(void);
68long __vmsetvec(void *);
69long __vmsetie(long);
70long __vmgetie(void);
71long __vmintop(enum VM_INT_OPS, long, long, long, long);
72long __vmclrmap(void *, unsigned long);
73long __vmnewmap(void *);
74long __vmcache(enum VM_CACHE_OPS op, unsigned long addr, unsigned long len);
75unsigned long long __vmgettime(void);
76long __vmsettime(unsigned long long);
77long __vmstart(void *, void *);
78void __vmstop(void);
79long __vmwait(void);
80void __vmyield(void);
81long __vmvpid(void);
82
83static inline long __vmcache_ickill(void)
84{
85 return __vmcache(ickill, 0, 0);
86}
87
88static inline long __vmcache_dckill(void)
89{
90 return __vmcache(dckill, 0, 0);
91}
92
93static inline long __vmcache_l2kill(void)
94{
95 return __vmcache(l2kill, 0, 0);
96}
97
98static inline long __vmcache_dccleaninva(unsigned long addr, unsigned long len)
99{
100 return __vmcache(dccleaninva, addr, len);
101}
102
103static inline long __vmcache_icinva(unsigned long addr, unsigned long len)
104{
105 return __vmcache(icinva, addr, len);
106}
107
108static inline long __vmcache_idsync(unsigned long addr,
109 unsigned long len)
110{
111 return __vmcache(idsync, addr, len);
112}
113
114static inline long __vmcache_fetch_cfg(unsigned long val)
115{
116 return __vmcache(fetch_cfg, val, 0);
117}
118
119/* interrupt operations */
120
121static inline long __vmintop_nop(void)
122{
123 return __vmintop(nop, 0, 0, 0, 0);
124}
125
126static inline long __vmintop_globen(long i)
127{
128 return __vmintop(globen, i, 0, 0, 0);
129}
130
131static inline long __vmintop_globdis(long i)
132{
133 return __vmintop(globdis, i, 0, 0, 0);
134}
135
136static inline long __vmintop_locen(long i)
137{
138 return __vmintop(locen, i, 0, 0, 0);
139}
140
141static inline long __vmintop_locdis(long i)
142{
143 return __vmintop(locdis, i, 0, 0, 0);
144}
145
146static inline long __vmintop_affinity(long i, long cpu)
147{
148 return __vmintop(affinity, i, cpu, 0, 0);
149}
150
151static inline long __vmintop_get(void)
152{
153 return __vmintop(get, 0, 0, 0, 0);
154}
155
156static inline long __vmintop_peek(void)
157{
158 return __vmintop(peek, 0, 0, 0, 0);
159}
160
161static inline long __vmintop_status(long i)
162{
163 return __vmintop(status, i, 0, 0, 0);
164}
165
166static inline long __vmintop_post(long i)
167{
168 return __vmintop(post, i, 0, 0, 0);
169}
170
171static inline long __vmintop_clear(long i)
172{
173 return __vmintop(clear, i, 0, 0, 0);
174}
175
176#else /* Only assembly code should reference these */
177
178#define HVM_TRAP1_VMRTE 1
179#define HVM_TRAP1_VMSETVEC 2
180#define HVM_TRAP1_VMSETIE 3
181#define HVM_TRAP1_VMGETIE 4
182#define HVM_TRAP1_VMINTOP 5
183#define HVM_TRAP1_VMCLRMAP 10
184#define HVM_TRAP1_VMNEWMAP 11
185#define HVM_TRAP1_FORMERLY_VMWIRE 12
186#define HVM_TRAP1_VMCACHE 13
187#define HVM_TRAP1_VMGETTIME 14
188#define HVM_TRAP1_VMSETTIME 15
189#define HVM_TRAP1_VMWAIT 16
190#define HVM_TRAP1_VMYIELD 17
191#define HVM_TRAP1_VMSTART 18
192#define HVM_TRAP1_VMSTOP 19
193#define HVM_TRAP1_VMVPID 20
194#define HVM_TRAP1_VMSETREGS 21
195#define HVM_TRAP1_VMGETREGS 22
196
197#endif /* __ASSEMBLY__ */
198
199/*
200 * Constants for virtual instruction parameters and return values
201 */
202
203/* vmsetie arguments */
204
205#define VM_INT_DISABLE 0
206#define VM_INT_ENABLE 1
207
208/* vmsetimask arguments */
209
210#define VM_INT_UNMASK 0
211#define VM_INT_MASK 1
212
213#define VM_NEWMAP_TYPE_LINEAR 0
214#define VM_NEWMAP_TYPE_PGTABLES 1
215
216
217/*
218 * Event Record definitions useful to both C and Assembler
219 */
220
221/* VMEST Layout */
222
223#define HVM_VMEST_UM_SFT 31
224#define HVM_VMEST_UM_MSK 1
225#define HVM_VMEST_IE_SFT 30
226#define HVM_VMEST_IE_MSK 1
227#define HVM_VMEST_EVENTNUM_SFT 16
228#define HVM_VMEST_EVENTNUM_MSK 0xff
229#define HVM_VMEST_CAUSE_SFT 0
230#define HVM_VMEST_CAUSE_MSK 0xffff
231
232/*
233 * The initial program gets to find a system environment descriptor
234 * on its stack when it begins execution. The first word is a version
235 * code to indicate what is there. Zero means nothing more.
236 */
237
238#define HEXAGON_VM_SED_NULL 0
239
240/*
241 * Event numbers for vector binding
242 */
243
244#define HVM_EV_RESET 0
245#define HVM_EV_MACHCHECK 1
246#define HVM_EV_GENEX 2
247#define HVM_EV_TRAP 8
248#define HVM_EV_INTR 15
249/* These should be nuked as soon as we know the VM is up to spec v0.1.1 */
250#define HVM_EV_INTR_0 16
251#define HVM_MAX_INTR 240
252
253/*
254 * Cause values for General Exception
255 */
256
257#define HVM_GE_C_BUS 0x01
258#define HVM_GE_C_XPROT 0x11
259#define HVM_GE_C_XUSER 0x14
260#define HVM_GE_C_INVI 0x15
261#define HVM_GE_C_PRIVI 0x1B
262#define HVM_GE_C_XMAL 0x1C
263#define HVM_GE_C_RMAL 0x20
264#define HVM_GE_C_WMAL 0x21
265#define HVM_GE_C_RPROT 0x22
266#define HVM_GE_C_WPROT 0x23
267#define HVM_GE_C_RUSER 0x24
268#define HVM_GE_C_WUSER 0x25
269#define HVM_GE_C_CACHE 0x28
270
271/*
272 * Cause codes for Machine Check
273 */
274
275#define HVM_MCHK_C_DOWN 0x00
276#define HVM_MCHK_C_BADSP 0x01
277#define HVM_MCHK_C_BADEX 0x02
278#define HVM_MCHK_C_BADPT 0x03
279#define HVM_MCHK_C_REGWR 0x29
280
281#endif
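A minimal sketch of decoding a VMEST event-status word with the HVM_VMEST_* shift/mask values above; the sample word is invented and happens to decode as event 2 (HVM_EV_GENEX) with cause 0x11 (HVM_GE_C_XPROT).

#include <stdio.h>

int main(void)
{
	unsigned long vmest = 0x80020011UL;		/* hypothetical saved event state */

	unsigned long um    = (vmest >> 31) & 1;	/* HVM_VMEST_UM_SFT/MSK */
	unsigned long event = (vmest >> 16) & 0xff;	/* HVM_VMEST_EVENTNUM_SFT/MSK */
	unsigned long cause = vmest & 0xffff;		/* HVM_VMEST_CAUSE_SFT/MSK */

	printf("user-mode=%lu event=%lu cause=%#lx\n", um, event, cause);
	return 0;
}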
diff --git a/arch/hexagon/include/asm/intrinsics.h b/arch/hexagon/include/asm/intrinsics.h
deleted file mode 100644
index ca587737fb2..00000000000
--- a/arch/hexagon/include/asm/intrinsics.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_HEXAGON_INTRINSICS_H
20#define _ASM_HEXAGON_INTRINSICS_H
21
22#define HEXAGON_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0
23#define HEXAGON_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0
24#define HEXAGON_R_cl0_R __builtin_HEXAGON_S2_cl0
25
26#endif
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
deleted file mode 100644
index e527cfeff5b..00000000000
--- a/arch/hexagon/include/asm/io.h
+++ /dev/null
@@ -1,326 +0,0 @@
1/*
2 * IO definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_IO_H
22#define _ASM_IO_H
23
24#ifdef __KERNEL__
25
26#include <linux/types.h>
27#include <linux/delay.h>
28#include <linux/vmalloc.h>
29#include <asm/string.h>
30#include <asm/mem-layout.h>
31#include <asm/iomap.h>
32#include <asm/page.h>
33#include <asm/cacheflush.h>
34#include <asm/tlbflush.h>
35
36/*
37 * We don't have PCI yet.
38 * _IO_BASE is pointing at what should be unused virtual space.
39 */
40#define IO_SPACE_LIMIT 0xffff
41#define _IO_BASE ((void __iomem *)0xfe000000)
42
43extern int remap_area_pages(unsigned long start, unsigned long phys_addr,
44 unsigned long end, unsigned long flags);
45
46extern void __iounmap(const volatile void __iomem *addr);
47
48/* Defined in lib/io.c, needed for smc91x driver. */
49extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
50extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
51
52extern void __raw_readsl(const void __iomem *addr, void *data, int wordlen);
53extern void __raw_writesl(void __iomem *addr, const void *data, int wordlen);
54
55#define readsw(p, d, l) __raw_readsw(p, d, l)
56#define writesw(p, d, l) __raw_writesw(p, d, l)
57
58#define readsl(p, d, l) __raw_readsl(p, d, l)
59#define writesl(p, d, l) __raw_writesl(p, d, l)
60
61/*
62 * virt_to_phys - map virtual address to physical
63 * @address: address to map
64 */
65static inline unsigned long virt_to_phys(volatile void *address)
66{
67 return __pa(address);
68}
69
70/*
71 * phys_to_virt - map physical address to virtual
72 * @address: address to map
73 */
74static inline void *phys_to_virt(unsigned long address)
75{
76 return __va(address);
77}
78
79/*
80 * convert a physical pointer to a virtual kernel pointer for
81 * /dev/mem access.
82 */
83#define xlate_dev_kmem_ptr(p) __va(p)
84#define xlate_dev_mem_ptr(p) __va(p)
85
86/*
87 * IO port access primitives. Hexagon doesn't have special IO access
88 * instructions; all I/O is memory mapped.
89 *
90 * in/out are used for "ports", but we don't have "port instructions",
91 * so these are really just memory mapped too.
92 */
93
94/*
95 * readb - read byte from memory mapped device
96 * @addr: pointer to memory
97 *
98 * Operates on "I/O bus memory space"
99 */
100static inline u8 readb(const volatile void __iomem *addr)
101{
102 u8 val;
103 asm volatile(
104 "%0 = memb(%1);"
105 : "=&r" (val)
106 : "r" (addr)
107 );
108 return val;
109}
110
111static inline u16 readw(const volatile void __iomem *addr)
112{
113 u16 val;
114 asm volatile(
115 "%0 = memh(%1);"
116 : "=&r" (val)
117 : "r" (addr)
118 );
119 return val;
120}
121
122static inline u32 readl(const volatile void __iomem *addr)
123{
124 u32 val;
125 asm volatile(
126 "%0 = memw(%1);"
127 : "=&r" (val)
128 : "r" (addr)
129 );
130 return val;
131}
132
133/*
134 * writeb - write a byte to a memory location
135 * @data: data to write
136 * @addr: pointer to memory
137 *
138 */
139static inline void writeb(u8 data, volatile void __iomem *addr)
140{
141 asm volatile(
142 "memb(%0) = %1;"
143 :
144 : "r" (addr), "r" (data)
145 : "memory"
146 );
147}
148
149static inline void writew(u16 data, volatile void __iomem *addr)
150{
151 asm volatile(
152 "memh(%0) = %1;"
153 :
154 : "r" (addr), "r" (data)
155 : "memory"
156 );
157
158}
159
160static inline void writel(u32 data, volatile void __iomem *addr)
161{
162 asm volatile(
163 "memw(%0) = %1;"
164 :
165 : "r" (addr), "r" (data)
166 : "memory"
167 );
168}
169
170#define __raw_writeb writeb
171#define __raw_writew writew
172#define __raw_writel writel
173
174#define __raw_readb readb
175#define __raw_readw readw
176#define __raw_readl readl
177
178/*
179 * Need an mtype somewhere in here, for cache type deals?
180 * This is probably too long for an inline.
181 */
182void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size);
183
184static inline void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
185{
186 return ioremap_nocache(phys_addr, size);
187}
188
189static inline void iounmap(volatile void __iomem *addr)
190{
191 __iounmap(addr);
192}
193
194#define __raw_writel writel
195
196static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
197 int count)
198{
199 memcpy(dst, (void *) src, count);
200}
201
202static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
203 int count)
204{
205 memcpy((void *) dst, src, count);
206}
207
208#define PCI_IO_ADDR (volatile void __iomem *)
209
210/*
211 * inb - read byte from I/O port or something
212 * @port: address in I/O space
213 *
214 * Operates on "I/O bus I/O space"
215 */
216static inline u8 inb(unsigned long port)
217{
218 return readb(_IO_BASE + (port & IO_SPACE_LIMIT));
219}
220
221static inline u16 inw(unsigned long port)
222{
223 return readw(_IO_BASE + (port & IO_SPACE_LIMIT));
224}
225
226static inline u32 inl(unsigned long port)
227{
228 return readl(_IO_BASE + (port & IO_SPACE_LIMIT));
229}
230
231/*
232 * outb - write a byte to an I/O port
233 * @data: data to write
234 * @addr: address in I/O space
235 */
236static inline void outb(u8 data, unsigned long port)
237{
238 writeb(data, _IO_BASE + (port & IO_SPACE_LIMIT));
239}
240
241static inline void outw(u16 data, unsigned long port)
242{
243 writew(data, _IO_BASE + (port & IO_SPACE_LIMIT));
244}
245
246static inline void outl(u32 data, unsigned long port)
247{
248 writel(data, _IO_BASE + (port & IO_SPACE_LIMIT));
249}
250
251#define outb_p outb
252#define outw_p outw
253#define outl_p outl
254
255#define inb_p inb
256#define inw_p inw
257#define inl_p inl
258
259static inline void insb(unsigned long port, void *buffer, int count)
260{
261 if (count) {
262 u8 *buf = buffer;
263 do {
264 u8 x = inb(port);
265 *buf++ = x;
266 } while (--count);
267 }
268}
269
270static inline void insw(unsigned long port, void *buffer, int count)
271{
272 if (count) {
273 u16 *buf = buffer;
274 do {
275 u16 x = inw(port);
276 *buf++ = x;
277 } while (--count);
278 }
279}
280
281static inline void insl(unsigned long port, void *buffer, int count)
282{
283 if (count) {
284 u32 *buf = buffer;
285 do {
286 u32 x = inl(port);
287 *buf++ = x;
288 } while (--count);
289 }
290}
291
292static inline void outsb(unsigned long port, const void *buffer, int count)
293{
294 if (count) {
295 const u8 *buf = buffer;
296 do {
297 outb(*buf++, port);
298 } while (--count);
299 }
300}
301
302static inline void outsw(unsigned long port, const void *buffer, int count)
303{
304 if (count) {
305 const u16 *buf = buffer;
306 do {
307 outw(*buf++, port);
308 } while (--count);
309 }
310}
311
312static inline void outsl(unsigned long port, const void *buffer, int count)
313{
314 if (count) {
315 const u32 *buf = buffer;
316 do {
317 outl(*buf++, port);
318 } while (--count);
319 }
320}
321
322#define flush_write_buffers() do { } while (0)
323
324#endif /* __KERNEL__ */
325
326#endif
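A minimal sketch of how the port-style accessors above (inb()/outb() and friends) turn a "port" number into an ordinary memory-mapped address relative to _IO_BASE; the port value is arbitrary and the EX_* names are stand-ins.

#include <stdio.h>

#define EX_IO_SPACE_LIMIT	0xffff
#define EX_IO_BASE		0xfe000000UL	/* _IO_BASE from the header */

int main(void)
{
	unsigned long port = 0x3f8;		/* hypothetical device register */
	unsigned long addr = EX_IO_BASE + (port & EX_IO_SPACE_LIMIT);

	printf("port %#lx -> MMIO address %#lx\n", port, addr);
	return 0;
}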
diff --git a/arch/hexagon/include/asm/irq.h b/arch/hexagon/include/asm/irq.h
deleted file mode 100644
index 51661db389d..00000000000
--- a/arch/hexagon/include/asm/irq.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_IRQ_H_
20#define _ASM_IRQ_H_
21
22/* Number of first-level interrupts associated with the CPU core. */
23#define HEXAGON_CPUINTS 32
24
25/*
26 * Must define NR_IRQS before including <asm-generic/irq.h>
27 * 64 == the two SIRC's, 176 == the two gpio's
28 *
29 * IRQ configuration is still in flux; defining this to a comfortably
30 * large number.
31 */
32#define NR_IRQS 512
33
34#include <asm-generic/irq.h>
35
36#endif
diff --git a/arch/hexagon/include/asm/irqflags.h b/arch/hexagon/include/asm/irqflags.h
deleted file mode 100644
index e5fd9492d60..00000000000
--- a/arch/hexagon/include/asm/irqflags.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * IRQ support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_IRQFLAGS_H
22#define _ASM_IRQFLAGS_H
23
24#include <asm/hexagon_vm.h>
25#include <linux/types.h>
26
27static inline unsigned long arch_local_save_flags(void)
28{
29 return __vmgetie();
30}
31
32static inline unsigned long arch_local_irq_save(void)
33{
34 return __vmsetie(VM_INT_DISABLE);
35}
36
37static inline bool arch_irqs_disabled_flags(unsigned long flags)
38{
39 return !flags;
40}
41
42static inline bool arch_irqs_disabled(void)
43{
44 return !__vmgetie();
45}
46
47static inline void arch_local_irq_enable(void)
48{
49 __vmsetie(VM_INT_ENABLE);
50}
51
52static inline void arch_local_irq_disable(void)
53{
54 __vmsetie(VM_INT_DISABLE);
55}
56
57static inline void arch_local_irq_restore(unsigned long flags)
58{
59 __vmsetie(flags);
60}
61
62#endif
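A toy, user-space model of the save/restore pattern these helpers support; a plain variable stands in for the VM interrupt-enable state, and it assumes (as arch_local_irq_save() above relies on) that __vmsetie() returns the previous state.

#include <stdio.h>

static long ex_ie = 1;			/* stands in for the VM interrupt-enable bit */

static long ex_vmsetie(long new)	/* model of __vmsetie(): set, return old */
{
	long old = ex_ie;

	ex_ie = new;
	return old;
}

static unsigned long ex_irq_save(void)
{
	return ex_vmsetie(0);		/* disable, remember prior state */
}

static void ex_irq_restore(unsigned long flags)
{
	ex_vmsetie(flags);		/* put the prior state back */
}

int main(void)
{
	unsigned long flags = ex_irq_save();
	/* ... critical section ... */
	ex_irq_restore(flags);

	printf("interrupts enabled again: %ld\n", ex_ie);
	return 0;
}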
diff --git a/arch/hexagon/include/asm/kgdb.h b/arch/hexagon/include/asm/kgdb.h
deleted file mode 100644
index 32a6fb66944..00000000000
--- a/arch/hexagon/include/asm/kgdb.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * arch/hexagon/include/asm/kgdb.h - Hexagon KGDB Support
3 *
4 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef __HEXAGON_KGDB_H__
22#define __HEXAGON_KGDB_H__
23
24#define BREAK_INSTR_SIZE 4
25#define CACHE_FLUSH_IS_SAFE 1
26#define BUFMAX ((NUMREGBYTES * 2) + 512)
27
28static inline void arch_kgdb_breakpoint(void)
29{
30 asm("trap0(#0xDB)");
31}
32
33/* Registers:
34 * 32 gpr + sa0/1 + lc0/1 + m0/1 + gp + ugp + pred + pc = 42 total.
35 * vm regs = psp+elr+est+badva = 4
36 * syscall+restart = 2 more
37 * so 48 = 42 + 4 + 2
38 */
39#define DBG_USER_REGS 42
40#define DBG_MAX_REG_NUM (DBG_USER_REGS + 6)
41#define NUMREGBYTES (DBG_MAX_REG_NUM*4)
42
43#endif /* __HEXAGON_KGDB_H__ */
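A quick, illustrative arithmetic check of the register counts described in the comment above; the numbers are taken straight from the macros, nothing here is new information.

#include <stdio.h>

int main(void)
{
	int user_regs = 42;			/* DBG_USER_REGS */
	int max_regs  = user_regs + 4 + 2;	/* + VM regs + syscall/restart */
	int numbytes  = max_regs * 4;		/* NUMREGBYTES */

	printf("regs=%d bytes=%d bufmax=%d\n",
	       max_regs, numbytes, numbytes * 2 + 512);	/* BUFMAX */
	return 0;
}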
diff --git a/arch/hexagon/include/asm/linkage.h b/arch/hexagon/include/asm/linkage.h
deleted file mode 100644
index 31b4cbe7e58..00000000000
--- a/arch/hexagon/include/asm/linkage.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef __ASM_LINKAGE_H
20#define __ASM_LINKAGE_H
21
22#define __ALIGN .align 4
23#define __ALIGN_STR ".align 4"
24
25#endif
diff --git a/arch/hexagon/include/asm/mem-layout.h b/arch/hexagon/include/asm/mem-layout.h
deleted file mode 100644
index af16e977c55..00000000000
--- a/arch/hexagon/include/asm/mem-layout.h
+++ /dev/null
@@ -1,112 +0,0 @@
1/*
2 * Memory layout definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_HEXAGON_MEM_LAYOUT_H
22#define _ASM_HEXAGON_MEM_LAYOUT_H
23
24#include <linux/const.h>
25
26/*
27 * Have to do this for ginormous numbers, else they get printed as
28 * negative numbers, which the linker no likey when you try to
29 * assign it to the location counter.
30 */
31
32#define PAGE_OFFSET _AC(0xc0000000, UL)
33
34/*
35 * LOAD_ADDRESS is the physical/linear address of where in memory
36 * the kernel gets loaded. The 12 least significant bits must be zero (0)
37 * due to limitations on setting the EVB
38 *
39 */
40
41#ifndef LOAD_ADDRESS
42#define LOAD_ADDRESS 0x00000000
43#endif
44
45#define TASK_SIZE (PAGE_OFFSET)
46
47/* not sure how these are used yet */
48#define STACK_TOP TASK_SIZE
49#define STACK_TOP_MAX TASK_SIZE
50
51#ifndef __ASSEMBLY__
52enum fixed_addresses {
53 FIX_KMAP_BEGIN,
54 FIX_KMAP_END, /* check for per-cpuism */
55 __end_of_fixed_addresses
56};
57
58#define MIN_KERNEL_SEG 0x300 /* From 0xc0000000 */
59extern int max_kernel_seg;
60
61/*
62 * Start of vmalloc virtual address space for kernel;
63 * supposed to be based on the amount of physical memory available
64 */
65
66#define VMALLOC_START (PAGE_OFFSET + VMALLOC_OFFSET + \
67 (unsigned long)high_memory)
68
69/* Gap between physical ram and vmalloc space for guard purposes. */
70#define VMALLOC_OFFSET PAGE_SIZE
71
72/*
73 * Create the space between VMALLOC_START and FIXADDR_TOP backwards
74 * from the ... "top".
75 *
76 * Permanent IO mappings will live at 0xfexx_xxxx
77 * Hypervisor occupies the last 16MB page at 0xffxxxxxx
78 */
79
80#define FIXADDR_TOP 0xfe000000
81#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
82#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
83
84/*
85 * "permanent kernel mappings", defined as long-lasting mappings of
86 * high-memory page frames into the kernel address space.
87 */
88
89#define LAST_PKMAP PTRS_PER_PTE
90#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
91#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
92#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
93
94/*
95 * To the "left" of the fixed map space is the kmap space
96 *
97 * "Permanent Kernel Mappings"; fancy (or less fancy) PTE table
98 * that looks like it's actually walked.
99 * Need to check the alignment/shift usage; some archs use
100 * PMD_MASK on this value
101 */
102#define PKMAP_BASE (FIXADDR_START-PAGE_SIZE*LAST_PKMAP)
103
104/*
105 * 2 pages of guard gap between where vmalloc area ends
106 * and pkmap_base begins.
107 */
108#define VMALLOC_END (PKMAP_BASE-PAGE_SIZE*2)
109#endif /* !__ASSEMBLY__ */
110
111
112#endif /* _ASM_HEXAGON_MEM_LAYOUT_H */
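A minimal sketch of the PKMAP_NR()/PKMAP_ADDR() round trip above; PKMAP_BASE and PAGE_SHIFT here are invented stand-ins, since the real values depend on the configured page size and FIXADDR_START.

#include <assert.h>
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PKMAP_BASE	0xfd000000UL	/* hypothetical; really derived from FIXADDR_START */

#define EX_PKMAP_NR(virt)	(((virt) - EX_PKMAP_BASE) >> EX_PAGE_SHIFT)
#define EX_PKMAP_ADDR(nr)	(EX_PKMAP_BASE + ((unsigned long)(nr) << EX_PAGE_SHIFT))

int main(void)
{
	unsigned long va = EX_PKMAP_ADDR(5);

	assert(EX_PKMAP_NR(va) == 5);	/* slot/address round-trips */
	printf("pkmap slot 5 -> %#lx\n", va);
	return 0;
}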
diff --git a/arch/hexagon/include/asm/mmu.h b/arch/hexagon/include/asm/mmu.h
deleted file mode 100644
index 2288b19fd0f..00000000000
--- a/arch/hexagon/include/asm/mmu.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_MMU_H
20#define _ASM_MMU_H
21
22#include <asm/vdso.h>
23
24/*
25 * Architecture-specific state for a mm_struct.
26 * For the Hexagon Virtual Machine, it can be a copy
27 * of the pointer to the page table base.
28 */
29struct mm_context {
30 unsigned long long generation;
31 unsigned long ptbase;
32 struct hexagon_vdso *vdso;
33};
34
35typedef struct mm_context mm_context_t;
36
37#endif
diff --git a/arch/hexagon/include/asm/mmu_context.h b/arch/hexagon/include/asm/mmu_context.h
deleted file mode 100644
index d423d2e73c3..00000000000
--- a/arch/hexagon/include/asm/mmu_context.h
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * MM context support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_MMU_CONTEXT_H
22#define _ASM_MMU_CONTEXT_H
23
24#include <asm/setup.h>
25#include <asm/page.h>
26#include <asm/pgalloc.h>
27#include <asm/mem-layout.h>
28
29static inline void destroy_context(struct mm_struct *mm)
30{
31}
32
33/*
34 * VM port hides all TLB management, so "lazy TLB" isn't very
35 * meaningful. Even for ports to architectures with visible TLBs,
36 * this is almost invariably a null function.
37 */
38static inline void enter_lazy_tlb(struct mm_struct *mm,
39 struct task_struct *tsk)
40{
41}
42
43/*
44 * Architecture-specific actions, if any, for memory map deactivation.
45 */
46static inline void deactivate_mm(struct task_struct *tsk,
47 struct mm_struct *mm)
48{
49}
50
51/**
52 * init_new_context - initialize context related info for new mm_struct instance
53 * @tsk: pointer to a task struct
54 * @mm: pointer to a new mm struct
55 */
56static inline int init_new_context(struct task_struct *tsk,
57 struct mm_struct *mm)
58{
59 /* mm->context is set up by pgd_alloc */
60 return 0;
61}
62
63/*
64 * Switch active mm context
65 */
66static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
67 struct task_struct *tsk)
68{
69 int l1;
70
71 /*
72 * For virtual machine, we have to update system map if it's been
73 * touched.
74 */
75 if (next->context.generation < prev->context.generation) {
76 for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
77 next->pgd[l1] = init_mm.pgd[l1];
78
79 next->context.generation = prev->context.generation;
80 }
81
82 __vmnewmap((void *)next->context.ptbase);
83}
84
85/*
86 * Activate new memory map for task
87 */
88static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
89{
90 unsigned long flags;
91
92 local_irq_save(flags);
93 switch_mm(prev, next, current_thread_info()->task);
94 local_irq_restore(flags);
95}
96
97/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
98#include <asm-generic/mm_hooks.h>
99
100#endif
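A simplified, user-space model of the generation scheme that switch_mm() above depends on: a map whose generation lags the current one gets the kernel entries recopied before it runs. The real code only copies the MIN_KERNEL_SEG..max_kernel_seg range and also updates current->active_mm; everything here is a stand-in.

#include <stdio.h>
#include <string.h>

#define EX_SEGS 8

struct ex_mm {
	unsigned long long generation;
	unsigned long pgd[EX_SEGS];
};

static struct ex_mm ex_init_mm;			/* master copy of kernel mappings */
static unsigned long long ex_kmap_generation;

/* stands in for pmd_populate_kernel() adding a kernel mapping */
static void ex_update_kernel_map(struct ex_mm *active, int seg, unsigned long val)
{
	ex_kmap_generation++;
	ex_init_mm.pgd[seg] = val;
	active->pgd[seg] = val;
	active->generation = ex_kmap_generation;
}

/* stands in for switch_mm(): resync a stale map before switching to it */
static void ex_switch_mm(struct ex_mm *prev, struct ex_mm *next)
{
	if (next->generation < prev->generation) {
		memcpy(next->pgd, ex_init_mm.pgd, sizeof(next->pgd));
		next->generation = prev->generation;
	}
}

int main(void)
{
	struct ex_mm a = { 0 }, b = { 0 };

	ex_update_kernel_map(&a, 3, 0x1234);	/* kernel mapping added while A runs */
	ex_switch_mm(&a, &b);			/* B picks it up on the next switch */
	printf("b.pgd[3]=%#lx generation=%llu\n", b.pgd[3], b.generation);
	return 0;
}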
diff --git a/arch/hexagon/include/asm/module.h b/arch/hexagon/include/asm/module.h
deleted file mode 100644
index 6b4323acef4..00000000000
--- a/arch/hexagon/include/asm/module.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_MODULE_H
20#define _ASM_MODULE_H
21
22#include <asm-generic/module.h>
23
24#define MODULE_ARCH_VERMAGIC __stringify(PROCESSOR_MODEL_NAME) " "
25
26#endif
diff --git a/arch/hexagon/include/asm/mutex.h b/arch/hexagon/include/asm/mutex.h
deleted file mode 100644
index 58b52de1bc2..00000000000
--- a/arch/hexagon/include/asm/mutex.h
+++ /dev/null
@@ -1,8 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8#include <asm-generic/mutex-xchg.h>
diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h
deleted file mode 100644
index 692adc21342..00000000000
--- a/arch/hexagon/include/asm/page.h
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * Page management definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_PAGE_H
22#define _ASM_PAGE_H
23
24#include <linux/const.h>
25
26/* This is probably not the most graceful way to handle this. */
27
28#ifdef CONFIG_PAGE_SIZE_4KB
29#define PAGE_SHIFT 12
30#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
31#endif
32
33#ifdef CONFIG_PAGE_SIZE_16KB
34#define PAGE_SHIFT 14
35#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
36#endif
37
38#ifdef CONFIG_PAGE_SIZE_64KB
39#define PAGE_SHIFT 16
40#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
41#endif
42
43#ifdef CONFIG_PAGE_SIZE_256KB
44#define PAGE_SHIFT 18
45#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
46#endif
47
48#ifdef CONFIG_PAGE_SIZE_1MB
49#define PAGE_SHIFT 20
50#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
51#endif
52
53/*
54 * These should be defined in hugetlb.h, but apparently not.
55 * "Huge" for us should be 4MB or 16MB, which are both represented
56 * in L1 PTE's. Right now, it's set up for 4MB.
57 */
58#ifdef CONFIG_HUGETLB_PAGE
59#define HPAGE_SHIFT 22
60#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
61#define HPAGE_MASK (~(HPAGE_SIZE-1))
62#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
63#define HVM_HUGEPAGE_SIZE 0x5
64#endif
65
66#define PAGE_SIZE (1UL << PAGE_SHIFT)
67#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
68
69#ifdef __KERNEL__
70#ifndef __ASSEMBLY__
71
72/*
73 * This is for PFN_DOWN, which mm.h needs. Seems the right place to pull it in.
74 */
75#include <linux/pfn.h>
76
77/*
78 * We implement a two-level architecture-specific page table structure.
79 * Null intermediate page table level (pmd, pud) definitions will come from
80 * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h
81 */
82typedef struct { unsigned long pte; } pte_t;
83typedef struct { unsigned long pgd; } pgd_t;
84typedef struct { unsigned long pgprot; } pgprot_t;
85typedef struct page *pgtable_t;
86
87#define pte_val(x) ((x).pte)
88#define pgd_val(x) ((x).pgd)
89#define pgprot_val(x) ((x).pgprot)
90#define __pte(x) ((pte_t) { (x) })
91#define __pgd(x) ((pgd_t) { (x) })
92#define __pgprot(x) ((pgprot_t) { (x) })
93
94/*
95 * We need a __pa and a __va routine for kernel space.
96 * MIPS says they're only used during mem_init.
97 * also, check if we need a PHYS_OFFSET.
98 */
99#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
100#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
101
102/* The "page frame" descriptor is defined in linux/mm.h */
103struct page;
104
105/* Returns page frame descriptor for virtual address. */
106#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
107
108/* Default vm area behavior is non-executable. */
109#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
110 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
111
112#define pfn_valid(pfn) ((pfn) < max_mapnr)
113#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
114
115/* Need to not use a define for linesize; may move this to another file. */
116static inline void clear_page(void *page)
117{
118 /* This can only be done on pages with L1 WB cache */
119 asm volatile(
120 " loop0(1f,%1);\n"
121 "1: { dczeroa(%0);\n"
122 " %0 = add(%0,#32); }:endloop0\n"
123 : "+r" (page)
124 : "r" (PAGE_SIZE/32)
125 : "lc0", "sa0", "memory"
126 );
127}
128
129#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
130
131/*
132 * Under assumption that kernel always "sees" user map...
133 */
134#define clear_user_page(page, vaddr, pg) clear_page(page)
135#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
136
137/*
138 * page_to_phys - convert page to physical address
139 * @page - pointer to page entry in mem_map
140 */
141#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
142
143/*
144 * For port to Hexagon Virtual Machine, MAYBE we check for attempts
145 * to reference reserved HVM space, but in any case, the VM will be
146 * protected.
147 */
148#define kern_addr_valid(addr) (1)
149
150#include <asm-generic/memory_model.h>
151/* XXX Todo: implement assembly-optimized version of getorder. */
152#include <asm-generic/getorder.h>
153
154#endif /* ifdef __ASSEMBLY__ */
155#endif /* ifdef __KERNEL__ */
156
157#endif
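A minimal sketch of the linear __pa()/__va() arithmetic above, using the PAGE_OFFSET value from mem-layout.h and a made-up kernel virtual address.

#include <assert.h>
#include <stdio.h>

#define EX_PAGE_OFFSET	0xc0000000UL	/* PAGE_OFFSET from mem-layout.h */

#define ex_pa(x)	((unsigned long)(x) - EX_PAGE_OFFSET)
#define ex_va(x)	((void *)((unsigned long)(x) + EX_PAGE_OFFSET))

int main(void)
{
	unsigned long kva = 0xc0100000UL;	/* hypothetical kernel virtual address */
	unsigned long phys = ex_pa(kva);

	assert((unsigned long)ex_va(phys) == kva);	/* round-trips */
	printf("virt %#lx -> phys %#lx\n", kva, phys);
	return 0;
}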
diff --git a/arch/hexagon/include/asm/perf_event.h b/arch/hexagon/include/asm/perf_event.h
deleted file mode 100644
index 430978b1de3..00000000000
--- a/arch/hexagon/include/asm/perf_event.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_PERF_EVENT_H
20#define _ASM_PERF_EVENT_H
21
22#endif /* _ASM_PERF_EVENT_H */
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
deleted file mode 100644
index 679bf6d6648..00000000000
--- a/arch/hexagon/include/asm/pgalloc.h
+++ /dev/null
@@ -1,146 +0,0 @@
1/*
2 * Page table support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_PGALLOC_H
22#define _ASM_PGALLOC_H
23
24#include <asm/mem-layout.h>
25#include <asm/atomic.h>
26
27#define check_pgt_cache() do {} while (0)
28
29extern unsigned long long kmap_generation;
30
31/*
32 * Page table creation interface
33 */
34static inline pgd_t *pgd_alloc(struct mm_struct *mm)
35{
36 pgd_t *pgd;
37
38 pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
39
40 /*
41 * There may be better ways to do this, but to ensure
42 * that new address spaces always contain the kernel
43 * base mapping, and to ensure that the user area is
44 * initially marked invalid, initialize the new
45 * map with a copy of the kernel's persistent map.
46 */
47
48 memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t *));
49 mm->context.generation = kmap_generation;
50
51 /* Physical version is what is passed to virtual machine on switch */
52 mm->context.ptbase = __pa(pgd);
53
54 return pgd;
55}
56
57static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
58{
59 free_page((unsigned long) pgd);
60}
61
62static inline struct page *pte_alloc_one(struct mm_struct *mm,
63 unsigned long address)
64{
65 struct page *pte;
66
67 pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
68
69 if (pte)
70 pgtable_page_ctor(pte);
71
72 return pte;
73}
74
75/* _kernel variant gets to use a different allocator */
76static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
77 unsigned long address)
78{
79 gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
80 return (pte_t *) __get_free_page(flags);
81}
82
83static inline void pte_free(struct mm_struct *mm, struct page *pte)
84{
85 pgtable_page_dtor(pte);
86 __free_page(pte);
87}
88
89static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
90{
91 free_page((unsigned long)pte);
92}
93
94static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
95 pgtable_t pte)
96{
97 /*
98 * Conveniently, zero in 3 LSB means indirect 4K page table.
99 * Not so convenient when you're trying to vary the page size.
100 */
101 set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
102 HEXAGON_L1_PTE_SIZE));
103}
104
105/*
106 * Other architectures seem to have ways of making all processes
107 * share the same pmd's for their kernel mappings, but the v0.3
108 * Hexagon VM spec has a "monolithic" L1 table for user and kernel
109 * segments. We track "generations" of the kernel map to minimize
110 * overhead, and update the "slave" copies of the kernel mappings
111 * as part of switch_mm. However, we still need to update the
112 * kernel map of the active thread that's calling pmd_populate_kernel...
113 */
114static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
115 pte_t *pte)
116{
117 extern spinlock_t kmap_gen_lock;
118 pmd_t *ppmd;
119 int pmdindex;
120
121 spin_lock(&kmap_gen_lock);
122 kmap_generation++;
123 mm->context.generation = kmap_generation;
124 current->active_mm->context.generation = kmap_generation;
125 spin_unlock(&kmap_gen_lock);
126
127 set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
128
129 /*
130 * Now the "slave" copy of the current thread.
131 * This is pointer arithmetic, not byte addresses!
132 */
133 pmdindex = (pgd_t *)pmd - mm->pgd;
134 ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
135 set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
136 if (pmdindex > max_kernel_seg)
137 max_kernel_seg = pmdindex;
138}
139
140#define __pte_free_tlb(tlb, pte, addr) \
141do { \
142 pgtable_page_dtor((pte)); \
143 tlb_remove_page((tlb), (pte)); \
144} while (0)
145
146#endif
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
deleted file mode 100644
index 20d55f69fe5..00000000000
--- a/arch/hexagon/include/asm/pgtable.h
+++ /dev/null
@@ -1,518 +0,0 @@
1/*
2 * Page table support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_PGTABLE_H
22#define _ASM_PGTABLE_H
23
24/*
25 * Page table definitions for Qualcomm Hexagon processor.
26 */
27#include <linux/swap.h>
28#include <asm/page.h>
29#include <asm-generic/pgtable-nopmd.h>
30
31/* A handy thing to have if one has the RAM. Declared in head.S */
32extern unsigned long empty_zero_page;
33extern unsigned long zero_page_mask;
34
35/*
36 * The PTE model described here is that of the Hexagon Virtual Machine,
37 * which autonomously walks 2-level page tables. At a lower level, we
38 * also describe the RISCish software-loaded TLB entry structure of
39 * the underlying Hexagon processor. A kernel built to run on the
40 * virtual machine has no need to know about the underlying hardware.
41 */
42#include <asm/vm_mmu.h>
43
44/*
45 * To maximize the comfort level for the PTE manipulation macros,
46 * define the "well known" architecture-specific bits.
47 */
48#define _PAGE_READ __HVM_PTE_R
49#define _PAGE_WRITE __HVM_PTE_W
50#define _PAGE_EXECUTE __HVM_PTE_X
51#define _PAGE_USER __HVM_PTE_U
52
53/*
54 * We have a total of 4 "soft" bits available in the abstract PTE.
55 * The two mandatory software bits are Dirty and Accessed.
56 * To make nonlinear swap work according to the more recent
57 * model, we want a low order "Present" bit to indicate whether
58 * the PTE describes MMU programming or swap space.
59 */
60#define _PAGE_PRESENT (1<<0)
61#define _PAGE_DIRTY (1<<1)
62#define _PAGE_ACCESSED (1<<2)
63
64/*
65 * _PAGE_FILE is only meaningful if _PAGE_PRESENT is false, while
66 * _PAGE_DIRTY is only meaningful if _PAGE_PRESENT is true.
67 * So we can overload the bit...
68 */
69#define _PAGE_FILE _PAGE_DIRTY /* set: pagecache, unset = swap */
70
71/*
72 * For now, let's say that Valid and Present are the same thing.
73 * Alternatively, we could say that it's the "or" of R, W, and X
74 * permissions.
75 */
76#define _PAGE_VALID _PAGE_PRESENT
77
78/*
79 * We're not defining _PAGE_GLOBAL here, since there's no concept
80 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
81 * and we want to use the same page table structures and macros in
82 * the native kernel as we do in the virtual machine kernel.
83 * So we'll put up with a bit of inefficiency for now...
84 */
85
86/*
87 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
88 * only the second from the bottom, pgd and pud both being collapsed.
89 * Each entry represents 4MB of virtual address space, 4K of table
90 * thus maps the full 4GB.
91 */
92#define PGDIR_SHIFT 22
93#define PTRS_PER_PGD 1024
94
95#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
96#define PGDIR_MASK (~(PGDIR_SIZE-1))
97
98#ifdef CONFIG_PAGE_SIZE_4KB
99#define PTRS_PER_PTE 1024
100#endif
101
102#ifdef CONFIG_PAGE_SIZE_16KB
103#define PTRS_PER_PTE 256
104#endif
105
106#ifdef CONFIG_PAGE_SIZE_64KB
107#define PTRS_PER_PTE 64
108#endif
109
110#ifdef CONFIG_PAGE_SIZE_256KB
111#define PTRS_PER_PTE 16
112#endif
113
114#ifdef CONFIG_PAGE_SIZE_1MB
115#define PTRS_PER_PTE 4
116#endif
117
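/*
 * Illustrative aside, not part of the original header: a standalone check
 * that each PTRS_PER_PTE value above pairs with the matching PAGE_SHIFT
 * from page.h, so a full last-level table always spans one 4MB PGDIR entry.
 */
#include <stdio.h>

int main(void)
{
	static const int shifts[] = { 12, 14, 16, 18, 20 };	/* 4KB .. 1MB pages */
	static const int ptrs[]   = { 1024, 256, 64, 16, 4 };	/* PTRS_PER_PTE    */
	unsigned long pgdir_size  = 1UL << 22;			/* PGDIR_SHIFT == 22 */
	int i;

	for (i = 0; i < 5; i++)
		printf("page shift %2d x %4d entries -> %s\n", shifts[i], ptrs[i],
		       ((unsigned long)ptrs[i] << shifts[i]) == pgdir_size ?
		       "4MB" : "mismatch");
	return 0;
}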
118/* Any bigger and the PTE disappears. */
119#define pgd_ERROR(e) \
120 printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
121 pgd_val(e))
122
123/*
124 * Page Protection Constants. Includes (in this variant) cache attributes.
125 */
126extern unsigned long _dflt_cache_att;
127
128#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | \
129 _dflt_cache_att)
130#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
131 _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
132#define PAGE_COPY PAGE_READONLY
133#define PAGE_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
134 _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
135#define PAGE_COPY_EXEC PAGE_EXEC
136#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
137 _PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
138#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | \
139 _PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)
140
141
142/*
143 * Aliases for mapping mmap() protection bits to page protections.
144 * These get used for static initialization, so using the _dflt_cache_att
145 * variable for the default cache attribute isn't workable. If the
146 * default gets changed at boot time, the boot option code has to
147 * update data structures like the protection_map[] array.
148 */
149#define CACHEDEF (CACHE_DEFAULT << 6)
150
151/* Private (copy-on-write) page protections. */
152#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
153#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
154#define __P010 __P000 /* Write-only copy-on-write */
155#define __P011 __P001 /* Read/Write copy-on-write */
156#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
157 _PAGE_EXECUTE | CACHEDEF)
158#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
159 _PAGE_READ | CACHEDEF)
160#define __P110 __P100 /* Write/execute copy-on-write */
161#define __P111 __P101 /* Read/Write/Execute, copy-on-write */
162
163/* Shared page protections. */
164#define __S000 __P000
165#define __S001 __P001
166#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
167 _PAGE_WRITE | CACHEDEF)
168#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
169 _PAGE_WRITE | CACHEDEF)
170#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
171 _PAGE_EXECUTE | CACHEDEF)
172#define __S101 __P101
173#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
174 _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
175#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
176 _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
177
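/*
 * Illustrative aside, not part of the original header: a toy sketch of how
 * mmap() protection bits typically index the __Pxxx/__Sxxx entries above
 * once they are placed into protection_map[] (read = bit 0, write = bit 1,
 * exec = bit 2; the macro digits read exec/write/read).
 */
#include <stdio.h>

enum { EX_READ = 1, EX_WRITE = 2, EX_EXEC = 4 };	/* PROT_*-style bits */

static const char *ex_private[8] = {
	"__P000", "__P001", "__P010", "__P011",
	"__P100", "__P101", "__P110", "__P111",
};
static const char *ex_shared[8] = {
	"__S000", "__S001", "__S010", "__S011",
	"__S100", "__S101", "__S110", "__S111",
};

int main(void)
{
	int prot = EX_READ | EX_WRITE;	/* a read/write mapping */

	printf("rw private -> %s (copy-on-write), rw shared -> %s\n",
	       ex_private[prot], ex_shared[prot]);
	return 0;
}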
178extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
179
180/* Seems to be zero even in architectures where the zero page is firewalled? */
181#define FIRST_USER_ADDRESS 0
182#define pte_special(pte) 0
183#define pte_mkspecial(pte) (pte)
184
185/* HUGETLB not working currently */
186#ifdef CONFIG_HUGETLB_PAGE
187#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
188#endif
189
190/*
191 * For now, assume that higher-level code will do TLB/MMU invalidations
192 * and don't insert that overhead into this low-level function.
193 */
194extern void sync_icache_dcache(pte_t pte);
195
196#define pte_present_exec_user(pte) \
197 ((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
198 (_PAGE_EXECUTE | _PAGE_USER))
199
200static inline void set_pte(pte_t *ptep, pte_t pteval)
201{
202 /* should really be using pte_exec, if it weren't declared later. */
203 if (pte_present_exec_user(pteval))
204 sync_icache_dcache(pteval);
205
206 *ptep = pteval;
207}
208
209/*
210 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
211 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
212 * (Linux PTE), the key is to have bits 11..9 all zero. We'd use 0x7
213 * as a universal null entry, but some of those least significant bits
214 * are interpreted by software.
215 */
216#define _NULL_PMD 0x7
217#define _NULL_PTE 0x0
218
219static inline void pmd_clear(pmd_t *pmd_entry_ptr)
220{
221 pmd_val(*pmd_entry_ptr) = _NULL_PMD;
222}
223
224/*
225 * Conveniently, a null PTE value is invalid.
226 */
227static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
228 pte_t *ptep)
229{
230 pte_val(*ptep) = _NULL_PTE;
231}
232
233#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
234/**
235 * pmd_index - returns the index of the entry in the PMD page
236 * which would control the given virtual address
237 */
238#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
239
240#endif
241
242/**
243 * pgd_index - returns the index of the entry in the PGD page
244 * which would control the given virtual address
245 *
246 * This returns the *index* for the address in the pgd_t
247 */
248#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
249
250/*
251 * pgd_offset - find an offset in a page-table-directory
252 */
253#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
254
255/*
256 * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
257 */
258#define pgd_offset_k(address) pgd_offset(&init_mm, address)
259
260/**
261 * pmd_none - check if pmd_entry is mapped
262 * @pmd_entry: pmd entry
263 *
264 * MIPS checks it against that "invalid pte table" thing.
265 */
266static inline int pmd_none(pmd_t pmd)
267{
268 return pmd_val(pmd) == _NULL_PMD;
269}
270
271/**
272 * pmd_present - is there a page table behind this?
273 * Essentially the inverse of pmd_none. We may
274 * save an inline instruction by defining it this
275 * way, instead of simply "!pmd_none".
276 */
277static inline int pmd_present(pmd_t pmd)
278{
279 return pmd_val(pmd) != (unsigned long)_NULL_PMD;
280}
281
282/**
283 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
284 * As we have no known cause of badness, it's null, as it is for many
285 * architectures.
286 */
287static inline int pmd_bad(pmd_t pmd)
288{
289 return 0;
290}
291
292/*
293 * pmd_page - converts a PMD entry to a page pointer
294 */
295#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
296#define pmd_pgtable(pmd) pmd_page(pmd)
297
298/**
299 * pte_none - check if pte is mapped
300 * @pte: pte_t entry
301 */
302static inline int pte_none(pte_t pte)
303{
304 return pte_val(pte) == _NULL_PTE;
305};
306
307/*
308 * pte_present - check if page is present
309 */
310static inline int pte_present(pte_t pte)
311{
312 return pte_val(pte) & _PAGE_PRESENT;
313}
314
315/* mk_pte - make a PTE out of a page pointer and protection bits */
316#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
317
318/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
319#define pte_page(x) pfn_to_page(pte_pfn(x))
320
321/* pte_mkold - mark PTE as not recently accessed */
322static inline pte_t pte_mkold(pte_t pte)
323{
324 pte_val(pte) &= ~_PAGE_ACCESSED;
325 return pte;
326}
327
328/* pte_mkyoung - mark PTE as recently accessed */
329static inline pte_t pte_mkyoung(pte_t pte)
330{
331 pte_val(pte) |= _PAGE_ACCESSED;
332 return pte;
333}
334
335/* pte_mkclean - mark page as in sync with backing store */
336static inline pte_t pte_mkclean(pte_t pte)
337{
338 pte_val(pte) &= ~_PAGE_DIRTY;
339 return pte;
340}
341
342/* pte_mkdirty - mark page as modified */
343static inline pte_t pte_mkdirty(pte_t pte)
344{
345 pte_val(pte) |= _PAGE_DIRTY;
346 return pte;
347}
348
349/* pte_young - "is PTE marked as accessed"? */
350static inline int pte_young(pte_t pte)
351{
352 return pte_val(pte) & _PAGE_ACCESSED;
353}
354
355/* pte_dirty - "is PTE dirty?" */
356static inline int pte_dirty(pte_t pte)
357{
358 return pte_val(pte) & _PAGE_DIRTY;
359}
360
361/* pte_modify - set protection bits on PTE */
362static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
363{
364 pte_val(pte) &= PAGE_MASK;
365 pte_val(pte) |= pgprot_val(prot);
366 return pte;
367}
368
369/* pte_wrprotect - mark page as not writable */
370static inline pte_t pte_wrprotect(pte_t pte)
371{
372 pte_val(pte) &= ~_PAGE_WRITE;
373 return pte;
374}
375
376/* pte_mkwrite - mark page as writable */
377static inline pte_t pte_mkwrite(pte_t pte)
378{
379 pte_val(pte) |= _PAGE_WRITE;
380 return pte;
381}
382
383/* pte_mkexec - mark PTE as executable */
384static inline pte_t pte_mkexec(pte_t pte)
385{
386 pte_val(pte) |= _PAGE_EXECUTE;
387 return pte;
388}
389
390/* pte_read - "is PTE marked as readable?" */
391static inline int pte_read(pte_t pte)
392{
393 return pte_val(pte) & _PAGE_READ;
394}
395
396/* pte_write - "is PTE marked as writable?" */
397static inline int pte_write(pte_t pte)
398{
399 return pte_val(pte) & _PAGE_WRITE;
400}
401
402
403/* pte_exec - "is PTE marked as executable?" */
404static inline int pte_exec(pte_t pte)
405{
406 return pte_val(pte) & _PAGE_EXECUTE;
407}
408
409/* __pte_to_swp_entry - extract swap entry from PTE */
410#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
411
412/* __swp_entry_to_pte - extract PTE from swap entry */
413#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
414
415/* pfn_pte - convert page number and protection value to page table entry */
416#define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
417
418/* pte_pfn - convert pte to page frame number */
419#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
420#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
421
422/*
423 * set_pte_at - update page table and do whatever magic may be
424 * necessary to make the underlying hardware/firmware take note.
425 *
426 * VM may require a virtual instruction to alert the MMU.
427 */
428#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
429
430/*
431 * May need to invoke the virtual machine as well...
432 */
433#define pte_unmap(pte) do { } while (0)
434#define pte_unmap_nested(pte) do { } while (0)
435
436/*
437 * pte_offset_map - returns the linear address of the page table entry
438 * corresponding to an address
439 */
440#define pte_offset_map(dir, address) \
441 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
442
443#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
444
445/* pte_offset_kernel - kernel version of pte_offset */
446#define pte_offset_kernel(dir, address) \
447 ((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
448 + __pte_offset(address))
449
450/* ZERO_PAGE - returns the globally shared zero page */
451#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
452
453#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
454
455/* Nothing special about IO remapping at this point */
456#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
457 remap_pfn_range(vma, vaddr, pfn, size, prot)
458
459/* I think this is in case we have page table caches; needed by init/main.c */
460#define pgtable_cache_init() do { } while (0)
461
462/*
463 * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the
464 * PTE is interpreted as swap information. Depending on the _PAGE_FILE
465 * bit, the remaining free bits are either interpreted as a file offset
466 * or a swap type/offset tuple. Rather than have the TLB fill handler
467 * test _PAGE_PRESENT, we're going to reserve the permissions bits
468 * and set them to all zeros for swap entries, which speeds up the
469 * miss handler at the cost of 3 bits of offset. That trade-off can
470 * be revisited if necessary, but Hexagon processor architecture and
471 * target applications suggest a lot of TLB misses and not much swap space.
472 *
473 * Format of swap PTE:
474 * bit 0: Present (zero)
475 * bit 1: _PAGE_FILE (zero)
476 * bits 2-6: swap type (arch independent layer uses 5 bits max)
477 * bits 7-9: bits 2:0 of offset
478 * bits 10-12: effectively _PAGE_PROTNONE (all zero)
479 * bits 13-31: bits 21:3 of swap offset
480 *
481 * Format of file PTE:
482 * bit 0: Present (zero)
483 * bit 1: _PAGE_FILE (one)
484 * bits 2-9: bits 7:0 of offset
485 * bits 10-12: effectively _PAGE_PROTNONE (all zero)
486 * bits 13-31: bits 26:8 of file offset
487 *
488 * The split offset makes some of the following macros a little gnarly,
489 * but there's plenty of precedent for this sort of thing.
490 */
491#define PTE_FILE_MAX_BITS 27
492
493/* Used for swap PTEs */
494#define __swp_type(swp_pte) (((swp_pte).val >> 2) & 0x1f)
495
496#define __swp_offset(swp_pte) \
497 ((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))
498
499#define __swp_entry(type, offset) \
500 ((swp_entry_t) { \
501 ((type << 2) | \
502 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
503
504/* Used for file PTEs */
505#define pte_file(pte) \
506 ((pte_val(pte) & (_PAGE_FILE | _PAGE_PRESENT)) == _PAGE_FILE)
507
508#define pte_to_pgoff(pte) \
509 (((pte_val(pte) >> 2) & 0xff) | ((pte_val(pte) >> 5) & 0x07ffff00))
510
511#define pgoff_to_pte(off) \
512 ((pte_t) { ((((off) & 0x7ffff00) << 5) | (((off) & 0xff) << 2)\
513 | _PAGE_FILE) })
514
515/* Oh boy. There are a lot of possible arch overrides found in this file. */
516#include <asm-generic/pgtable.h>
517
518#endif
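
As a side note on the split-offset encoding documented above, the following host-side C sketch (not part of this patch; the test values are arbitrary) mirrors the removed __swp_entry()/__swp_type()/__swp_offset() macros and checks that a type/offset pair survives a round trip while leaving the present and file bits clear.

#include <assert.h>
#include <stdio.h>

/* Stand-alone model of the Hexagon swap-PTE bit layout shown above. */
typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(swp_pte)	(((swp_pte).val >> 2) & 0x1f)
#define __swp_offset(swp_pte) \
	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 2) | \
	(((offset) & 0x3ffff8) << 10) | (((offset) & 0x7) << 7) })

int main(void)
{
	swp_entry_t e = __swp_entry(0x11, 0x12345);

	assert((e.val & 0x3) == 0);		/* neither present nor file PTE */
	assert(__swp_type(e) == 0x11);		/* 5-bit type comes back intact */
	assert(__swp_offset(e) == 0x12345);	/* 22-bit offset reassembles */
	printf("swap entry encodes as 0x%08lx\n", e.val);
	return 0;
}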
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
deleted file mode 100644
index 6dd5d370686..00000000000
--- a/arch/hexagon/include/asm/processor.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Process/processor support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_PROCESSOR_H
22#define _ASM_PROCESSOR_H
23
24#ifndef __ASSEMBLY__
25
26#include <asm/mem-layout.h>
27#include <asm/registers.h>
28#include <asm/hexagon_vm.h>
29
30/* must be a macro */
31#define current_text_addr() ({ __label__ _l; _l: &&_l; })
32
33/* task_struct, defined elsewhere, is the "process descriptor" */
34struct task_struct;
35
36/* this is defined in arch/process.c */
37extern unsigned long thread_saved_pc(struct task_struct *tsk);
38
39extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
40
41/*
42 * thread_struct is supposed to be for context switch data.
43 * Specifically, to hold the state necessary to perform switch_to...
44 */
45struct thread_struct {
46 void *switch_sp;
47};
48
49/*
50 * initializes thread_struct
51 * The only thing we have in there is switch_sp
52 * which doesn't really need to be initialized.
53 */
54
55#define INIT_THREAD { \
56}
57
58#define cpu_relax() __vmyield()
59
60/*
61 * Decides where the kernel will search for a free chunk of vm space during
62 * mmaps.
63 * See also arch_get_unmapped_area.
64 * Doesn't affect if you have MAX_FIXED in the page flags set though...
65 *
66 * Apparently the convention is that ld.so will ask for "unmapped" private
67 * memory to be allocated SOMEWHERE, but it also asks for memory explicitly
68 * via MAP_FIXED at the lower addresses starting at VA=0x0.
69 *
70 * If the two requests collide, you get authentic segfaulting action, so
71 * you have to kick the "unmapped" base requests higher up.
72 */
73#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE/3))
74
75
76#define task_pt_regs(task) \
77 ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
78
79#define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
80#define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))
81
82/* Free all resources held by a thread; defined in process.c */
83extern void release_thread(struct task_struct *dead_task);
84
85/* Get wait channel for task P. */
86extern unsigned long get_wchan(struct task_struct *p);
87
88/* The following stuff is pretty HEXAGON specific. */
89
90/* This is really just here for __switch_to.
91 Offsets are pulled via asm-offsets.c */
92
93/*
94 * No real reason why VM and native switch stacks should be different.
95 * Ultimately this should merge. Note that Rev C. ABI called out only
96 * R24-27 as callee saved GPRs needing explicit attention (R29-31 being
97 * dealt with automagically by allocframe), but the current ABI has
98 * more, R16-R27. By saving more, the worst case is that we waste some
99 * cycles if building with the old compilers.
100 */
101
102struct hexagon_switch_stack {
103 unsigned long long r1716;
104 unsigned long long r1918;
105 unsigned long long r2120;
106 unsigned long long r2322;
107 unsigned long long r2524;
108 unsigned long long r2726;
109 unsigned long fp;
110 unsigned long lr;
111};
112
113#endif /* !__ASSEMBLY__ */
114
115#endif
diff --git a/arch/hexagon/include/asm/smp.h b/arch/hexagon/include/asm/smp.h
deleted file mode 100644
index 2b9b974e095..00000000000
--- a/arch/hexagon/include/asm/smp.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * SMP definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef __ASM_SMP_H
22#define __ASM_SMP_H
23
24#include <linux/cpumask.h>
25
26#define raw_smp_processor_id() (current_thread_info()->cpu)
27
28enum ipi_message_type {
29 IPI_NOP = 0,
30 IPI_RESCHEDULE = 1,
31 IPI_CALL_FUNC,
32 IPI_CALL_FUNC_SINGLE,
33 IPI_CPU_STOP,
34 IPI_TIMER,
35};
36
37extern void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg);
38extern void smp_start_cpus(void);
39extern void arch_send_call_function_single_ipi(int cpu);
40extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
41
42extern void smp_vm_unmask_irq(void *info);
43
44#endif
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
deleted file mode 100644
index 12ca4ebc033..00000000000
--- a/arch/hexagon/include/asm/spinlock.h
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * Spinlock support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 */
21
22#ifndef _ASM_SPINLOCK_H
23#define _ASM_SPINLOCK_H
24
25#include <asm/irqflags.h>
26
27/*
28 * This file is pulled in for SMP builds.
29 * Really need to check all the barrier stuff for "true" SMP
30 */
31
32/*
33 * Read locks:
34 * - load the lock value
35 * - increment it
36 * - if the lock value is still negative, go back and try again.
37 * - unsuccessful store is unsuccessful. Go back and try again. Loser.
38 * - successful store new lock value if positive -> lock acquired
39 */
40static inline void arch_read_lock(arch_rwlock_t *lock)
41{
42 __asm__ __volatile__(
43 "1: R6 = memw_locked(%0);\n"
44 " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
45 " { if !P3 jump 1b; }\n"
46 " memw_locked(%0,P3) = R6;\n"
47 " { if !P3 jump 1b; }\n"
48 :
49 : "r" (&lock->lock)
50 : "memory", "r6", "p3"
51 );
52
53}
54
55static inline void arch_read_unlock(arch_rwlock_t *lock)
56{
57 __asm__ __volatile__(
58 "1: R6 = memw_locked(%0);\n"
59 " R6 = add(R6,#-1);\n"
60 " memw_locked(%0,P3) = R6\n"
61 " if !P3 jump 1b;\n"
62 :
63 : "r" (&lock->lock)
64 : "memory", "r6", "p3"
65 );
66
67}
68
69/* I think this returns 0 on fail, 1 on success. */
70static inline int arch_read_trylock(arch_rwlock_t *lock)
71{
72 int temp;
73 __asm__ __volatile__(
74 " R6 = memw_locked(%1);\n"
75 " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
76 " { if !P3 jump 1f; }\n"
77 " memw_locked(%1,P3) = R6;\n"
78 " { %0 = P3 }\n"
79 "1:\n"
80 : "=&r" (temp)
81 : "r" (&lock->lock)
82 : "memory", "r6", "p3"
83 );
84 return temp;
85}
86
87static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
88{
89 return rwlock->lock == 0;
90}
91
92static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
93{
94 return rwlock->lock == 0;
95}
96
97/* Stuffs a -1 in the lock value? */
98static inline void arch_write_lock(arch_rwlock_t *lock)
99{
100 __asm__ __volatile__(
101 "1: R6 = memw_locked(%0)\n"
102 " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
103 " { if !P3 jump 1b; }\n"
104 " memw_locked(%0,P3) = R6;\n"
105 " { if !P3 jump 1b; }\n"
106 :
107 : "r" (&lock->lock)
108 : "memory", "r6", "p3"
109 );
110}
111
112
113static inline int arch_write_trylock(arch_rwlock_t *lock)
114{
115 int temp;
116 __asm__ __volatile__(
117 " R6 = memw_locked(%1)\n"
118 " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
119 " { if !P3 jump 1f; }\n"
120 " memw_locked(%1,P3) = R6;\n"
121 " %0 = P3;\n"
122 "1:\n"
123 : "=&r" (temp)
124 : "r" (&lock->lock)
125 : "memory", "r6", "p3"
126 );
127 return temp;
128
129}
130
131static inline void arch_write_unlock(arch_rwlock_t *lock)
132{
133 smp_mb();
134 lock->lock = 0;
135}
136
137static inline void arch_spin_lock(arch_spinlock_t *lock)
138{
139 __asm__ __volatile__(
140 "1: R6 = memw_locked(%0);\n"
141 " P3 = cmp.eq(R6,#0);\n"
142 " { if !P3 jump 1b; R6 = #1; }\n"
143 " memw_locked(%0,P3) = R6;\n"
144 " { if !P3 jump 1b; }\n"
145 :
146 : "r" (&lock->lock)
147 : "memory", "r6", "p3"
148 );
149
150}
151
152static inline void arch_spin_unlock(arch_spinlock_t *lock)
153{
154 smp_mb();
155 lock->lock = 0;
156}
157
158static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
159{
160 int temp;
161 __asm__ __volatile__(
162 " R6 = memw_locked(%1);\n"
163 " P3 = cmp.eq(R6,#0);\n"
164 " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
165 " memw_locked(%1,P3) = R6;\n"
166 " %0 = P3;\n"
167 "1:\n"
168 : "=&r" (temp)
169 : "r" (&lock->lock)
170 : "memory", "r6", "p3"
171 );
172 return temp;
173}
174
175/*
176 * SMP spinlocks are intended to allow only a single CPU at the lock
177 */
178#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
179#define arch_spin_unlock_wait(lock) \
180 do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
181#define arch_spin_is_locked(x) ((x)->lock != 0)
182
183#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
184#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
185
186#endif
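
For readers unfamiliar with the memw_locked load-locked/store-conditional idiom used throughout this file, here is a rough host-side C model (a sketch only, using GCC atomic builtins rather than Hexagon assembly) of the read-lock algorithm described in the comment above: a negative lock value means a writer holds the lock, and readers retry until they manage to bump a non-negative count.

#include <assert.h>

typedef struct { int lock; } model_rwlock_t;

/* Model of arch_read_lock(): retry until the count is non-negative
 * and the increment lands atomically. */
static void model_read_lock(model_rwlock_t *lock)
{
	for (;;) {
		int old = __atomic_load_n(&lock->lock, __ATOMIC_RELAXED);

		if (old < 0)
			continue;	/* writer holds the lock: retry */
		if (__atomic_compare_exchange_n(&lock->lock, &old, old + 1,
						0, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			break;		/* "store" succeeded: lock acquired */
	}
}

static void model_read_unlock(model_rwlock_t *lock)
{
	__atomic_fetch_sub(&lock->lock, 1, __ATOMIC_RELEASE);
}

int main(void)
{
	model_rwlock_t l = { 0 };

	model_read_lock(&l);
	assert(l.lock == 1);
	model_read_unlock(&l);
	assert(l.lock == 0);
	return 0;
}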
diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h
deleted file mode 100644
index 7a906b5214a..00000000000
--- a/arch/hexagon/include/asm/spinlock_types.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Spinlock support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_SPINLOCK_TYPES_H
22#define _ASM_SPINLOCK_TYPES_H
23
24#ifndef __LINUX_SPINLOCK_TYPES_H
25# error "please don't include this file directly"
26#endif
27
28typedef struct {
29 volatile unsigned int lock;
30} arch_spinlock_t;
31
32#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
33
34typedef struct {
35 volatile unsigned int lock;
36} arch_rwlock_t;
37
38#define __ARCH_RW_LOCK_UNLOCKED { 0 }
39
40#endif
diff --git a/arch/hexagon/include/asm/string.h b/arch/hexagon/include/asm/string.h
deleted file mode 100644
index 7d37f47a1d0..00000000000
--- a/arch/hexagon/include/asm/string.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_STRING_H_
20#define _ASM_STRING_H_
21
22#ifdef __KERNEL__
23#define __HAVE_ARCH_MEMCPY
24extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
25
26/* ToDo: use dczeroa, accelerate the compiler-constant zero case */
27#define __HAVE_ARCH_MEMSET
28extern void *memset(void *__to, int c, size_t __n);
29#endif
30
31
32#endif /* _ASM_STRING_H_ */
diff --git a/arch/hexagon/include/asm/suspend.h b/arch/hexagon/include/asm/suspend.h
deleted file mode 100644
index 18b44b557fb..00000000000
--- a/arch/hexagon/include/asm/suspend.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_SUSPEND_H
20#define _ASM_SUSPEND_H
21
22static inline int arch_prepare_suspend(void)
23{
24 return 0;
25}
26
27#endif
diff --git a/arch/hexagon/include/asm/switch_to.h b/arch/hexagon/include/asm/switch_to.h
deleted file mode 100644
index 96745e7b3e3..00000000000
--- a/arch/hexagon/include/asm/switch_to.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Task switching definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_SWITCH_TO_H
22#define _ASM_SWITCH_TO_H
23
24struct thread_struct;
25
26extern struct task_struct *__switch_to(struct task_struct *,
27 struct task_struct *,
28 struct task_struct *);
29
30#define switch_to(p, n, r) do {\
31 r = __switch_to((p), (n), (r));\
32} while (0)
33
34#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h
deleted file mode 100644
index 4af9c7b6f13..00000000000
--- a/arch/hexagon/include/asm/syscall.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * Syscall support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_HEXAGON_SYSCALL_H
22#define _ASM_HEXAGON_SYSCALL_H
23
24typedef long (*syscall_fn)(unsigned long, unsigned long,
25 unsigned long, unsigned long,
26 unsigned long, unsigned long);
27
28#include <asm-generic/syscalls.h>
29
30extern void *sys_call_table[];
31
32static inline long syscall_get_nr(struct task_struct *task,
33 struct pt_regs *regs)
34{
35 return regs->r06;
36}
37
38static inline void syscall_get_arguments(struct task_struct *task,
39 struct pt_regs *regs,
40 unsigned int i, unsigned int n,
41 unsigned long *args)
42{
43 BUG_ON(i + n > 6);
44 memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
45}
46#endif
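
The syscall_get_arguments() implementation above relies on r00..r05 being laid out contiguously in pt_regs; the following tiny host-side sketch (illustrative only, with a plain array standing in for the register block) shows the same memcpy-based extraction.

#include <assert.h>
#include <string.h>

int main(void)
{
	/* stand-in for &regs->r00 .. r05, which hold the syscall arguments */
	unsigned long regs_r00[6] = { 10, 11, 12, 13, 14, 15 };
	unsigned long args[3];
	unsigned int i = 2, n = 3;

	assert(i + n <= 6);			/* mirrors the BUG_ON() above */
	memcpy(args, &regs_r00[i], n * sizeof(args[0]));
	assert(args[0] == 12 && args[2] == 14);	/* arguments 2..4 extracted */
	return 0;
}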
diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h
deleted file mode 100644
index f7c32406a71..00000000000
--- a/arch/hexagon/include/asm/thread_info.h
+++ /dev/null
@@ -1,141 +0,0 @@
1/*
2 * Thread support for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_THREAD_INFO_H
22#define _ASM_THREAD_INFO_H
23
24#ifdef __KERNEL__
25
26#ifndef __ASSEMBLY__
27#include <asm/processor.h>
28#include <asm/registers.h>
29#include <asm/page.h>
30#endif
31
32#define THREAD_SHIFT 12
33#define THREAD_SIZE (1<<THREAD_SHIFT)
34#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
35
36#ifndef __ASSEMBLY__
37
38typedef struct {
39 unsigned long seg;
40} mm_segment_t;
41
42/*
43 * This is union'd with the "bottom" of the kernel stack.
44 * It keeps track of thread info which is handy for routines
45 * to access quickly.
46 */
47
48struct thread_info {
49 struct task_struct *task; /* main task structure */
50 struct exec_domain *exec_domain; /* execution domain */
51 unsigned long flags; /* low level flags */
52 __u32 cpu; /* current cpu */
53 int preempt_count; /* 0=>preemptible,<0=>BUG */
54 mm_segment_t addr_limit; /* segmentation sux */
55 /*
56 * used for syscalls somehow;
57 * seems to have a function pointer and four arguments
58 */
59 struct restart_block restart_block;
60 /* Points to the current pt_regs frame */
61 struct pt_regs *regs;
62 /*
63 * saved kernel sp at switch_to time;
64 * not sure if this is used (it's not in the VM model it seems;
65 * see thread_struct)
66 */
67 unsigned long sp;
68};
69
70#else /* !__ASSEMBLY__ */
71
72#include <asm/asm-offsets.h>
73
74#endif /* __ASSEMBLY__ */
75
76/* looks like "linux/hardirq.h" uses this. */
77
78#define PREEMPT_ACTIVE 0x10000000
79
80#ifndef __ASSEMBLY__
81
82#define INIT_THREAD_INFO(tsk) \
83{ \
84 .task = &tsk, \
85 .exec_domain = &default_exec_domain, \
86 .flags = 0, \
87 .cpu = 0, \
88 .preempt_count = 1, \
89 .addr_limit = KERNEL_DS, \
90 .restart_block = { \
91 .fn = do_no_restart_syscall, \
92 }, \
93 .sp = 0, \
94 .regs = NULL, \
95}
96
97#define init_thread_info (init_thread_union.thread_info)
98#define init_stack (init_thread_union.stack)
99
100/* Tacky preprocessor trickery */
101#define qqstr(s) qstr(s)
102#define qstr(s) #s
103#define QUOTED_THREADINFO_REG qqstr(THREADINFO_REG)
104
105register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG);
106#define current_thread_info() __current_thread_info
107
108#endif /* __ASSEMBLY__ */
109
110/*
111 * thread information flags
112 * - these are process state flags that various assembly files
113 * may need to access
114 * - pending work-to-be-done flags are in LSW
115 * - other flags in MSW
116 */
117
118#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
119#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
120#define TIF_SIGPENDING 2 /* signal pending */
121#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
122#define TIF_SINGLESTEP 4 /* restore ss @ return to usr mode */
123#define TIF_RESTORE_SIGMASK 6 /* restore sig mask in do_signal() */
124/* true if poll_idle() is polling TIF_NEED_RESCHED */
125#define TIF_MEMDIE 17 /* OOM killer killed process */
126
127#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
128#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
129#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
130#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
131#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
132
133/* work to do on interrupt/exception return - All but TIF_SYSCALL_TRACE */
134#define _TIF_WORK_MASK (0x0000FFFF & ~_TIF_SYSCALL_TRACE)
135
136/* work to do on any return to u-space */
137#define _TIF_ALLWORK_MASK 0x0000FFFF
138
139#endif /* __KERNEL__ */
140
141#endif
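
As a quick illustration of how the work masks at the end of this file combine (a host-side sketch with only the relevant flag definitions copied): a pending signal falls inside _TIF_WORK_MASK and so triggers return-path work, while the syscall-trace bit alone does not.

#include <assert.h>

#define TIF_SYSCALL_TRACE	0
#define TIF_SIGPENDING		2
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
/* low 16 bits, minus the syscall-trace bit, as defined above */
#define _TIF_WORK_MASK		(0x0000FFFF & ~_TIF_SYSCALL_TRACE)

int main(void)
{
	unsigned long flags = _TIF_SIGPENDING;

	assert(flags & _TIF_WORK_MASK);		/* pending signal: do work */

	flags = _TIF_SYSCALL_TRACE;
	assert(!(flags & _TIF_WORK_MASK));	/* tracing alone: no work */
	return 0;
}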
diff --git a/arch/hexagon/include/asm/time.h b/arch/hexagon/include/asm/time.h
deleted file mode 100644
index deda170c03b..00000000000
--- a/arch/hexagon/include/asm/time.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef ASM_TIME_H
20#define ASM_TIME_H
21
22extern cycles_t pcycle_freq_mhz;
23extern cycles_t thread_freq_mhz;
24extern cycles_t sleep_clk_freq;
25
26void setup_percpu_clockdev(void);
27void ipi_timer(void);
28
29#endif
diff --git a/arch/hexagon/include/asm/timer-regs.h b/arch/hexagon/include/asm/timer-regs.h
deleted file mode 100644
index 79912b8c1e5..00000000000
--- a/arch/hexagon/include/asm/timer-regs.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Timer support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_TIMER_REGS_H
22#define _ASM_TIMER_REGS_H
23
24/* This stuff should go into a platform specific file */
25#define TCX0_CLK_RATE 19200
26#define TIMER_ENABLE 0
27#define TIMER_CLR_ON_MATCH 1
28
29/*
30 * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
31 * release 1.1, and then it's "adjustable" and probably not defaulted.
32 */
33#define RTOS_TIMER_INT 3
34#ifdef CONFIG_HEXAGON_COMET
35#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
36#endif
37#define SLEEP_CLK_RATE 32000
38
39#endif
diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h
deleted file mode 100644
index f63fe132f07..00000000000
--- a/arch/hexagon/include/asm/timex.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_TIMEX_H
20#define _ASM_TIMEX_H
21
22#include <asm-generic/timex.h>
23#include <asm/timer-regs.h>
24
25/* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
26#define CLOCK_TICK_RATE TCX0_CLK_RATE
27
28#define ARCH_HAS_READ_CURRENT_TIMER
29
30static inline int read_current_timer(unsigned long *timer_val)
31{
32 *timer_val = (unsigned long) __vmgettime();
33 return 0;
34}
35
36#endif
diff --git a/arch/hexagon/include/asm/tlb.h b/arch/hexagon/include/asm/tlb.h
deleted file mode 100644
index 2f00772cc08..00000000000
--- a/arch/hexagon/include/asm/tlb.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_TLB_H
20#define _ASM_TLB_H
21
22#include <linux/pagemap.h>
23#include <asm/tlbflush.h>
24
25/*
26 * We don't need any special per-pte or per-vma handling...
27 */
28#define tlb_start_vma(tlb, vma) do { } while (0)
29#define tlb_end_vma(tlb, vma) do { } while (0)
30#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
31
32/*
33 * .. because we flush the whole mm when it fills up
34 */
35#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
36
37#include <asm-generic/tlb.h>
38
39#endif
diff --git a/arch/hexagon/include/asm/tlbflush.h b/arch/hexagon/include/asm/tlbflush.h
deleted file mode 100644
index 62d95a9705c..00000000000
--- a/arch/hexagon/include/asm/tlbflush.h
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * TLB flush support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_TLBFLUSH_H
22#define _ASM_TLBFLUSH_H
23
24#include <linux/mm.h>
25#include <asm/processor.h>
26
27/*
28 * TLB flushing -- in "SMP", these routines get defined to be the
29 * ones from smp.c, else they are some local flavors.
30 */
31
32/*
33 * These functions are commonly macros, but in the interests of
34 * VM vs. native implementation and code size, we simply declare
35 * the function prototypes here.
36 */
37extern void tlb_flush_all(void);
38extern void flush_tlb_mm(struct mm_struct *mm);
39extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
40extern void flush_tlb_range(struct vm_area_struct *vma,
41 unsigned long start, unsigned long end);
42extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
43extern void flush_tlb_one(unsigned long);
44
45/*
46 * "This is called in munmap when we have freed up some page-table pages.
47 * We don't need to do anything here..."
48 *
49 * The VM kernel doesn't walk page tables, and they are passed to the VMM
50 * by logical address. There doesn't seem to be any possibility that they
51 * could be referenced by the VM kernel based on a stale mapping, since
52 * they would only be located by consulting the mm structure, and they
53 * will have been purged from that structure by the munmap. Seems like
54 * a noop on HVM as well.
55 */
56#define flush_tlb_pgtables(mm, start, end)
57
58#endif
diff --git a/arch/hexagon/include/asm/traps.h b/arch/hexagon/include/asm/traps.h
deleted file mode 100644
index ec11285012c..00000000000
--- a/arch/hexagon/include/asm/traps.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Trap support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_HEXAGON_TRAPS_H
22#define _ASM_HEXAGON_TRAPS_H
23
24#include <asm/registers.h>
25
26extern int die(const char *str, struct pt_regs *regs, long err);
27extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
28
29#endif /* _ASM_HEXAGON_TRAPS_H */
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
deleted file mode 100644
index e4127e4d6a5..00000000000
--- a/arch/hexagon/include/asm/uaccess.h
+++ /dev/null
@@ -1,116 +0,0 @@
1/*
2 * User memory access support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_UACCESS_H
22#define _ASM_UACCESS_H
23/*
24 * User space memory access functions
25 */
26#include <linux/sched.h>
27#include <linux/mm.h>
28#include <asm/segment.h>
29#include <asm/sections.h>
30
31/*
32 * access_ok: - Checks if a user space pointer is valid
33 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
34 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
35 * to write to a block, it is always safe to read from it.
36 * @addr: User space pointer to start of block to check
37 * @size: Size of block to check
38 *
39 * Context: User context only. This function may sleep.
40 *
41 * Checks if a pointer to a block of memory in user space is valid.
42 *
43 * Returns true (nonzero) if the memory block *may* be valid, false (zero)
44 * if it is definitely invalid.
45 *
46 * User address space in Hexagon, like x86, goes to 0xbfffffff, so the
47 * simple MSB-based tests used by MIPS won't work. Some further
48 * optimization is probably possible here, but for now, keep it
49 * reasonably simple and not *too* slow. After all, we've got the
50 * MMU for backup.
51 */
52#define VERIFY_READ 0
53#define VERIFY_WRITE 1
54
55#define __access_ok(addr, size) \
56 ((get_fs().seg == KERNEL_DS.seg) || \
57 (((unsigned long)addr < get_fs().seg) && \
58 (unsigned long)size < (get_fs().seg - (unsigned long)addr)))
59
60/*
61 * When a kernel-mode page fault is taken, the faulting instruction
62 * address is checked against a table of exception_table_entries.
63 * Each entry is a tuple of the address of an instruction that may
64 * be authorized to fault, and the address at which execution should
65 * be resumed instead of the faulting instruction, so as to effect
66 * a workaround.
67 */
68
69/* Assembly somewhat optimized copy routines */
70unsigned long __copy_from_user_hexagon(void *to, const void __user *from,
71 unsigned long n);
72unsigned long __copy_to_user_hexagon(void __user *to, const void *from,
73 unsigned long n);
74
75#define __copy_from_user(to, from, n) __copy_from_user_hexagon(to, from, n)
76#define __copy_to_user(to, from, n) __copy_to_user_hexagon(to, from, n)
77
78/*
79 * XXX todo: some additional performance gain is possible by
80 * implementing __copy_to/from_user_inatomic, which is much
81 * like __copy_to/from_user, but performs slightly less checking.
82 */
83
84__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
85#define __clear_user(a, s) __clear_user_hexagon((a), (s))
86
87#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)
88
89/* get around the ifndef in asm-generic/uaccess.h */
90#define __strnlen_user __strnlen_user
91
92extern long __strnlen_user(const char __user *src, long n);
93
94static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
95 long n);
96
97#include <asm-generic/uaccess.h>
98
99/* Todo: an actual accelerated version of this. */
100static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
101 long n)
102{
103 long res = __strnlen_user(src, n);
104
105 /* return from strnlen can't be zero -- that would be rubbish. */
106
107 if (res > n) {
108 copy_from_user(dst, src, n);
109 return n;
110 } else {
111 copy_from_user(dst, src, res);
112 return res-1;
113 }
114}
115
116#endif
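
The __access_ok() macro above avoids the MSB trick mentioned in the comment by doing an explicit range check against the segment limit. Here is a host-side sketch of the same test (illustrative only; the all-ones kernel segment value and the 0xbfffffff user limit are assumptions taken from the comment, not the real segment definitions).

#include <assert.h>

#define MODEL_USER_SEG		0xbfffffffUL	/* user space ends here per the comment */
#define MODEL_KERNEL_SEG	(~0UL)		/* assumed KERNEL_DS-style "no limit" */

static int model_access_ok(unsigned long addr, unsigned long size,
			   unsigned long seg)
{
	/* kernel segment: everything passes, as in the first clause above */
	if (seg == MODEL_KERNEL_SEG)
		return 1;
	/* start below the limit, and length fits before the limit */
	return addr < seg && size < seg - addr;
}

int main(void)
{
	assert(model_access_ok(0x1000, 0x100, MODEL_USER_SEG));
	assert(!model_access_ok(MODEL_USER_SEG - 4, 64, MODEL_USER_SEG));
	assert(model_access_ok(0xc0000000UL, 0x1000, MODEL_KERNEL_SEG));
	return 0;
}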
diff --git a/arch/hexagon/include/asm/vdso.h b/arch/hexagon/include/asm/vdso.h
deleted file mode 100644
index ed08e6c6886..00000000000
--- a/arch/hexagon/include/asm/vdso.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * vDSO implementation for Hexagon
3 *
4 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef __ASM_VDSO_H
22#define __ASM_VDSO_H
23
24#include <linux/types.h>
25
26struct hexagon_vdso {
27 u32 rt_signal_trampoline[2];
28};
29
30#endif /* __ASM_VDSO_H */
diff --git a/arch/hexagon/include/asm/vm_fault.h b/arch/hexagon/include/asm/vm_fault.h
deleted file mode 100644
index 9b0e9c50ced..00000000000
--- a/arch/hexagon/include/asm/vm_fault.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_HEXAGON_VM_FAULT_H
20#define _ASM_HEXAGON_VM_FAULT_H
21
22extern void execute_protection_fault(struct pt_regs *);
23extern void write_protection_fault(struct pt_regs *);
24extern void read_protection_fault(struct pt_regs *);
25
26#endif
diff --git a/arch/hexagon/include/asm/vm_mmu.h b/arch/hexagon/include/asm/vm_mmu.h
deleted file mode 100644
index 9a94de7969b..00000000000
--- a/arch/hexagon/include/asm/vm_mmu.h
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * Hexagon VM page table entry definitions
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_VM_MMU_H
22#define _ASM_VM_MMU_H
23
24/*
25 * Shift, mask, and other constants for the Hexagon Virtual Machine
26 * page tables.
27 *
28 * Virtual machine MMU allows first-level entries to either be
29 * single-level lookup PTEs for very large pages, or PDEs pointing
30 * to second-level PTEs for smaller pages. If PTE is single-level,
31 * the least significant bits cannot be used as software bits to encode
32 * virtual memory subsystem information about the page, and that state
33 * must be maintained in some parallel data structure.
34 */
35
36/* S or Page Size field in PDE */
37#define __HVM_PDE_S (0x7 << 0)
38#define __HVM_PDE_S_4KB 0
39#define __HVM_PDE_S_16KB 1
40#define __HVM_PDE_S_64KB 2
41#define __HVM_PDE_S_256KB 3
42#define __HVM_PDE_S_1MB 4
43#define __HVM_PDE_S_4MB 5
44#define __HVM_PDE_S_16MB 6
45#define __HVM_PDE_S_INVALID 7
46
47/* Masks for L2 page table pointer, as function of page size */
48#define __HVM_PDE_PTMASK_4KB 0xfffff000
49#define __HVM_PDE_PTMASK_16KB 0xfffffc00
50#define __HVM_PDE_PTMASK_64KB 0xffffff00
51#define __HVM_PDE_PTMASK_256KB 0xffffffc0
52#define __HVM_PDE_PTMASK_1MB 0xfffffff0
53
54/*
55 * Virtual Machine PTE Bits/Fields
56 */
57#define __HVM_PTE_T (1<<4)
58#define __HVM_PTE_U (1<<5)
59#define __HVM_PTE_C (0x7<<6)
60#define __HVM_PTE_CVAL(pte) (((pte) & __HVM_PTE_C) >> 6)
61#define __HVM_PTE_R (1<<9)
62#define __HVM_PTE_W (1<<10)
63#define __HVM_PTE_X (1<<11)
64
65/*
66 * Cache Attributes, to be shifted as necessary for virtual/physical PTEs
67 */
68
69#define __HEXAGON_C_WB 0x0 /* Write-back, no L2 */
70#define __HEXAGON_C_WT 0x1 /* Write-through, no L2 */
71#define __HEXAGON_C_DEV 0x4 /* Device register space */
72#define __HEXAGON_C_WT_L2 0x5 /* Write-through, with L2 */
73/* this really should be #if CONFIG_HEXAGON_ARCH = 2 but that's not defined */
74#if defined(CONFIG_HEXAGON_COMET) || defined(CONFIG_QDSP6_ST1)
75#define __HEXAGON_C_UNC __HEXAGON_C_DEV
76#else
77#define __HEXAGON_C_UNC 0x6 /* Uncached memory */
78#endif
79#define __HEXAGON_C_WB_L2 0x7 /* Write-back, with L2 */
80
81/*
82 * This can be overridden, but we're defaulting to the most aggressive
83 * cache policy, the better to find bugs sooner.
84 */
85
86#define CACHE_DEFAULT __HEXAGON_C_WB_L2
87
88/* Masks for physical page address, as a function of page size */
89
90#define __HVM_PTE_PGMASK_4KB 0xfffff000
91#define __HVM_PTE_PGMASK_16KB 0xffffc000
92#define __HVM_PTE_PGMASK_64KB 0xffff0000
93#define __HVM_PTE_PGMASK_256KB 0xfffc0000
94#define __HVM_PTE_PGMASK_1MB 0xfff00000
95
96/* Masks for single-level large page lookups */
97
98#define __HVM_PTE_PGMASK_4MB 0xffc00000
99#define __HVM_PTE_PGMASK_16MB 0xff000000
100
101/*
102 * "Big kernel page mappings" (see vm_init_segtable.S)
103 * are currently 16MB
104 */
105
106#define BIG_KERNEL_PAGE_SHIFT 24
107#define BIG_KERNEL_PAGE_SIZE (1 << BIG_KERNEL_PAGE_SHIFT)
108
109
110
111#endif /* _ASM_VM_MMU_H */
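
To show how the PTE fields above compose, here is a short host-side sketch (not kernel code; the physical address is arbitrary) that assembles a hypothetical 4KB, user-readable/writable, write-back-cached entry from the removed bit definitions and reads the cache field back with __HVM_PTE_CVAL().

#include <assert.h>
#include <stdio.h>

#define __HVM_PTE_U		(1 << 5)
#define __HVM_PTE_C		(0x7 << 6)
#define __HVM_PTE_CVAL(pte)	(((pte) & __HVM_PTE_C) >> 6)
#define __HVM_PTE_R		(1 << 9)
#define __HVM_PTE_W		(1 << 10)
#define __HVM_PTE_X		(1 << 11)
#define __HEXAGON_C_WB_L2	0x7		/* write-back, with L2 */
#define __HVM_PTE_PGMASK_4KB	0xfffff000UL

int main(void)
{
	unsigned long pa = 0x12345000UL;	/* arbitrary page-aligned address */
	unsigned long pte = (pa & __HVM_PTE_PGMASK_4KB) |
			    (__HEXAGON_C_WB_L2 << 6) |
			    __HVM_PTE_U | __HVM_PTE_R | __HVM_PTE_W;

	assert(__HVM_PTE_CVAL(pte) == __HEXAGON_C_WB_L2);
	assert(!(pte & __HVM_PTE_X));		/* no execute permission */
	printf("model PTE = 0x%08lx\n", pte);
	return 0;
}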
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
deleted file mode 100644
index c31706c3863..00000000000
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ /dev/null
@@ -1,15 +0,0 @@
1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm
3
4header-y += bitsperlong.h
5header-y += byteorder.h
6header-y += kvm_para.h
7header-y += param.h
8header-y += ptrace.h
9header-y += registers.h
10header-y += setup.h
11header-y += sigcontext.h
12header-y += signal.h
13header-y += swab.h
14header-y += unistd.h
15header-y += user.h
diff --git a/arch/hexagon/include/uapi/asm/bitsperlong.h b/arch/hexagon/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 4a658151383..00000000000
--- a/arch/hexagon/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef __ASM_HEXAGON_BITSPERLONG_H
20#define __ASM_HEXAGON_BITSPERLONG_H
21
22#define __BITS_PER_LONG 32
23
24#include <asm-generic/bitsperlong.h>
25
26#endif
diff --git a/arch/hexagon/include/uapi/asm/byteorder.h b/arch/hexagon/include/uapi/asm/byteorder.h
deleted file mode 100644
index e31f3f7d9a4..00000000000
--- a/arch/hexagon/include/uapi/asm/byteorder.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_BYTEORDER_H
20#define _ASM_BYTEORDER_H
21
22#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
23# define __BYTEORDER_HAS_U64__
24#endif
25
26#include <linux/byteorder/little_endian.h>
27
28#endif /* _ASM_BYTEORDER_H */
diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b95..00000000000
--- a/arch/hexagon/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kvm_para.h>
diff --git a/arch/hexagon/include/uapi/asm/param.h b/arch/hexagon/include/uapi/asm/param.h
deleted file mode 100644
index 5cec8c0417f..00000000000
--- a/arch/hexagon/include/uapi/asm/param.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_PARAM_H
20#define _ASM_PARAM_H
21
22#define EXEC_PAGESIZE 16384
23
24#include <asm-generic/param.h>
25
26#endif
diff --git a/arch/hexagon/include/uapi/asm/ptrace.h b/arch/hexagon/include/uapi/asm/ptrace.h
deleted file mode 100644
index 1ffce0c6ee0..00000000000
--- a/arch/hexagon/include/uapi/asm/ptrace.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Ptrace definitions for the Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#ifndef _ASM_PTRACE_H
22#define _ASM_PTRACE_H
23
24#include <asm/registers.h>
25
26#define instruction_pointer(regs) pt_elr(regs)
27#define user_stack_pointer(regs) ((regs)->r29)
28
29#define profile_pc(regs) instruction_pointer(regs)
30
31/* kprobe-based event tracer support */
32extern int regs_query_register_offset(const char *name);
33extern const char *regs_query_register_name(unsigned int offset);
34
35#define current_pt_regs() \
36 ((struct pt_regs *) \
37 ((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
38
39#endif
diff --git a/arch/hexagon/include/uapi/asm/registers.h b/arch/hexagon/include/uapi/asm/registers.h
deleted file mode 100644
index c20406f63b5..00000000000
--- a/arch/hexagon/include/uapi/asm/registers.h
+++ /dev/null
@@ -1,221 +0,0 @@
1/*
2 * Register definitions for the Hexagon architecture
3 */
4
5
6#ifndef _ASM_REGISTERS_H
7#define _ASM_REGISTERS_H
8
9#define SP r29
10
11#ifndef __ASSEMBLY__
12
13/* See kernel/entry.S for further documentation. */
14
15/*
16 * Entry code copies the event record out of guest registers into
17 * this structure (which is on the stack).
18 */
19
20struct hvm_event_record {
21 unsigned long vmel; /* Event Linkage (return address) */
22 unsigned long vmest; /* Event context - pre-event SSR values */
23 unsigned long vmpsp; /* Previous stack pointer */
24 unsigned long vmbadva; /* Bad virtual address for addressing events */
25};
26
27struct pt_regs {
28 long restart_r0; /* R0 checkpoint for syscall restart */
29 long syscall_nr; /* Only used in system calls */
30 union {
31 struct {
32 unsigned long usr;
33 unsigned long preds;
34 };
35 long long int predsusr;
36 };
37 union {
38 struct {
39 unsigned long m0;
40 unsigned long m1;
41 };
42 long long int m1m0;
43 };
44 union {
45 struct {
46 unsigned long sa1;
47 unsigned long lc1;
48 };
49 long long int lc1sa1;
50 };
51 union {
52 struct {
53 unsigned long sa0;
54 unsigned long lc0;
55 };
56 long long int lc0sa0;
57 };
58 union {
59 struct {
60 unsigned long gp;
61 unsigned long ugp;
62 };
63 long long int ugpgp;
64 };
65 /*
66 * Be extremely careful with rearranging these, if at all. Some code
67 * assumes the 32 registers exist exactly like this in memory;
68 * e.g. kernel/ptrace.c
69 * e.g. kernel/signal.c (restore_sigcontext)
70 */
71 union {
72 struct {
73 unsigned long r00;
74 unsigned long r01;
75 };
76 long long int r0100;
77 };
78 union {
79 struct {
80 unsigned long r02;
81 unsigned long r03;
82 };
83 long long int r0302;
84 };
85 union {
86 struct {
87 unsigned long r04;
88 unsigned long r05;
89 };
90 long long int r0504;
91 };
92 union {
93 struct {
94 unsigned long r06;
95 unsigned long r07;
96 };
97 long long int r0706;
98 };
99 union {
100 struct {
101 unsigned long r08;
102 unsigned long r09;
103 };
104 long long int r0908;
105 };
106 union {
107 struct {
108 unsigned long r10;
109 unsigned long r11;
110 };
111 long long int r1110;
112 };
113 union {
114 struct {
115 unsigned long r12;
116 unsigned long r13;
117 };
118 long long int r1312;
119 };
120 union {
121 struct {
122 unsigned long r14;
123 unsigned long r15;
124 };
125 long long int r1514;
126 };
127 union {
128 struct {
129 unsigned long r16;
130 unsigned long r17;
131 };
132 long long int r1716;
133 };
134 union {
135 struct {
136 unsigned long r18;
137 unsigned long r19;
138 };
139 long long int r1918;
140 };
141 union {
142 struct {
143 unsigned long r20;
144 unsigned long r21;
145 };
146 long long int r2120;
147 };
148 union {
149 struct {
150 unsigned long r22;
151 unsigned long r23;
152 };
153 long long int r2322;
154 };
155 union {
156 struct {
157 unsigned long r24;
158 unsigned long r25;
159 };
160 long long int r2524;
161 };
162 union {
163 struct {
164 unsigned long r26;
165 unsigned long r27;
166 };
167 long long int r2726;
168 };
169 union {
170 struct {
171 unsigned long r28;
172 unsigned long r29;
173 };
174 long long int r2928;
175 };
176 union {
177 struct {
178 unsigned long r30;
179 unsigned long r31;
180 };
181 long long int r3130;
182 };
183 /* VM dispatch pushes event record onto stack - we can build on it */
184 struct hvm_event_record hvmer;
185};
186
187/* Defines to conveniently access the values */
188
189/*
190 * As of the VM spec 0.5, these registers are now set/retrieved via a
191 * VM call. On the in-bound side, we just fetch the values
192 * at the entry points and stuff them into the old record in pt_regs.
193 * However, on the outbound side, probably at VM rte, we set the
194 * registers back.
195 */
196
197#define pt_elr(regs) ((regs)->hvmer.vmel)
198#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
199#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
200#define user_mode(regs) \
201 (((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
202#define ints_enabled(regs) \
203 (((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
204#define pt_psp(regs) ((regs)->hvmer.vmpsp)
205#define pt_badva(regs) ((regs)->hvmer.vmbadva)
206
207#define pt_set_rte_sp(regs, sp) do {\
208 pt_psp(regs) = (sp);\
209 (regs)->SP = (unsigned long) &((regs)->hvmer);\
210 } while (0)
211
212#define pt_set_kmode(regs) \
213 (regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
214
215#define pt_set_usermode(regs) \
216 (regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
217 | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
218
219#endif /* ifndef __ASSEMBLY__ */
220
221#endif
diff --git a/arch/hexagon/include/uapi/asm/setup.h b/arch/hexagon/include/uapi/asm/setup.h
deleted file mode 100644
index e48285e4af9..00000000000
--- a/arch/hexagon/include/uapi/asm/setup.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_SETUP_H
20#define _ASM_SETUP_H
21
22#include <linux/init.h>
23#include <asm-generic/setup.h>
24
25extern char external_cmdline_buffer;
26
27void __init setup_arch_memory(void);
28
29#endif
diff --git a/arch/hexagon/include/uapi/asm/sigcontext.h b/arch/hexagon/include/uapi/asm/sigcontext.h
deleted file mode 100644
index b6ba5369ccc..00000000000
--- a/arch/hexagon/include/uapi/asm/sigcontext.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_SIGCONTEXT_H
20#define _ASM_SIGCONTEXT_H
21
22#include <asm/user.h>
23
24/*
25 * Signal context structure - contains all info to do with the state
26 * before the signal handler was invoked. Note: only add new entries
27 * to the end of the structure.
28 */
29struct sigcontext {
30 struct user_regs_struct sc_regs;
31} __aligned(8);
32
33#endif
diff --git a/arch/hexagon/include/uapi/asm/signal.h b/arch/hexagon/include/uapi/asm/signal.h
deleted file mode 100644
index 939556817d3..00000000000
--- a/arch/hexagon/include/uapi/asm/signal.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_SIGNAL_H
20#define _ASM_SIGNAL_H
21
22extern unsigned long __rt_sigtramp_template[2];
23
24#include <asm-generic/signal.h>
25
26#endif
diff --git a/arch/hexagon/include/uapi/asm/swab.h b/arch/hexagon/include/uapi/asm/swab.h
deleted file mode 100644
index 9069e9247a2..00000000000
--- a/arch/hexagon/include/uapi/asm/swab.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#ifndef _ASM_SWAB_H
20#define _ASM_SWAB_H
21
22#define __SWAB_64_THRU_32__
23
24#endif
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
deleted file mode 100644
index 4a87cc47075..00000000000
--- a/arch/hexagon/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Syscall support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21/*
22 * The kernel pulls this unistd.h in three different ways:
23 * 1. the "normal" way which gets all the __NR defines
24 * 2. with __SYSCALL defined to produce function declarations
25 * 3. with __SYSCALL defined to produce syscall table initialization
26 * See also: syscalltab.c
27 */
28
29#define sys_mmap2 sys_mmap_pgoff
30#define __ARCH_WANT_SYS_CLONE
31
32#include <asm-generic/unistd.h>
diff --git a/arch/hexagon/include/uapi/asm/user.h b/arch/hexagon/include/uapi/asm/user.h
deleted file mode 100644
index cef13ee1413..00000000000
--- a/arch/hexagon/include/uapi/asm/user.h
+++ /dev/null
@@ -1,63 +0,0 @@
1#ifndef HEXAGON_ASM_USER_H
2#define HEXAGON_ASM_USER_H
3
4/*
5 * Layout for registers passed in elf core dumps to userspace.
6 *
7 * Basically a rearranged subset of "pt_regs".
8 *
9 * Interested parties: libc, gdb...
10 */
11
12struct user_regs_struct {
13 unsigned long r0;
14 unsigned long r1;
15 unsigned long r2;
16 unsigned long r3;
17 unsigned long r4;
18 unsigned long r5;
19 unsigned long r6;
20 unsigned long r7;
21 unsigned long r8;
22 unsigned long r9;
23 unsigned long r10;
24 unsigned long r11;
25 unsigned long r12;
26 unsigned long r13;
27 unsigned long r14;
28 unsigned long r15;
29 unsigned long r16;
30 unsigned long r17;
31 unsigned long r18;
32 unsigned long r19;
33 unsigned long r20;
34 unsigned long r21;
35 unsigned long r22;
36 unsigned long r23;
37 unsigned long r24;
38 unsigned long r25;
39 unsigned long r26;
40 unsigned long r27;
41 unsigned long r28;
42 unsigned long r29;
43 unsigned long r30;
44 unsigned long r31;
45 unsigned long sa0;
46 unsigned long lc0;
47 unsigned long sa1;
48 unsigned long lc1;
49 unsigned long m0;
50 unsigned long m1;
51 unsigned long usr;
52 unsigned long p3_0;
53 unsigned long gp;
54 unsigned long ugp;
55 unsigned long pc;
56 unsigned long cause;
57 unsigned long badva;
58 unsigned long pad1; /* pad out to 48 words total */
59 unsigned long pad2; /* pad out to 48 words total */
60 unsigned long pad3; /* pad out to 48 words total */
61};
62
63#endif
diff --git a/arch/hexagon/kernel/Makefile b/arch/hexagon/kernel/Makefile
deleted file mode 100644
index 6c19501b487..00000000000
--- a/arch/hexagon/kernel/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
1extra-y := head.o vmlinux.lds
2
3obj-$(CONFIG_SMP) += smp.o topology.o
4
5obj-y += setup.o irq_cpu.o traps.o syscalltab.o signal.o time.o
6obj-y += process.o trampoline.o reset.o ptrace.o vdso.o
7
8obj-$(CONFIG_KGDB) += kgdb.o
9obj-$(CONFIG_MODULES) += module.o hexagon_ksyms.o
10
11# Modules required to work with the Hexagon Virtual Machine
12obj-y += vm_entry.o vm_events.o vm_switch.o vm_ops.o vm_init_segtable.o
13obj-y += vm_vectors.o
14
15obj-$(CONFIG_HAS_DMA) += dma.o
16
17obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/hexagon/kernel/asm-offsets.c b/arch/hexagon/kernel/asm-offsets.c
deleted file mode 100644
index 2d5e84d3b00..00000000000
--- a/arch/hexagon/kernel/asm-offsets.c
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Copyright (C) 1996 David S. Miller
3 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
4 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
5 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
6 * Copyright (C) 2000 MIPS Technologies, Inc.
7 *
8 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 and
12 * only version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 * 02110-1301, USA.
23 */
24
25#include <linux/compat.h>
26#include <linux/types.h>
27#include <linux/sched.h>
28#include <linux/mm.h>
29#include <linux/interrupt.h>
30#include <linux/kbuild.h>
31#include <asm/ptrace.h>
32#include <asm/processor.h>
33
34/* This file is used to produce asm/linker-script constants from header
35 files typically used in C. Specifically, it generates asm-offsets.h. */
36
37int main(void)
38{
39 COMMENT("This is a comment.");
40 /* might get these from somewhere else. */
41 DEFINE(_PAGE_SIZE, PAGE_SIZE);
42 DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
43 BLANK();
44
45 COMMENT("Hexagon pt_regs definitions");
46 OFFSET(_PT_SYSCALL_NR, pt_regs, syscall_nr);
47 OFFSET(_PT_UGPGP, pt_regs, ugpgp);
48 OFFSET(_PT_R3130, pt_regs, r3130);
49 OFFSET(_PT_R2928, pt_regs, r2928);
50 OFFSET(_PT_R2726, pt_regs, r2726);
51 OFFSET(_PT_R2524, pt_regs, r2524);
52 OFFSET(_PT_R2322, pt_regs, r2322);
53 OFFSET(_PT_R2120, pt_regs, r2120);
54 OFFSET(_PT_R1918, pt_regs, r1918);
55 OFFSET(_PT_R1716, pt_regs, r1716);
56 OFFSET(_PT_R1514, pt_regs, r1514);
57 OFFSET(_PT_R1312, pt_regs, r1312);
58 OFFSET(_PT_R1110, pt_regs, r1110);
59 OFFSET(_PT_R0908, pt_regs, r0908);
60 OFFSET(_PT_R0706, pt_regs, r0706);
61 OFFSET(_PT_R0504, pt_regs, r0504);
62 OFFSET(_PT_R0302, pt_regs, r0302);
63 OFFSET(_PT_R0100, pt_regs, r0100);
64 OFFSET(_PT_LC0SA0, pt_regs, lc0sa0);
65 OFFSET(_PT_LC1SA1, pt_regs, lc1sa1);
66 OFFSET(_PT_M1M0, pt_regs, m1m0);
67 OFFSET(_PT_PREDSUSR, pt_regs, predsusr);
68 OFFSET(_PT_EVREC, pt_regs, hvmer);
69 OFFSET(_PT_ER_VMEL, pt_regs, hvmer.vmel);
70 OFFSET(_PT_ER_VMEST, pt_regs, hvmer.vmest);
71 OFFSET(_PT_ER_VMPSP, pt_regs, hvmer.vmpsp);
72 OFFSET(_PT_ER_VMBADVA, pt_regs, hvmer.vmbadva);
73 DEFINE(_PT_REGS_SIZE, sizeof(struct pt_regs));
74 BLANK();
75
76 COMMENT("Hexagon thread_info definitions");
77 OFFSET(_THREAD_INFO_FLAGS, thread_info, flags);
78 OFFSET(_THREAD_INFO_PT_REGS, thread_info, regs);
79 OFFSET(_THREAD_INFO_SP, thread_info, sp);
80 DEFINE(_THREAD_SIZE, THREAD_SIZE);
81 BLANK();
82
83 COMMENT("Hexagon hexagon_switch_stack definitions");
84 OFFSET(_SWITCH_R1716, hexagon_switch_stack, r1716);
85 OFFSET(_SWITCH_R1918, hexagon_switch_stack, r1918);
86 OFFSET(_SWITCH_R2120, hexagon_switch_stack, r2120);
87 OFFSET(_SWITCH_R2322, hexagon_switch_stack, r2322);
88
89 OFFSET(_SWITCH_R2524, hexagon_switch_stack, r2524);
90 OFFSET(_SWITCH_R2726, hexagon_switch_stack, r2726);
91 OFFSET(_SWITCH_FP, hexagon_switch_stack, fp);
92 OFFSET(_SWITCH_LR, hexagon_switch_stack, lr);
93 DEFINE(_SWITCH_STACK_SIZE, sizeof(struct hexagon_switch_stack));
94 BLANK();
95
96 COMMENT("Hexagon task_struct definitions");
97 OFFSET(_TASK_THREAD_INFO, task_struct, stack);
98 OFFSET(_TASK_STRUCT_THREAD, task_struct, thread);
99
100 COMMENT("Hexagon thread_struct definitions");
101 OFFSET(_THREAD_STRUCT_SWITCH_SP, thread_struct, switch_sp);
102
103 return 0;
104}
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
deleted file mode 100644
index 65c7bdcf565..00000000000
--- a/arch/hexagon/kernel/dma.c
+++ /dev/null
@@ -1,222 +0,0 @@
1/*
2 * DMA implementation for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/dma-mapping.h>
22#include <linux/bootmem.h>
23#include <linux/genalloc.h>
24#include <asm/dma-mapping.h>
25#include <linux/module.h>
26
27struct dma_map_ops *dma_ops;
28EXPORT_SYMBOL(dma_ops);
29
30int bad_dma_address; /* globals are automatically initialized to zero */
31
32int dma_supported(struct device *dev, u64 mask)
33{
34 if (mask == DMA_BIT_MASK(32))
35 return 1;
36 else
37 return 0;
38}
39EXPORT_SYMBOL(dma_supported);
40
41int dma_set_mask(struct device *dev, u64 mask)
42{
43 if (!dev->dma_mask || !dma_supported(dev, mask))
44 return -EIO;
45
46 *dev->dma_mask = mask;
47
48 return 0;
49}
50EXPORT_SYMBOL(dma_set_mask);
51
52static struct gen_pool *coherent_pool;
53
54
55/* Allocates from a pool of uncached memory that was reserved at boot time */
56
57static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
58 dma_addr_t *dma_addr, gfp_t flag,
59 struct dma_attrs *attrs)
60{
61 void *ret;
62
63 if (coherent_pool == NULL) {
64 coherent_pool = gen_pool_create(PAGE_SHIFT, -1);
65
66 if (coherent_pool == NULL)
67 panic("Can't create %s() memory pool!", __func__);
68 else
69 gen_pool_add(coherent_pool,
70 (PAGE_OFFSET + (max_low_pfn << PAGE_SHIFT)),
71 hexagon_coherent_pool_size, -1);
72 }
73
74 ret = (void *) gen_pool_alloc(coherent_pool, size);
75
76 if (ret) {
77 memset(ret, 0, size);
78 *dma_addr = (dma_addr_t) (ret - PAGE_OFFSET);
79 } else
80 *dma_addr = ~0;
81
82 return ret;
83}
84
85static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
86 dma_addr_t dma_addr, struct dma_attrs *attrs)
87{
88 gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
89}
90
91static int check_addr(const char *name, struct device *hwdev,
92 dma_addr_t bus, size_t size)
93{
94 if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
95 if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
96 printk(KERN_ERR
97 "%s: overflow %Lx+%zu of device mask %Lx\n",
98 name, (long long)bus, size,
99 (long long)*hwdev->dma_mask);
100 return 0;
101 }
102 return 1;
103}
104
105static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
106 int nents, enum dma_data_direction dir,
107 struct dma_attrs *attrs)
108{
109 struct scatterlist *s;
110 int i;
111
112 WARN_ON(nents == 0 || sg[0].length == 0);
113
114 for_each_sg(sg, s, nents, i) {
115 s->dma_address = sg_phys(s);
116 if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
117 return 0;
118
119 s->dma_length = s->length;
120
121 flush_dcache_range(PAGE_OFFSET + s->dma_address,
122 PAGE_OFFSET + s->dma_address + s->length);
123 }
124
125 return nents;
126}
127
128/*
129 * address is virtual
130 */
131static inline void dma_sync(void *addr, size_t size,
132 enum dma_data_direction dir)
133{
134 switch (dir) {
135 case DMA_TO_DEVICE:
136 hexagon_clean_dcache_range((unsigned long) addr,
137 (unsigned long) addr + size);
138 break;
139 case DMA_FROM_DEVICE:
140 hexagon_inv_dcache_range((unsigned long) addr,
141 (unsigned long) addr + size);
142 break;
143 case DMA_BIDIRECTIONAL:
144 flush_dcache_range((unsigned long) addr,
145 (unsigned long) addr + size);
146 break;
147 default:
148 BUG();
149 }
150}
151
152static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
153{
154 return phys_to_virt((unsigned long) dma_addr);
155}
156
157/**
158 * hexagon_map_page() - maps an address for device DMA
159 * @dev: pointer to DMA device
160 * @page: pointer to page struct of DMA memory
161 * @offset: offset within page
162 * @size: size of memory to map
163 * @dir: transfer direction
164 * @attrs: pointer to DMA attrs (not used)
165 *
166 * Called to map a memory address to a DMA address prior
167 * to accesses to/from device.
168 *
169 * We don't particularly have many hoops to jump through
170 * so far. Straight translation between phys and virtual.
171 *
172 * DMA is not cache coherent so sync is necessary; this
173 * seems to be a convenient place to do it.
174 *
175 */
176static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
177 unsigned long offset, size_t size,
178 enum dma_data_direction dir,
179 struct dma_attrs *attrs)
180{
181 dma_addr_t bus = page_to_phys(page) + offset;
182 WARN_ON(size == 0);
183
184 if (!check_addr("map_single", dev, bus, size))
185 return bad_dma_address;
186
187 dma_sync(dma_addr_to_virt(bus), size, dir);
188
189 return bus;
190}
191
192static void hexagon_sync_single_for_cpu(struct device *dev,
193 dma_addr_t dma_handle, size_t size,
194 enum dma_data_direction dir)
195{
196 dma_sync(dma_addr_to_virt(dma_handle), size, dir);
197}
198
199static void hexagon_sync_single_for_device(struct device *dev,
200 dma_addr_t dma_handle, size_t size,
201 enum dma_data_direction dir)
202{
203 dma_sync(dma_addr_to_virt(dma_handle), size, dir);
204}
205
206struct dma_map_ops hexagon_dma_ops = {
207 .alloc = hexagon_dma_alloc_coherent,
208 .free = hexagon_free_coherent,
209 .map_sg = hexagon_map_sg,
210 .map_page = hexagon_map_page,
211 .sync_single_for_cpu = hexagon_sync_single_for_cpu,
212 .sync_single_for_device = hexagon_sync_single_for_device,
213 .is_phys = 1,
214};
215
216void __init hexagon_dma_init(void)
217{
218 if (dma_ops)
219 return;
220
221 dma_ops = &hexagon_dma_ops;
222}
diff --git a/arch/hexagon/kernel/head.S b/arch/hexagon/kernel/head.S
deleted file mode 100644
index d859402c73b..00000000000
--- a/arch/hexagon/kernel/head.S
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * Early kernel startup code for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 */
21
22#include <linux/linkage.h>
23#include <linux/init.h>
24#include <asm/asm-offsets.h>
25#include <asm/mem-layout.h>
26#include <asm/vm_mmu.h>
27#include <asm/page.h>
28
29 __INIT
30ENTRY(stext)
31 /*
32 * VMM will already have set up true vector page, MMU, etc.
33 * To set up initial kernel identity map, we have to pass
34 * the VMM a pointer to some canonical page tables. In
35 * this implementation, we're assuming that we've got
36 * them precompiled. Generate value in R24, as we'll need
37 * it again shortly.
38 */
39 r24.L = #LO(swapper_pg_dir)
40 r24.H = #HI(swapper_pg_dir)
41
42 /*
43 * Symbol is kernel segment address, but we need
44 * the logical/physical address.
45 */
46 r24 = asl(r24, #2)
47 r24 = lsr(r24, #2)
48
49 r0 = r24
50
51 /*
52 * Initialize a 16MB PTE to make the virtual and physical
53 * addresses where the kernel was loaded be identical.
54 */
55#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
56 | __HEXAGON_C_WB_L2 << 6 \
57 | __HVM_PDE_S_4MB)
58
59 r1 = pc
60 r2.H = #0xffc0
61 r2.L = #0x0000
62 r1 = and(r1,r2) /* round PC to 4MB boundary */
63 r2 = lsr(r1, #22) /* 4MB page number */
64 r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
65 r0 = add(r0,r2) /* r0 = address of correct PTE */
66 r2 = #PTE_BITS
67 r1 = add(r1,r2) /* r1 = 4MB PTE for the first entry */
68 r2.h = #0x0040
69 r2.l = #0x0000 /* 4MB */
70 memw(r0 ++ #4) = r1
71 r1 = add(r1, r2)
72 memw(r0 ++ #4) = r1
73
74 r0 = r24
75
76 /*
77 * The subroutine wrapper around the virtual instruction touches
78 * no memory, so we should be able to use it even here.
79 */
80 call __vmnewmap;
81
82 /* Jump into virtual address range. */
83
84 r31.h = #hi(__head_s_vaddr_target)
85 r31.l = #lo(__head_s_vaddr_target)
86 jumpr r31
87
88 /* Insert trippy space effects. */
89
90__head_s_vaddr_target:
91 /*
92 * Tear down VA=PA translation now that we are running
93 * in the designated kernel segments.
94 */
95 r0 = #__HVM_PDE_S_INVALID
96 r1 = r24
97 loop0(1f,#0x100)
981:
99 {
100 memw(R1 ++ #4) = R0
101 }:endloop0
102
103 r0 = r24
104 call __vmnewmap
105
106 /* Go ahead and install the trap0 return so angel calls work */
107 r0.h = #hi(_K_provisional_vec)
108 r0.l = #lo(_K_provisional_vec)
109 call __vmsetvec
110
111 /*
112 * OK, at this point we should start to be much more careful;
113 * we're going to enter C code and start touching memory
114 * in all sorts of places.
115 * This means:
116 * SGP needs to be OK
117 * Need to lock shared resources
118 * A bunch of other things that will cause
119 * all kinds of painful bugs
120 */
121
122 /*
123 * Stack pointer should be pointed at the init task's
124 * thread stack, which should have been declared in arch/init_task.c.
125 * So uhhhhh...
126 * It's accessible via the init_thread_union, which is a union
127 * of a thread_info struct and a stack; of course, the top
128 * of the stack is not for you. The end of the stack
129 * is simply init_thread_union + THREAD_SIZE.
130 */
131
132 {r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
133 {r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }
134
135 /* initialize the register used to point to current_thread_info */
136 /* Fixme: THREADINFO_REG can't be R2 because of that memset thing. */
137 {r29 = add(r29,r0); THREADINFO_REG = r29; }
138
139 /* Hack: zero bss; */
140 { r0.L = #LO(__bss_start); r1 = #0; r2.l = #LO(__bss_stop); }
141 { r0.H = #HI(__bss_start); r2.h = #HI(__bss_stop); }
142
143 r2 = sub(r2,r0);
144 call memset;
145
146 /* Time to make the doughnuts. */
147 call start_kernel
148
149 /*
150 * Should not reach here.
151 */
1521:
153 jump 1b
154
155.p2align PAGE_SHIFT
156ENTRY(external_cmdline_buffer)
157 .fill _PAGE_SIZE,1,0
158
159.data
160.p2align PAGE_SHIFT
161ENTRY(empty_zero_page)
162 .fill _PAGE_SIZE,1,0
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
deleted file mode 100644
index 32b1379d687..00000000000
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Export of symbols defined in assembly files and/or libgcc.
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <asm/hexagon_vm.h>
22#include <asm/uaccess.h>
23
24EXPORT_SYMBOL(__copy_from_user_hexagon);
25EXPORT_SYMBOL(__copy_to_user_hexagon);
26EXPORT_SYMBOL(__vmgetie);
27EXPORT_SYMBOL(__vmsetie);
28EXPORT_SYMBOL(memcpy);
29EXPORT_SYMBOL(memset);
30
31#define DECLARE_EXPORT(name) \
32 extern void name(void); EXPORT_SYMBOL(name)
33
34/* Symbols found in libgcc that assorted kernel modules need */
35DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes);
36
37DECLARE_EXPORT(__hexagon_divsi3);
38DECLARE_EXPORT(__hexagon_modsi3);
39DECLARE_EXPORT(__hexagon_udivsi3);
40DECLARE_EXPORT(__hexagon_umodsi3);
diff --git a/arch/hexagon/kernel/irq_cpu.c b/arch/hexagon/kernel/irq_cpu.c
deleted file mode 100644
index 85883e1fdc1..00000000000
--- a/arch/hexagon/kernel/irq_cpu.c
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * First-level interrupt controller model for Hexagon.
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/interrupt.h>
22#include <asm/irq.h>
23#include <asm/hexagon_vm.h>
24
25static void mask_irq(struct irq_data *data)
26{
27 __vmintop_locdis((long) data->irq);
28}
29
30static void mask_irq_num(unsigned int irq)
31{
32 __vmintop_locdis((long) irq);
33}
34
35static void unmask_irq(struct irq_data *data)
36{
37 __vmintop_locen((long) data->irq);
38}
39
40/* This is actually all we need for handle_fasteoi_irq */
41static void eoi_irq(struct irq_data *data)
42{
43 __vmintop_globen((long) data->irq);
44}
45
46/* Power management wake call. We don't need this; however, if it
47 * is absent, an -ENXIO error is returned to the
48 * msm_serial driver, and it fails to initialize correctly.
49 * This is a bug in the msm_serial driver, but, for now, we
50 * work around it here, by providing this bogus handler.
51 * XXX FIXME!!! remove this when msm_serial is fixed.
52 */
53static int set_wake(struct irq_data *data, unsigned int on)
54{
55 return 0;
56}
57
58static struct irq_chip hexagon_irq_chip = {
59 .name = "HEXAGON",
60 .irq_mask = mask_irq,
61 .irq_unmask = unmask_irq,
62 .irq_set_wake = set_wake,
63 .irq_eoi = eoi_irq
64};
65
66/**
67 * The hexagon core comes with a first-level interrupt controller
68 * with 32 total possible interrupts. When the core is embedded
69 * into different systems/platforms, it is typically wrapped by
70 * macro cells that provide one or more second-level interrupt
71 * controllers that are cascaded into one or more of the first-level
72 * interrupts handled here. The precise wiring of these other
73 * irqs varies from platform to platform, and is set up & configured
74 * in the platform-specific files.
75 *
76 * The first-level interrupt controller is wrapped by the VM, which
77 * virtualizes the interrupt controller for us. It provides a very
78 * simple, fast & efficient API, and so the fasteoi handler is
79 * appropriate for this case.
80 */
81void __init init_IRQ(void)
82{
83 int irq;
84
85 for (irq = 0; irq < HEXAGON_CPUINTS; irq++) {
86 mask_irq_num(irq);
87 irq_set_chip_and_handler(irq, &hexagon_irq_chip,
88 handle_fasteoi_irq);
89 }
90}
diff --git a/arch/hexagon/kernel/kgdb.c b/arch/hexagon/kernel/kgdb.c
deleted file mode 100644
index 34464537064..00000000000
--- a/arch/hexagon/kernel/kgdb.c
+++ /dev/null
@@ -1,254 +0,0 @@
1/*
2 * arch/hexagon/kernel/kgdb.c - Hexagon KGDB Support
3 *
4 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/kdebug.h>
22#include <linux/kgdb.h>
23
24/* All registers are 4 bytes, for now */
25#define GDB_SIZEOF_REG 4
26
27/* The register names are used during printing of the regs;
28 * Keep these at three letters to pretty-print. */
29struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
30 { " r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, r00)},
31 { " r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, r01)},
32 { " r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, r02)},
33 { " r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, r03)},
34 { " r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, r04)},
35 { " r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, r05)},
36 { " r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, r06)},
37 { " r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, r07)},
38 { " r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, r08)},
39 { " r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, r09)},
40 { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, r10)},
41 { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, r11)},
42 { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, r12)},
43 { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, r13)},
44 { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, r14)},
45 { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, r15)},
46 { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, r16)},
47 { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, r17)},
48 { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, r18)},
49 { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, r19)},
50 { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, r20)},
51 { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, r21)},
52 { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, r22)},
53 { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, r23)},
54 { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, r24)},
55 { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, r25)},
56 { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, r26)},
57 { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, r27)},
58 { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, r28)},
59 { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, r29)},
60 { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, r30)},
61 { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, r31)},
62
63 { "usr", GDB_SIZEOF_REG, offsetof(struct pt_regs, usr)},
64 { "preds", GDB_SIZEOF_REG, offsetof(struct pt_regs, preds)},
65 { " m0", GDB_SIZEOF_REG, offsetof(struct pt_regs, m0)},
66 { " m1", GDB_SIZEOF_REG, offsetof(struct pt_regs, m1)},
67 { "sa0", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa0)},
68 { "sa1", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa1)},
69 { "lc0", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc0)},
70 { "lc1", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc1)},
71 { " gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
72 { "ugp", GDB_SIZEOF_REG, offsetof(struct pt_regs, ugp)},
73 { "psp", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmpsp)},
74 { "elr", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmel)},
75 { "est", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmest)},
76 { "badva", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmbadva)},
77 { "restart_r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, restart_r0)},
78 { "syscall_nr", GDB_SIZEOF_REG, offsetof(struct pt_regs, syscall_nr)},
79};
80
81struct kgdb_arch arch_kgdb_ops = {
82 /* trap0(#0xDB) 0x0cdb0054 */
83 .gdb_bpt_instr = {0x54, 0x00, 0xdb, 0x0c},
84};
85
86char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
87{
88 if (regno >= DBG_MAX_REG_NUM || regno < 0)
89 return NULL;
90
91 *((unsigned long *) mem) = *((unsigned long *) ((void *)regs +
92 dbg_reg_def[regno].offset));
93
94 return dbg_reg_def[regno].name;
95}
96
97int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
98{
99 if (regno >= DBG_MAX_REG_NUM || regno < 0)
100 return -EINVAL;
101
102 *((unsigned long *) ((void *)regs + dbg_reg_def[regno].offset)) =
103 *((unsigned long *) mem);
104
105 return 0;
106}
107
108void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
109{
110 instruction_pointer(regs) = pc;
111}
112
113#ifdef CONFIG_SMP
114
115/**
116 * kgdb_roundup_cpus - Get other CPUs into a holding pattern
117 * @flags: Current IRQ state
118 *
119 * On SMP systems, we need to get the attention of the other CPUs
120 * and get them into a known state. This should do what is needed
121 * to get the other CPUs to call kgdb_wait(). Note that on some arches,
122 * the NMI approach is not used for rounding up all the CPUs. For example,
123 * in case of MIPS, smp_call_function() is used to roundup CPUs. In
124 * this case, we have to make sure that interrupts are enabled before
125 * calling smp_call_function(). The argument to this function is
126 * the flags that will be used when restoring the interrupts. There is
127 * a local_irq_save() call before kgdb_roundup_cpus().
128 *
129 * On non-SMP systems, this is not called.
130 */
131
132static void hexagon_kgdb_nmi_hook(void *ignored)
133{
134 kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
135}
136
137void kgdb_roundup_cpus(unsigned long flags)
138{
139 local_irq_enable();
140 smp_call_function(hexagon_kgdb_nmi_hook, NULL, 0);
141 local_irq_disable();
142}
143#endif
144
145
146/* Not yet working */
147void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
148 struct task_struct *task)
149{
150 struct pt_regs *thread_regs;
151
152 if (task == NULL)
153 return;
154
155 /* Initialize to zero */
156 memset(gdb_regs, 0, NUMREGBYTES);
157
158 /* Otherwise, we have only some registers from switch_to() */
159 thread_regs = task_pt_regs(task);
160 gdb_regs[0] = thread_regs->r00;
161}
162
163/**
164 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
165 * @vector: The error vector of the exception that happened.
166 * @signo: The signal number of the exception that happened.
167 * @err_code: The error code of the exception that happened.
168 * @remcom_in_buffer: The buffer of the packet we have read.
169 * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
170 * @regs: The &struct pt_regs of the current process.
171 *
172 * This function MUST handle the 'c' and 's' command packets,
173 * as well as packets to set / remove a hardware breakpoint, if used.
174 * If there are additional packets which the hardware needs to handle,
175 * they are handled here. The code should return -1 if it wants to
176 * process more packets, and a %0 or %1 if it wants to exit from the
177 * kgdb callback.
178 *
179 * Not yet working.
180 */
181int kgdb_arch_handle_exception(int vector, int signo, int err_code,
182 char *remcom_in_buffer, char *remcom_out_buffer,
183 struct pt_regs *linux_regs)
184{
185 switch (remcom_in_buffer[0]) {
186 case 's':
187 case 'c':
188 return 0;
189 }
190 /* Stay in the debugger. */
191 return -1;
192}
193
194static int __kgdb_notify(struct die_args *args, unsigned long cmd)
195{
196 /* cpu roundup */
197 if (atomic_read(&kgdb_active) != -1) {
198 kgdb_nmicallback(smp_processor_id(), args->regs);
199 return NOTIFY_STOP;
200 }
201
202 if (user_mode(args->regs))
203 return NOTIFY_DONE;
204
205 if (kgdb_handle_exception(args->trapnr & 0xff, args->signr, args->err,
206 args->regs))
207 return NOTIFY_DONE;
208
209 return NOTIFY_STOP;
210}
211
212static int
213kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
214{
215 unsigned long flags;
216 int ret;
217
218 local_irq_save(flags);
219 ret = __kgdb_notify(ptr, cmd);
220 local_irq_restore(flags);
221
222 return ret;
223}
224
225static struct notifier_block kgdb_notifier = {
226 .notifier_call = kgdb_notify,
227
228 /*
229 * Lowest-prio notifier priority, we want to be notified last:
230 */
231 .priority = -INT_MAX,
232};
233
234/**
235 * kgdb_arch_init - Perform any architecture specific initialization.
236 *
237 * This function will handle the initialization of any architecture
238 * specific callbacks.
239 */
240int kgdb_arch_init(void)
241{
242 return register_die_notifier(&kgdb_notifier);
243}
244
245/**
246 * kgdb_arch_exit - Perform any architecture specific uninitialization.
247 *
248 * This function will handle the uninitialization of any architecture
249 * specific callbacks, for dynamic registration and unregistration.
250 */
251void kgdb_arch_exit(void)
252{
253 unregister_die_notifier(&kgdb_notifier);
254}
diff --git a/arch/hexagon/kernel/module.c b/arch/hexagon/kernel/module.c
deleted file mode 100644
index 477d07a5646..00000000000
--- a/arch/hexagon/kernel/module.c
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * Kernel module loader for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <asm/module.h>
22#include <linux/elf.h>
23#include <linux/module.h>
24#include <linux/moduleloader.h>
25#include <linux/vmalloc.h>
26
27#if 0
28#define DEBUGP printk
29#else
30#define DEBUGP(fmt , ...)
31#endif
32
33/*
34 * module_frob_arch_sections - tweak got/plt sections.
35 * @hdr - pointer to elf header
36 * @sechdrs - pointer to elf load section headers
37 * @secstrings - section name strings
38 * @mod - pointer to module
39 */
40int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
41 char *secstrings,
42 struct module *mod)
43{
44 unsigned int i;
45 int found = 0;
46
47 /* Look for .plt and/or .got.plt and/or .init.plt sections */
48 for (i = 0; i < hdr->e_shnum; i++) {
49 DEBUGP("Section %d is %s\n", i,
50 secstrings + sechdrs[i].sh_name);
51 if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
52 found = i+1;
53 if (strcmp(secstrings + sechdrs[i].sh_name, ".got.plt") == 0)
54 found = i+1;
55 if (strcmp(secstrings + sechdrs[i].sh_name, ".rela.plt") == 0)
56 found = i+1;
57 }
58
59 /* At this time, we don't support modules compiled with -shared */
60 if (found) {
61 printk(KERN_WARNING
62 "Module '%s' contains unexpected .plt/.got sections.\n",
63 mod->name);
64 /* return -ENOEXEC; */
65 }
66
67 return 0;
68}
69
70/*
71 * apply_relocate_add - perform rela relocations.
72 * @sechdrs - pointer to section headers
73 * @strtab - string table for symbol names
74 * @symindex - section index of the symbol table
75 * @relsec - section index of the relocation section to apply
76 * @module - pointer to module
77 *
78 * Perform rela relocations.
79 */
80int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
81 unsigned int symindex, unsigned int relsec,
82 struct module *module)
83{
84 unsigned int i;
85 Elf32_Sym *sym;
86 uint32_t *location;
87 uint32_t value;
88 unsigned int nrelocs = sechdrs[relsec].sh_size / sizeof(Elf32_Rela);
89 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
90 Elf32_Word sym_info = sechdrs[relsec].sh_info;
91 Elf32_Sym *sym_base = (Elf32_Sym *) sechdrs[symindex].sh_addr;
92 void *loc_base = (void *) sechdrs[sym_info].sh_addr;
93
94 DEBUGP("Applying relocations in section %u to section %u base=%p\n",
95 relsec, sym_info, loc_base);
96
97 for (i = 0; i < nrelocs; i++) {
98
99 /* Symbol to relocate */
100 sym = sym_base + ELF32_R_SYM(rela[i].r_info);
101
102 /* Where to make the change */
103 location = loc_base + rela[i].r_offset;
104
105 /* `Everything is relative'. */
106 value = sym->st_value + rela[i].r_addend;
107
108 DEBUGP("%d: value=%08x loc=%p reloc=%d symbol=%s\n",
109 i, value, location, ELF32_R_TYPE(rela[i].r_info),
110 sym->st_name ?
111 &strtab[sym->st_name] : "(anonymous)");
112
113 switch (ELF32_R_TYPE(rela[i].r_info)) {
114 case R_HEXAGON_B22_PCREL: {
115 int dist = (int)(value - (uint32_t)location);
116 if ((dist < -0x00800000) ||
117 (dist >= 0x00800000)) {
118 printk(KERN_ERR
119 "%s: %s: %08x=%08x-%08x %s\n",
120 module->name,
121 "R_HEXAGON_B22_PCREL reloc out of range",
122 dist, value, (uint32_t)location,
123 sym->st_name ?
124 &strtab[sym->st_name] : "(anonymous)");
125 return -ENOEXEC;
126 }
127 DEBUGP("B22_PCREL contents: %08X.\n", *location);
128 *location &= ~0x01ff3fff;
129 *location |= 0x00003fff & dist;
130 *location |= 0x01ff0000 & (dist<<2);
131 DEBUGP("Contents after reloc: %08x\n", *location);
132 break;
133 }
134 case R_HEXAGON_HI16:
135 value = (value>>16) & 0xffff;
136 /* fallthrough */
137 case R_HEXAGON_LO16:
138 *location &= ~0x00c03fff;
139 *location |= value & 0x3fff;
140 *location |= (value & 0xc000) << 8;
141 break;
142 case R_HEXAGON_32:
143 *location = value;
144 break;
145 case R_HEXAGON_32_PCREL:
146 *location = value - (uint32_t)location;
147 break;
148 case R_HEXAGON_PLT_B22_PCREL:
149 case R_HEXAGON_GOTOFF_LO16:
150 case R_HEXAGON_GOTOFF_HI16:
151 printk(KERN_ERR "%s: GOT/PLT relocations unsupported\n",
152 module->name);
153 return -ENOEXEC;
154 default:
155 printk(KERN_ERR "%s: unknown relocation: %u\n",
156 module->name,
157 ELF32_R_TYPE(rela[i].r_info));
158 return -ENOEXEC;
159 }
160 }
161 return 0;
162}
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
deleted file mode 100644
index 06ae9ffcabd..00000000000
--- a/arch/hexagon/kernel/process.c
+++ /dev/null
@@ -1,204 +0,0 @@
1/*
2 * Process creation support for Hexagon
3 *
4 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/sched.h>
22#include <linux/types.h>
23#include <linux/module.h>
24#include <linux/tick.h>
25#include <linux/uaccess.h>
26#include <linux/slab.h>
27
28/*
29 * Program thread launch. Often defined as a macro in processor.h,
30 * but we're shooting for a small footprint and it's not an inner-loop
31 * performance-critical operation.
32 *
33 * The Hexagon ABI specifies that R28 is zero'ed before program launch,
34 * so that gets automatically done here. If we ever stop doing that here,
35 * we'll probably want to define the ELF_PLAT_INIT macro.
36 */
37void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
38{
39 /* Set to run with user-mode data segmentation */
40 set_fs(USER_DS);
41 /* We want to zero all data-containing registers. Is this overkill? */
42 memset(regs, 0, sizeof(*regs));
43 /* We might want to also zero all Processor registers here */
44 pt_set_usermode(regs);
45 pt_set_elr(regs, pc);
46 pt_set_rte_sp(regs, sp);
47}
48
49/*
50 * Spin, or better still, do a hardware or VM wait instruction, if the
51 * hardware or VM offers wait termination even though interrupts
52 * are disabled.
53 */
54static void default_idle(void)
55{
56 __vmwait();
57}
58
59void (*idle_sleep)(void) = default_idle;
60
61void cpu_idle(void)
62{
63 while (1) {
64 tick_nohz_idle_enter();
65 local_irq_disable();
66 while (!need_resched()) {
67 idle_sleep();
68 /* interrupts wake us up, but aren't serviced */
69 local_irq_enable(); /* service interrupt */
70 local_irq_disable();
71 }
72 local_irq_enable();
73 tick_nohz_idle_exit();
74 schedule();
75 }
76}
77
78/*
79 * Return saved PC of a blocked thread
80 */
81unsigned long thread_saved_pc(struct task_struct *tsk)
82{
83 return 0;
84}
85
86/*
87 * Copy architecture-specific thread state
88 */
89int copy_thread(unsigned long clone_flags, unsigned long usp,
90 unsigned long arg, struct task_struct *p)
91{
92 struct thread_info *ti = task_thread_info(p);
93 struct hexagon_switch_stack *ss;
94 struct pt_regs *childregs;
95 asmlinkage void ret_from_fork(void);
96
97 childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
98 sizeof(*childregs));
99
100 ti->regs = childregs;
101
102 /*
103 * Establish kernel stack pointer and initial PC for new thread
104 * Note that unlike the usual situation, we do not copy the
105 * parent's callee-saved here; those are in pt_regs and whatever
106 * we leave here will be overridden on return to userland.
107 */
108 ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
109 sizeof(*ss));
110 ss->lr = (unsigned long)ret_from_fork;
111 p->thread.switch_sp = ss;
112 if (unlikely(p->flags & PF_KTHREAD)) {
113 memset(childregs, 0, sizeof(struct pt_regs));
114 /* r24 <- fn, r25 <- arg */
115 ss->r2524 = usp | ((u64)arg << 32);
116 pt_set_kmode(childregs);
117 return 0;
118 }
119 memcpy(childregs, current_pt_regs(), sizeof(*childregs));
120 ss->r2524 = 0;
121
122 if (usp)
123 pt_set_rte_sp(childregs, usp);
124
125 /* Child sees zero return value */
126 childregs->r00 = 0;
127
128 /*
129 * The clone syscall has the C signature:
130 * int [r0] clone(int flags [r0],
131 * void *child_frame [r1],
132 * void *parent_tid [r2],
133 * void *child_tid [r3],
134 * void *thread_control_block [r4]);
135 * ugp is used to provide TLS support.
136 */
137 if (clone_flags & CLONE_SETTLS)
138 childregs->ugp = childregs->r04;
139
140 /*
141 * Parent sees new pid -- not necessary, not even possible at
142 * this point in the fork process
143 * Might also want to set things like ti->addr_limit
144 */
145
146 return 0;
147}
148
149/*
150 * Release any architecture-specific resources locked by thread
151 */
152void release_thread(struct task_struct *dead_task)
153{
154}
155
156/*
157 * Free any architecture-specific thread data structures, etc.
158 */
159void exit_thread(void)
160{
161}
162
163/*
164 * Some archs flush debug and FPU info here
165 */
166void flush_thread(void)
167{
168}
169
170/*
171 * The "wait channel" terminology is archaic, but what we want
172 * is an identification of the point at which the scheduler
173 * was invoked by a blocked thread.
174 */
175unsigned long get_wchan(struct task_struct *p)
176{
177 unsigned long fp, pc;
178 unsigned long stack_page;
179 int count = 0;
180 if (!p || p == current || p->state == TASK_RUNNING)
181 return 0;
182
183 stack_page = (unsigned long)task_stack_page(p);
184 fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
185 do {
186 if (fp < (stack_page + sizeof(struct thread_info)) ||
187 fp >= (THREAD_SIZE - 8 + stack_page))
188 return 0;
189 pc = ((unsigned long *)fp)[1];
190 if (!in_sched_functions(pc))
191 return pc;
192 fp = *(unsigned long *) fp;
193 } while (count++ < 16);
194
195 return 0;
196}
197
198/*
199 * Required placeholder.
200 */
201int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
202{
203 return 0;
204}
diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c
deleted file mode 100644
index 670b1b0bee6..00000000000
--- a/arch/hexagon/kernel/ptrace.c
+++ /dev/null
@@ -1,180 +0,0 @@
1/*
2 * Ptrace support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <generated/compile.h>
22
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/mm.h>
26#include <linux/smp.h>
27#include <linux/errno.h>
28#include <linux/ptrace.h>
29#include <linux/regset.h>
30#include <linux/user.h>
31#include <linux/elf.h>
32
33#include <asm/user.h>
34
35static int genregs_get(struct task_struct *target,
36 const struct user_regset *regset,
37 unsigned int pos, unsigned int count,
38 void *kbuf, void __user *ubuf)
39{
40 int ret;
41 unsigned int dummy;
42 struct pt_regs *regs = task_pt_regs(target);
43
44
45 if (!regs)
46 return -EIO;
47
48 /* The general idea here is that the copyout must happen in
49 * exactly the same order in which the userspace expects these
50 * regs. Now, the sequence in userspace does not match the
51 * sequence in the kernel, so everything past the 32 gprs
52 * happens one at a time.
53 */
54 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
55 &regs->r00, 0, 32*sizeof(unsigned long));
56
57#define ONEXT(KPT_REG, USR_REG) \
58 if (!ret) \
59 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
60 KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
61 offsetof(struct user_regs_struct, USR_REG) + \
62 sizeof(unsigned long));
63
64 /* Must be exactly same sequence as struct user_regs_struct */
65 ONEXT(&regs->sa0, sa0);
66 ONEXT(&regs->lc0, lc0);
67 ONEXT(&regs->sa1, sa1);
68 ONEXT(&regs->lc1, lc1);
69 ONEXT(&regs->m0, m0);
70 ONEXT(&regs->m1, m1);
71 ONEXT(&regs->usr, usr);
72 ONEXT(&regs->preds, p3_0);
73 ONEXT(&regs->gp, gp);
74 ONEXT(&regs->ugp, ugp);
75 ONEXT(&pt_elr(regs), pc);
76 dummy = pt_cause(regs);
77 ONEXT(&dummy, cause);
78 ONEXT(&pt_badva(regs), badva);
79
80 /* Pad the rest with zeros, if needed */
81 if (!ret)
82 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
83 offsetof(struct user_regs_struct, pad1), -1);
84 return ret;
85}
86
87static int genregs_set(struct task_struct *target,
88 const struct user_regset *regset,
89 unsigned int pos, unsigned int count,
90 const void *kbuf, const void __user *ubuf)
91{
92 int ret;
93 unsigned long bucket;
94 struct pt_regs *regs = task_pt_regs(target);
95
96 if (!regs)
97 return -EIO;
98
99 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
100 &regs->r00, 0, 32*sizeof(unsigned long));
101
102#define INEXT(KPT_REG, USR_REG) \
103 if (!ret) \
104 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
105 KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
106 offsetof(struct user_regs_struct, USR_REG) + \
107 sizeof(unsigned long));
108
109 /* Must be exactly same sequence as struct user_regs_struct */
110 INEXT(&regs->sa0, sa0);
111 INEXT(&regs->lc0, lc0);
112 INEXT(&regs->sa1, sa1);
113 INEXT(&regs->lc1, lc1);
114 INEXT(&regs->m0, m0);
115 INEXT(&regs->m1, m1);
116 INEXT(&regs->usr, usr);
117 INEXT(&regs->preds, p3_0);
118 INEXT(&regs->gp, gp);
119 INEXT(&regs->ugp, ugp);
120 INEXT(&pt_elr(regs), pc);
121
122 /* CAUSE and BADVA aren't writeable. */
123 INEXT(&bucket, cause);
124 INEXT(&bucket, badva);
125
126 /* Ignore the rest, if needed */
127 if (!ret)
128 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
129 offsetof(struct user_regs_struct, pad1), -1);
130
131 if (ret)
132 return ret;
133
134 /*
135 * This is special; SP is actually restored by the VM via the
136 * special event record which is set by the special trap.
137 */
138 regs->hvmer.vmpsp = regs->r29;
139 return 0;
140}
141
142enum hexagon_regset {
143 REGSET_GENERAL,
144};
145
146static const struct user_regset hexagon_regsets[] = {
147 [REGSET_GENERAL] = {
148 .core_note_type = NT_PRSTATUS,
149 .n = ELF_NGREG,
150 .size = sizeof(unsigned long),
151 .align = sizeof(unsigned long),
152 .get = genregs_get,
153 .set = genregs_set,
154 },
155};
156
157static const struct user_regset_view hexagon_user_view = {
158 .name = UTS_MACHINE,
159 .e_machine = ELF_ARCH,
160 .ei_osabi = ELF_OSABI,
161 .regsets = hexagon_regsets,
162 .n = ARRAY_SIZE(hexagon_regsets)
163};
164
165const struct user_regset_view *task_user_regset_view(struct task_struct *task)
166{
167 return &hexagon_user_view;
168}
169
170void ptrace_disable(struct task_struct *child)
171{
172 /* Boilerplate - resolves to null inline if no HW single-step */
173 user_disable_single_step(child);
174}
175
176long arch_ptrace(struct task_struct *child, long request,
177 unsigned long addr, unsigned long data)
178{
179 return ptrace_request(child, request, addr, data);
180}
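/*
 * Illustrative userspace sketch, not part of this patch: reading the
 * register set that genregs_get() above exports, via PTRACE_GETREGSET
 * with NT_PRSTATUS. The helper name dump_pc() is hypothetical; the 'pc'
 * member comes from struct user_regs_struct as referenced by the ONEXT()
 * sequence above, and the snippet only builds with a hexagon toolchain.
 */
#include <stdio.h>
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static int dump_pc(pid_t pid)
{
	struct user_regs_struct gregs;
	struct iovec iov = {
		.iov_base = &gregs,
		.iov_len  = sizeof(gregs),
	};

	/* The kernel fills iov through genregs_get(); iov_len is clamped. */
	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == -1)
		return -1;

	printf("pc = 0x%lx\n", (unsigned long)gregs.pc);
	return 0;
}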
diff --git a/arch/hexagon/kernel/reset.c b/arch/hexagon/kernel/reset.c
deleted file mode 100644
index 6aeabc962b3..00000000000
--- a/arch/hexagon/kernel/reset.c
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#include <linux/smp.h>
20#include <asm/hexagon_vm.h>
21
22void machine_power_off(void)
23{
24 smp_send_stop();
25 __vmstop();
26}
27
28void machine_halt(void)
29{
30}
31
32void machine_restart(char *cmd)
33{
34}
35
36void pm_power_off(void)
37{
38}
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
deleted file mode 100644
index 94a38783500..00000000000
--- a/arch/hexagon/kernel/setup.c
+++ /dev/null
@@ -1,145 +0,0 @@
1/*
2 * Arch related setup for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/bootmem.h>
23#include <linux/mmzone.h>
24#include <linux/mm.h>
25#include <linux/seq_file.h>
26#include <linux/console.h>
27#include <linux/of_fdt.h>
28#include <asm/io.h>
29#include <asm/sections.h>
30#include <asm/setup.h>
31#include <asm/processor.h>
32#include <asm/hexagon_vm.h>
33#include <asm/vm_mmu.h>
34#include <asm/time.h>
35#ifdef CONFIG_OF
36#include <asm/prom.h>
37#endif
38
39char cmd_line[COMMAND_LINE_SIZE];
40static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
41
42int on_simulator;
43
44void __cpuinit calibrate_delay(void)
45{
46 loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
47}
48
49/*
50 * setup_arch - high level architectural setup routine
51 * @cmdline_p: pointer to pointer to command-line arguments
52 */
53
54void __init setup_arch(char **cmdline_p)
55{
56 char *p = &external_cmdline_buffer;
57
58 /*
59 * These will eventually be pulled in via either some hypervisor
60 * or devicetree description. Hardwiring for now.
61 */
62 pcycle_freq_mhz = 600;
63 thread_freq_mhz = 100;
64 sleep_clk_freq = 32000;
65
66 /*
67 * Set up event bindings to handle exceptions and interrupts.
68 */
69 __vmsetvec(_K_VM_event_vector);
70
71 /*
72 * Simulator has a few differences from the hardware.
73 * For now, check uninitialized-but-mapped memory
74 * prior to invoking setup_arch_memory().
75 */
76 if (*(int *)((unsigned long)_end + 8) == 0x1f1f1f1f)
77 on_simulator = 1;
78 else
79 on_simulator = 0;
80
81 if (p[0] != '\0')
82 strlcpy(boot_command_line, p, COMMAND_LINE_SIZE);
83 else
84 strlcpy(boot_command_line, default_command_line,
85 COMMAND_LINE_SIZE);
86
87 /*
88 * boot_command_line and the value set up by setup_arch
89 * are both picked up by the init code. If no reason to
90 * make them different, pass the same pointer back.
91 */
92 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
93 *cmdline_p = cmd_line;
94
95 parse_early_param();
96
97 setup_arch_memory();
98
99#ifdef CONFIG_SMP
100 smp_start_cpus();
101#endif
102}
103
104/*
105 * Functions for dumping CPU info via /proc
106 * Probably should move to kernel/proc.c or something.
107 */
108static void *c_start(struct seq_file *m, loff_t *pos)
109{
110 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
111}
112
113static void *c_next(struct seq_file *m, void *v, loff_t *pos)
114{
115 ++*pos;
116 return c_start(m, pos);
117}
118
119static void c_stop(struct seq_file *m, void *v)
120{
121}
122
123/*
124 * Eventually this will dump information about
125 * CPU properties like ISA level, TLB size, etc.
126 */
127static int show_cpuinfo(struct seq_file *m, void *v)
128{
129 int cpu = (unsigned long) v - 1;
130
131 seq_printf(m, "processor\t: %d\n", cpu);
132 seq_printf(m, "model name\t: Hexagon Virtual Machine\n");
133 seq_printf(m, "BogoMips\t: %lu.%02lu\n",
134 (loops_per_jiffy * HZ) / 500000,
135 ((loops_per_jiffy * HZ) / 5000) % 100);
136 seq_printf(m, "\n");
137 return 0;
138}
139
140const struct seq_operations cpuinfo_op = {
141 .start = &c_start,
142 .next = &c_next,
143 .stop = &c_stop,
144 .show = &show_cpuinfo,
145};
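/*
 * Worked example, not part of this patch: with thread_freq_mhz = 100
 * hardwired in setup_arch() above, the calibrate_delay() and
 * show_cpuinfo() formulas yield a fixed BogoMIPS value of 200.00.
 * Standalone userspace arithmetic check:
 */
#include <stdio.h>

int main(void)
{
	const unsigned long hz = 100;			/* HZ value cancels out */
	const unsigned long thread_freq_mhz = 100;	/* from setup_arch() */
	unsigned long loops_per_jiffy = thread_freq_mhz * 1000000 / hz;

	/* Same arithmetic as show_cpuinfo(): prints "BogoMips: 200.00" */
	printf("BogoMips: %lu.%02lu\n",
	       (loops_per_jiffy * hz) / 500000,
	       ((loops_per_jiffy * hz) / 5000) % 100);
	return 0;
}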
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
deleted file mode 100644
index fe0d1373165..00000000000
--- a/arch/hexagon/kernel/signal.c
+++ /dev/null
@@ -1,306 +0,0 @@
1/*
2 * Signal support for Hexagon processor
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/linkage.h>
22#include <linux/syscalls.h>
23#include <linux/tracehook.h>
24#include <asm/registers.h>
25#include <asm/thread_info.h>
26#include <asm/unistd.h>
27#include <asm/uaccess.h>
28#include <asm/ucontext.h>
29#include <asm/cacheflush.h>
30#include <asm/signal.h>
31#include <asm/vdso.h>
32
33struct rt_sigframe {
34 unsigned long tramp[2];
35 struct siginfo info;
36 struct ucontext uc;
37};
38
39static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
40 size_t frame_size)
41{
42 unsigned long sp = regs->r29;
43
44 /* Switch to signal stack if appropriate */
45 if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
46 sp = current->sas_ss_sp + current->sas_ss_size;
47
48 return (void __user *)((sp - frame_size) & ~(sizeof(long long) - 1));
49}
50
51static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
52{
53 unsigned long tmp;
54 int err = 0;
55
56 err |= copy_to_user(&sc->sc_regs.r0, &regs->r00,
57 32*sizeof(unsigned long));
58
59 err |= __put_user(regs->sa0, &sc->sc_regs.sa0);
60 err |= __put_user(regs->lc0, &sc->sc_regs.lc0);
61 err |= __put_user(regs->sa1, &sc->sc_regs.sa1);
62 err |= __put_user(regs->lc1, &sc->sc_regs.lc1);
63 err |= __put_user(regs->m0, &sc->sc_regs.m0);
64 err |= __put_user(regs->m1, &sc->sc_regs.m1);
65 err |= __put_user(regs->usr, &sc->sc_regs.usr);
66 err |= __put_user(regs->preds, &sc->sc_regs.p3_0);
67 err |= __put_user(regs->gp, &sc->sc_regs.gp);
68 err |= __put_user(regs->ugp, &sc->sc_regs.ugp);
69
70 tmp = pt_elr(regs); err |= __put_user(tmp, &sc->sc_regs.pc);
71 tmp = pt_cause(regs); err |= __put_user(tmp, &sc->sc_regs.cause);
72 tmp = pt_badva(regs); err |= __put_user(tmp, &sc->sc_regs.badva);
73
74 return err;
75}
76
77static int restore_sigcontext(struct pt_regs *regs,
78 struct sigcontext __user *sc)
79{
80 unsigned long tmp;
81 int err = 0;
82
83 err |= copy_from_user(&regs->r00, &sc->sc_regs.r0,
84 32 * sizeof(unsigned long));
85
86 err |= __get_user(regs->sa0, &sc->sc_regs.sa0);
87 err |= __get_user(regs->lc0, &sc->sc_regs.lc0);
88 err |= __get_user(regs->sa1, &sc->sc_regs.sa1);
89 err |= __get_user(regs->lc1, &sc->sc_regs.lc1);
90 err |= __get_user(regs->m0, &sc->sc_regs.m0);
91 err |= __get_user(regs->m1, &sc->sc_regs.m1);
92 err |= __get_user(regs->usr, &sc->sc_regs.usr);
93 err |= __get_user(regs->preds, &sc->sc_regs.p3_0);
94 err |= __get_user(regs->gp, &sc->sc_regs.gp);
95 err |= __get_user(regs->ugp, &sc->sc_regs.ugp);
96
97 err |= __get_user(tmp, &sc->sc_regs.pc); pt_set_elr(regs, tmp);
98
99 return err;
100}
101
102/*
103 * Setup signal stack frame with siginfo structure
104 */
105static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
106 sigset_t *set, struct pt_regs *regs)
107{
108 int err = 0;
109 struct rt_sigframe __user *frame;
110 struct hexagon_vdso *vdso = current->mm->context.vdso;
111
112 frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe));
113
114 if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe)))
115 goto sigsegv;
116
117 if (copy_siginfo_to_user(&frame->info, info))
118 goto sigsegv;
119
120 /* The on-stack signal trampoline is no longer executed;
121 * however, the libgcc signal frame unwinding code checks for
122 * the presence of these two numeric magic values.
123 */
124 err |= __put_user(0x7800d166, &frame->tramp[0]);
125 err |= __put_user(0x5400c004, &frame->tramp[1]);
126 err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
127 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
128 if (err)
129 goto sigsegv;
130
131	/* Load r0/r1 pair with signal number/siginfo pointer... */
132 regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32)
133 | (unsigned long long)signr;
134 regs->r02 = (unsigned long) &frame->uc;
135 regs->r31 = (unsigned long) vdso->rt_signal_trampoline;
136 pt_psp(regs) = (unsigned long) frame;
137 pt_set_elr(regs, (unsigned long)ka->sa.sa_handler);
138
139 return 0;
140
141sigsegv:
142 force_sigsegv(signr, current);
143 return -EFAULT;
144}
145
146/*
147 * Setup invocation of signal handler
148 */
149static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
150 struct pt_regs *regs)
151{
152 /*
153 * If we're handling a signal that aborted a system call,
154 * set up the error return value before adding the signal
155 * frame to the stack.
156 */
157
158 if (regs->syscall_nr >= 0) {
159 switch (regs->r00) {
160 case -ERESTART_RESTARTBLOCK:
161 case -ERESTARTNOHAND:
162 regs->r00 = -EINTR;
163 break;
164 case -ERESTARTSYS:
165 if (!(ka->sa.sa_flags & SA_RESTART)) {
166 regs->r00 = -EINTR;
167 break;
168 }
169 /* Fall through */
170 case -ERESTARTNOINTR:
171 regs->r06 = regs->syscall_nr;
172 pt_set_elr(regs, pt_elr(regs) - 4);
173 regs->r00 = regs->restart_r0;
174 break;
175 default:
176 break;
177 }
178 }
179
180 /*
181 * Set up the stack frame; not doing the SA_SIGINFO thing. We
182 * only set up the rt_frame flavor.
183 */
184 /* If there was an error on setup, no signal was delivered. */
185 if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
186 return;
187
188 signal_delivered(sig, info, ka, regs,
189 test_thread_flag(TIF_SINGLESTEP));
190}
191
192/*
193 * Called from return-from-event code.
194 */
195static void do_signal(struct pt_regs *regs)
196{
197 struct k_sigaction sigact;
198 siginfo_t info;
199 int signo;
200
201 if (!user_mode(regs))
202 return;
203
204 signo = get_signal_to_deliver(&info, &sigact, regs, NULL);
205
206 if (signo > 0) {
207 handle_signal(signo, &info, &sigact, regs);
208 return;
209 }
210
211 /*
212 * If we came from a system call, handle the restart.
213 */
214 if (regs->syscall_nr >= 0) {
215 switch (regs->r00) {
216 case -ERESTARTNOHAND:
217 case -ERESTARTSYS:
218 case -ERESTARTNOINTR:
219 regs->r06 = regs->syscall_nr;
220 break;
221 case -ERESTART_RESTARTBLOCK:
222 regs->r06 = __NR_restart_syscall;
223 break;
224 default:
225 goto no_restart;
226 }
227 pt_set_elr(regs, pt_elr(regs) - 4);
228 regs->r00 = regs->restart_r0;
229 }
230
231no_restart:
232 /* If there's no signal to deliver, put the saved sigmask back */
233 restore_saved_sigmask();
234}
235
236void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
237{
238 if (thread_info_flags & _TIF_SIGPENDING)
239 do_signal(regs);
240
241 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
242 clear_thread_flag(TIF_NOTIFY_RESUME);
243 tracehook_notify_resume(regs);
244 }
245}
246
247/*
248 * Architecture-specific wrappers for signal-related system calls
249 */
250asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
251{
252 struct pt_regs *regs = current_pt_regs();
253
254 return do_sigaltstack(uss, uoss, regs->r29);
255}
256
257asmlinkage int sys_rt_sigreturn(void)
258{
259 struct pt_regs *regs = current_pt_regs();
260 struct rt_sigframe __user *frame;
261 sigset_t blocked;
262
263 /* Always make any pending restarted system calls return -EINTR */
264 current_thread_info()->restart_block.fn = do_no_restart_syscall;
265
266 frame = (struct rt_sigframe __user *)pt_psp(regs);
267 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
268 goto badframe;
269 if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
270 goto badframe;
271
272 set_current_blocked(&blocked);
273
274 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
275 goto badframe;
276
277 /* Restore the user's stack as well */
278 pt_psp(regs) = regs->r29;
279
280 /*
281 * Leave a trace in the stack frame that this was a sigreturn.
282 * If the system call is to replay, we've already restored the
283 * number in the GPR slot and it will be regenerated on the
284 * new system call trap entry. Note that if restore_sigcontext()
285 * did something other than a bulk copy of the pt_regs struct,
286 * we could avoid this assignment by simply not overwriting
287 * regs->syscall_nr.
288 */
289 regs->syscall_nr = __NR_rt_sigreturn;
290
291 /*
292 * If we were meticulous, we'd only call this if we knew that
293 * we were actually going to use an alternate stack, and we'd
294 * consider any error to be fatal. What we do here, in common
295 * with many other architectures, is call it blindly and only
296 * consider the -EFAULT return case to be proof of a problem.
297 */
298 if (do_sigaltstack(&frame->uc.uc_stack, NULL, pt_psp(regs)) == -EFAULT)
299 goto badframe;
300
301 return 0;
302
303badframe:
304 force_sig(SIGSEGV, current);
305 return 0;
306}
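/*
 * Illustrative userspace sketch, not part of this patch: a SA_SIGINFO
 * handler running on an alternate stack. This exercises the path where
 * get_sigframe() above switches to current->sas_ss_sp and setup_rt_frame()
 * builds the rt_sigframe there. Helper names are hypothetical; the API is
 * plain POSIX.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_usr1(int sig, siginfo_t *info, void *ucontext)
{
	/* r0/r1 hold the signal number and &frame->info, r2 points at &frame->uc */
	printf("sig %d from pid %d\n", sig, (int)info->si_pid);
	(void)ucontext;
}

int main(void)
{
	stack_t ss = {
		.ss_sp   = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
	};
	struct sigaction sa = {
		.sa_sigaction = on_usr1,
		.sa_flags     = SA_SIGINFO | SA_ONSTACK,
	};

	sigaltstack(&ss, NULL);
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}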
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
deleted file mode 100644
index 8e095dffd07..00000000000
--- a/arch/hexagon/kernel/smp.c
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * SMP support for Hexagon
3 *
4 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/err.h>
22#include <linux/errno.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/module.h>
27#include <linux/percpu.h>
28#include <linux/sched.h>
29#include <linux/smp.h>
30#include <linux/spinlock.h>
31#include <linux/cpu.h>
32
33#include <asm/time.h> /* timer_interrupt */
34#include <asm/hexagon_vm.h>
35
36#define BASE_IPI_IRQ 26
37
38/*
39 * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas()
40 * (which runs before any of our smp_prepare_cpus() work), so that the
41 * per_cpu areas can be set up.
42 */
43
44struct ipi_data {
45 unsigned long bits;
46};
47
48static DEFINE_PER_CPU(struct ipi_data, ipi_data);
49
50static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
51 int cpu)
52{
53 unsigned long msg = 0;
54 do {
55 msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
56
57 switch (msg) {
58
59 case IPI_TIMER:
60 ipi_timer();
61 break;
62
63 case IPI_CALL_FUNC:
64 generic_smp_call_function_interrupt();
65 break;
66
67 case IPI_CALL_FUNC_SINGLE:
68 generic_smp_call_function_single_interrupt();
69 break;
70
71 case IPI_CPU_STOP:
72 /*
73 * call vmstop()
74 */
75 __vmstop();
76 break;
77
78 case IPI_RESCHEDULE:
79 scheduler_ipi();
80 break;
81 }
82 } while (msg < BITS_PER_LONG);
83}
84
85/* Used for IPI calls from other CPUs to unmask an interrupt */
86void smp_vm_unmask_irq(void *info)
87{
88 __vmintop_locen((long) info);
89}
90
91
92/*
93 * Based on Alpha's IPI support.
94 * Takes (int, void *) as arguments:
95 * the first is the irq number, the second the irq_desc.
96 */
97
98irqreturn_t handle_ipi(int irq, void *desc)
99{
100 int cpu = smp_processor_id();
101 struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
102 unsigned long ops;
103
104 while ((ops = xchg(&ipi->bits, 0)) != 0)
105 __handle_ipi(&ops, ipi, cpu);
106 return IRQ_HANDLED;
107}
108
109void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
110{
111 unsigned long flags;
112 unsigned long cpu;
113 unsigned long retval;
114
115 local_irq_save(flags);
116
117 for_each_cpu(cpu, cpumask) {
118 struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
119
120 set_bit(msg, &ipi->bits);
121 /* Possible barrier here */
122 retval = __vmintop_post(BASE_IPI_IRQ+cpu);
123
124 if (retval != 0) {
125			printk(KERN_ERR "interrupt %lu not configured?\n",
126 BASE_IPI_IRQ+cpu);
127 }
128 }
129
130 local_irq_restore(flags);
131}
132
133static struct irqaction ipi_intdesc = {
134 .handler = handle_ipi,
135 .flags = IRQF_TRIGGER_RISING,
136 .name = "ipi_handler"
137};
138
139void __init smp_prepare_boot_cpu(void)
140{
141}
142
143/*
144 * interrupts should already be disabled from the VM
145 * SP should already be correct; need to set THREADINFO_REG
146 * to point to current thread info
147 */
148
149void __cpuinit start_secondary(void)
150{
151 unsigned int cpu;
152 unsigned long thread_ptr;
153
154 /* Calculate thread_info pointer from stack pointer */
155 __asm__ __volatile__(
156 "%0 = SP;\n"
157 : "=r" (thread_ptr)
158 );
159
160 thread_ptr = thread_ptr & ~(THREAD_SIZE-1);
161
162 __asm__ __volatile__(
163 QUOTED_THREADINFO_REG " = %0;\n"
164 :
165 : "r" (thread_ptr)
166 );
167
168 /* Set the memory struct */
169 atomic_inc(&init_mm.mm_count);
170 current->active_mm = &init_mm;
171
172 cpu = smp_processor_id();
173
174 setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc);
175
176 /* Register the clock_event dummy */
177 setup_percpu_clockdev();
178
179 printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
180
181 notify_cpu_starting(cpu);
182
183 set_cpu_online(cpu, true);
184
185 local_irq_enable();
186
187 cpu_idle();
188}
189
190
191/*
192 * Called once for each present cpu:
193 * starts up the CPU and then spins, retaining control,
194 * until "cpu_online(cpu)" is set.
195 */
196
197int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
198{
199 struct thread_info *thread = (struct thread_info *)idle->stack;
200 void *stack_start;
201
202 thread->cpu = cpu;
203
204 /* Boot to the head. */
205 stack_start = ((void *) thread) + THREAD_SIZE;
206 __vmstart(start_secondary, stack_start);
207
208 while (!cpu_online(cpu))
209 barrier();
210
211 return 0;
212}
213
214void __init smp_cpus_done(unsigned int max_cpus)
215{
216}
217
218void __init smp_prepare_cpus(unsigned int max_cpus)
219{
220 int i;
221
222 /*
223 * should eventually have some sort of machine
224 * descriptor that has this stuff
225 */
226
227 /* Right now, let's just fake it. */
228 for (i = 0; i < max_cpus; i++)
229 set_cpu_present(i, true);
230
231 /* Also need to register the interrupts for IPI */
232 if (max_cpus > 1)
233 setup_irq(BASE_IPI_IRQ, &ipi_intdesc);
234}
235
236void smp_send_reschedule(int cpu)
237{
238 send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
239}
240
241void smp_send_stop(void)
242{
243 struct cpumask targets;
244 cpumask_copy(&targets, cpu_online_mask);
245 cpumask_clear_cpu(smp_processor_id(), &targets);
246 send_ipi(&targets, IPI_CPU_STOP);
247}
248
249void arch_send_call_function_single_ipi(int cpu)
250{
251 send_ipi(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
252}
253
254void arch_send_call_function_ipi_mask(const struct cpumask *mask)
255{
256 send_ipi(mask, IPI_CALL_FUNC);
257}
258
259int setup_profiling_timer(unsigned int multiplier)
260{
261 return -EINVAL;
262}
263
264void smp_start_cpus(void)
265{
266 int i;
267
268 for (i = 0; i < NR_CPUS; i++)
269 set_cpu_possible(i, true);
270}
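/*
 * Illustrative kernel-side sketch, not part of this patch: a cross-CPU
 * call that reaches arch_send_call_function_single_ipi() above and is
 * dispatched as IPI_CALL_FUNC_SINGLE by __handle_ipi() on the target.
 * The function names say_hello()/poke_cpu() are hypothetical.
 */
#include <linux/smp.h>
#include <linux/printk.h>

static void say_hello(void *info)
{
	pr_info("hello from cpu %d (arg %p)\n", smp_processor_id(), info);
}

static void poke_cpu(int cpu)
{
	/* wait=1: spin until say_hello() has finished on the target CPU */
	smp_call_function_single(cpu, say_hello, NULL, 1);
}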
diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c
deleted file mode 100644
index f94918b449a..00000000000
--- a/arch/hexagon/kernel/stacktrace.c
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * Stacktrace support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/sched.h>
22#include <linux/stacktrace.h>
23#include <linux/thread_info.h>
24#include <linux/module.h>
25
26register unsigned long current_frame_pointer asm("r30");
27
28struct stackframe {
29 unsigned long fp;
30 unsigned long rets;
31};
32
33/*
34 * Save stack-backtrace addresses into a stack_trace buffer.
35 */
36void save_stack_trace(struct stack_trace *trace)
37{
38 unsigned long low, high;
39 unsigned long fp;
40 struct stackframe *frame;
41 int skip = trace->skip;
42
43 low = (unsigned long)task_stack_page(current);
44 high = low + THREAD_SIZE;
45 fp = current_frame_pointer;
46
47 while (fp >= low && fp <= (high - sizeof(*frame))) {
48 frame = (struct stackframe *)fp;
49
50 if (skip) {
51 skip--;
52 } else {
53 trace->entries[trace->nr_entries++] = frame->rets;
54 if (trace->nr_entries >= trace->max_entries)
55 break;
56 }
57
58 /*
59 * The next frame must be at a higher address than the
60 * current frame.
61 */
62 low = fp + sizeof(*frame);
63 fp = frame->fp;
64 }
65}
66EXPORT_SYMBOL_GPL(save_stack_trace);
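/*
 * Illustrative kernel-side sketch, not part of this patch: capturing and
 * printing a backtrace with the save_stack_trace() implementation above,
 * using the struct stack_trace API of this kernel generation. The helper
 * name show_my_trace() is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void show_my_trace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* skip show_my_trace() itself */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
}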
diff --git a/arch/hexagon/kernel/syscalltab.c b/arch/hexagon/kernel/syscalltab.c
deleted file mode 100644
index 7024b1ddc08..00000000000
--- a/arch/hexagon/kernel/syscalltab.c
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * System call table for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/syscalls.h>
22#include <linux/signal.h>
23#include <linux/unistd.h>
24
25#include <asm/syscall.h>
26
27#undef __SYSCALL
28#define __SYSCALL(nr, call) [nr] = (call),
29
30void *sys_call_table[__NR_syscalls] = {
31#include <asm/unistd.h>
32};
diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
deleted file mode 100644
index 9903fad997f..00000000000
--- a/arch/hexagon/kernel/time.c
+++ /dev/null
@@ -1,249 +0,0 @@
1/*
2 * Time related functions for Hexagon architecture
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/clockchips.h>
23#include <linux/clocksource.h>
24#include <linux/interrupt.h>
25#include <linux/err.h>
26#include <linux/platform_device.h>
27#include <linux/ioport.h>
28#include <linux/of.h>
29#include <linux/of_address.h>
30#include <linux/of_irq.h>
31#include <linux/module.h>
32
33#include <asm/timer-regs.h>
34#include <asm/hexagon_vm.h>
35
36/*
37 * For the clocksource we need:
38 * pcycle frequency (600MHz)
39 * For the loops_per_jiffy we need:
40 * thread/cpu frequency (100MHz)
41 * And for the timer, we need:
42 * sleep clock rate
43 */
44
45cycles_t pcycle_freq_mhz;
46cycles_t thread_freq_mhz;
47cycles_t sleep_clk_freq;
48
49static struct resource rtos_timer_resources[] = {
50 {
51 .start = RTOS_TIMER_REGS_ADDR,
52 .end = RTOS_TIMER_REGS_ADDR+PAGE_SIZE-1,
53 .flags = IORESOURCE_MEM,
54 },
55};
56
57static struct platform_device rtos_timer_device = {
58 .name = "rtos_timer",
59 .id = -1,
60 .num_resources = ARRAY_SIZE(rtos_timer_resources),
61 .resource = rtos_timer_resources,
62};
63
64/* A lot of this stuff should move into a platform specific section. */
65struct adsp_hw_timer_struct {
66 u32 match; /* Match value */
67 u32 count;
68 u32 enable; /* [1] - CLR_ON_MATCH_EN, [0] - EN */
69 u32 clear; /* one-shot register that clears the count */
70};
71
72/* Look for "TCX0" for related constants. */
73static __iomem struct adsp_hw_timer_struct *rtos_timer;
74
75static cycle_t timer_get_cycles(struct clocksource *cs)
76{
77 return (cycle_t) __vmgettime();
78}
79
80static struct clocksource hexagon_clocksource = {
81 .name = "pcycles",
82 .rating = 250,
83 .read = timer_get_cycles,
84 .mask = CLOCKSOURCE_MASK(64),
85 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
86};
87
88static int set_next_event(unsigned long delta, struct clock_event_device *evt)
89{
90 /* Assuming the timer will be disabled when we enter here. */
91
92 iowrite32(1, &rtos_timer->clear);
93 iowrite32(0, &rtos_timer->clear);
94
95 iowrite32(delta, &rtos_timer->match);
96 iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
97 return 0;
98}
99
100/*
101 * Sets the mode (periodic, shutdown, oneshot, etc) of a timer.
102 */
103static void set_mode(enum clock_event_mode mode,
104 struct clock_event_device *evt)
105{
106 switch (mode) {
107 case CLOCK_EVT_MODE_SHUTDOWN:
108 /* XXX implement me */
109 default:
110 break;
111 }
112}
113
114#ifdef CONFIG_SMP
115/* Broadcast mechanism */
116static void broadcast(const struct cpumask *mask)
117{
118 send_ipi(mask, IPI_TIMER);
119}
120#endif
121
122static struct clock_event_device hexagon_clockevent_dev = {
123 .name = "clockevent",
124 .features = CLOCK_EVT_FEAT_ONESHOT,
125 .rating = 400,
126 .irq = RTOS_TIMER_INT,
127 .set_next_event = set_next_event,
128 .set_mode = set_mode,
129#ifdef CONFIG_SMP
130 .broadcast = broadcast,
131#endif
132};
133
134#ifdef CONFIG_SMP
135static DEFINE_PER_CPU(struct clock_event_device, clock_events);
136
137void setup_percpu_clockdev(void)
138{
139 int cpu = smp_processor_id();
140 struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
141 struct clock_event_device *dummy_clock_dev =
142 &per_cpu(clock_events, cpu);
143
144 memcpy(dummy_clock_dev, ce_dev, sizeof(*dummy_clock_dev));
145 INIT_LIST_HEAD(&dummy_clock_dev->list);
146
147 dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
148 dummy_clock_dev->cpumask = cpumask_of(cpu);
149 dummy_clock_dev->mode = CLOCK_EVT_MODE_UNUSED;
150
151 clockevents_register_device(dummy_clock_dev);
152}
153
154/* Called from smp.c for each CPU's timer ipi call */
155void ipi_timer(void)
156{
157 int cpu = smp_processor_id();
158 struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
159
160 ce_dev->event_handler(ce_dev);
161}
162#endif /* CONFIG_SMP */
163
164static irqreturn_t timer_interrupt(int irq, void *devid)
165{
166 struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
167
168 iowrite32(0, &rtos_timer->enable);
169 ce_dev->event_handler(ce_dev);
170
171 return IRQ_HANDLED;
172}
173
174/* This should also be pulled from devtree */
175static struct irqaction rtos_timer_intdesc = {
176 .handler = timer_interrupt,
177 .flags = IRQF_TIMER | IRQF_TRIGGER_RISING,
178 .name = "rtos_timer"
179};
180
181/*
182 * time_init_deferred - called by start_kernel to set up timer/clock source
183 *
184 * Install the IRQ handler for the clock, setup timers.
185 * This is done late, as that way, we can use ioremap().
186 *
187 * This runs just before the delay loop is calibrated, and
188 * is used for delay calibration.
189 */
190void __init time_init_deferred(void)
191{
192 struct resource *resource = NULL;
193 struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
194 struct device_node *dn;
195 struct resource r;
196 int err;
197
198 ce_dev->cpumask = cpu_all_mask;
199
200 if (!resource)
201 resource = rtos_timer_device.resource;
202
203 /* ioremap here means this has to run later, after paging init */
204 rtos_timer = ioremap(resource->start, resource_size(resource));
205
206 if (!rtos_timer) {
207 release_mem_region(resource->start, resource_size(resource));
208 }
209 clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);
210
211 /* Note: the sim generic RTOS clock is apparently really 18750Hz */
212
213 /*
214 * Last arg is some guaranteed seconds for which the conversion will
215 * work without overflow.
216 */
217 clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);
218
219 ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
220 ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);
221
222#ifdef CONFIG_SMP
223 setup_percpu_clockdev();
224#endif
225
226 clockevents_register_device(ce_dev);
227 setup_irq(ce_dev->irq, &rtos_timer_intdesc);
228}
229
230void __init time_init(void)
231{
232 late_time_init = time_init_deferred;
233}
234
235/*
236 * This could become parametric or perhaps even computed at run-time,
237 * but for now we take the observed simulator jitter.
238 */
239static long long fudgefactor = 350; /* Maybe lower if kernel optimized. */
240
241void __udelay(unsigned long usecs)
242{
243 unsigned long long start = __vmgettime();
244 unsigned long long finish = (pcycle_freq_mhz * usecs) - fudgefactor;
245
246 while ((__vmgettime() - start) < finish)
247 cpu_relax(); /* not sure how this improves readability */
248}
249EXPORT_SYMBOL(__udelay);
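/*
 * Worked example, not part of this patch: with pcycle_freq_mhz = 600
 * hardwired in setup_arch(), __udelay() above converts microseconds to
 * pcycles and pre-subtracts the observed simulator overhead. Standalone
 * arithmetic check:
 */
#include <stdio.h>

int main(void)
{
	const unsigned long long pcycle_freq_mhz = 600;	/* from setup_arch() */
	const unsigned long long fudgefactor = 350;	/* observed sim jitter */
	unsigned long usecs = 10;

	/* __udelay(10) spins for 600 * 10 - 350 = 5650 pcycles (~9.4 us) */
	printf("%llu pcycles\n", pcycle_freq_mhz * usecs - fudgefactor);
	return 0;
}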
diff --git a/arch/hexagon/kernel/topology.c b/arch/hexagon/kernel/topology.c
deleted file mode 100644
index 352f27e809f..00000000000
--- a/arch/hexagon/kernel/topology.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * CPU topology for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/cpu.h>
22#include <linux/cpumask.h>
23#include <linux/init.h>
24#include <linux/node.h>
25#include <linux/nodemask.h>
26#include <linux/percpu.h>
27
28/* Swiped from MIPS. */
29
30static DEFINE_PER_CPU(struct cpu, cpu_devices);
31
32static int __init topology_init(void)
33{
34 int i, ret;
35
36 for_each_present_cpu(i) {
37
38 /*
39 * register_cpu takes a per_cpu pointer and
40 * just points it at another per_cpu struct...
41 */
42
43 ret = register_cpu(&per_cpu(cpu_devices, i), i);
44 if (ret)
45 printk(KERN_WARNING "topology_init: register_cpu %d "
46 "failed (%d)\n", i, ret);
47 }
48
49 return 0;
50}
51
52subsys_initcall(topology_init);
diff --git a/arch/hexagon/kernel/trampoline.S b/arch/hexagon/kernel/trampoline.S
deleted file mode 100644
index 18110a9056b..00000000000
--- a/arch/hexagon/kernel/trampoline.S
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19/*
20 * Trampoline sequences to be copied onto user stack.
21 * This consumes a little more space than hand-assembling
22 * immediate constants for use in C, but is more portable
23 * to future tweaks to the Hexagon instruction set.
24 */
25
26#include <asm/unistd.h>
27
28/* Sig trampolines - call sys_sigreturn or sys_rt_sigreturn as appropriate */
29
30/* plain sigreturn is gone. */
31
32 .globl __rt_sigtramp_template
33__rt_sigtramp_template:
34 r6 = #__NR_rt_sigreturn;
35 trap0(#1);
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
deleted file mode 100644
index a41eeb8eeaa..00000000000
--- a/arch/hexagon/kernel/traps.c
+++ /dev/null
@@ -1,454 +0,0 @@
1/*
2 * Kernel traps/events for Hexagon processor
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/sched.h>
23#include <linux/module.h>
24#include <linux/kallsyms.h>
25#include <linux/kdebug.h>
26#include <linux/syscalls.h>
27#include <linux/signal.h>
28#include <linux/tracehook.h>
29#include <asm/traps.h>
30#include <asm/vm_fault.h>
31#include <asm/syscall.h>
32#include <asm/registers.h>
33#include <asm/unistd.h>
34#include <asm/sections.h>
35#ifdef CONFIG_KGDB
36# include <linux/kgdb.h>
37#endif
38
39#define TRAP_SYSCALL 1
40#define TRAP_DEBUG 0xdb
41
42void __init trap_init(void)
43{
44}
45
46#ifdef CONFIG_GENERIC_BUG
47/* Maybe should resemble arch/sh/kernel/traps.c ?? */
48int is_valid_bugaddr(unsigned long addr)
49{
50 return 1;
51}
52#endif /* CONFIG_GENERIC_BUG */
53
54static const char *ex_name(int ex)
55{
56 switch (ex) {
57 case HVM_GE_C_XPROT:
58 case HVM_GE_C_XUSER:
59 return "Execute protection fault";
60 case HVM_GE_C_RPROT:
61 case HVM_GE_C_RUSER:
62 return "Read protection fault";
63 case HVM_GE_C_WPROT:
64 case HVM_GE_C_WUSER:
65 return "Write protection fault";
66 case HVM_GE_C_XMAL:
67 return "Misaligned instruction";
68 case HVM_GE_C_RMAL:
69 return "Misaligned data load";
70 case HVM_GE_C_WMAL:
71 return "Misaligned data store";
72 case HVM_GE_C_INVI:
73 case HVM_GE_C_PRIVI:
74 return "Illegal instruction";
75 case HVM_GE_C_BUS:
76 return "Precise bus error";
77 case HVM_GE_C_CACHE:
78 return "Cache error";
79
80 case 0xdb:
81 return "Debugger trap";
82
83 default:
84 return "Unrecognized exception";
85 }
86}
87
88static void do_show_stack(struct task_struct *task, unsigned long *fp,
89 unsigned long ip)
90{
91 int kstack_depth_to_print = 24;
92 unsigned long offset, size;
93 const char *name = NULL;
94 unsigned long *newfp;
95 unsigned long low, high;
96 char tmpstr[128];
97 char *modname;
98 int i;
99
100 if (task == NULL)
101 task = current;
102
103 printk(KERN_INFO "CPU#%d, %s/%d, Call Trace:\n",
104 raw_smp_processor_id(), task->comm,
105 task_pid_nr(task));
106
107 if (fp == NULL) {
108 if (task == current) {
109 asm("%0 = r30" : "=r" (fp));
110 } else {
111 fp = (unsigned long *)
112 ((struct hexagon_switch_stack *)
113 task->thread.switch_sp)->fp;
114 }
115 }
116
117 if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
118 printk(KERN_INFO "-- Corrupt frame pointer %p\n", fp);
119 return;
120 }
121
122 /* Saved link reg is one word above FP */
123 if (!ip)
124 ip = *(fp+1);
125
126 /* Expect kernel stack to be in-bounds */
127 low = (unsigned long)task_stack_page(task);
128 high = low + THREAD_SIZE - 8;
129 low += sizeof(struct thread_info);
130
131 for (i = 0; i < kstack_depth_to_print; i++) {
132
133 name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);
134
135 printk(KERN_INFO "[%p] 0x%lx: %s + 0x%lx", fp, ip, name,
136 offset);
137 if (((unsigned long) fp < low) || (high < (unsigned long) fp))
138 printk(KERN_CONT " (FP out of bounds!)");
139 if (modname)
140 printk(KERN_CONT " [%s] ", modname);
141 printk(KERN_CONT "\n");
142
143 newfp = (unsigned long *) *fp;
144
145 if (((unsigned long) newfp) & 0x3) {
146 printk(KERN_INFO "-- Corrupt frame pointer %p\n",
147 newfp);
148 break;
149 }
150
151 /* Attempt to continue past exception. */
152 if (0 == newfp) {
153 struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
154 + 8);
155
156 if (regs->syscall_nr != -1) {
157 printk(KERN_INFO "-- trap0 -- syscall_nr: %ld",
158 regs->syscall_nr);
159 printk(KERN_CONT " psp: %lx elr: %lx\n",
160 pt_psp(regs), pt_elr(regs));
161 break;
162 } else {
163 /* really want to see more ... */
164 kstack_depth_to_print += 6;
165 printk(KERN_INFO "-- %s (0x%lx) badva: %lx\n",
166 ex_name(pt_cause(regs)), pt_cause(regs),
167 pt_badva(regs));
168 }
169
170 newfp = (unsigned long *) regs->r30;
171 ip = pt_elr(regs);
172 } else {
173 ip = *(newfp + 1);
174 }
175
176 /* If link reg is null, we are done. */
177 if (ip == 0x0)
178 break;
179
180 /* If newfp isn't larger, we're tracing garbage. */
181 if (newfp > fp)
182 fp = newfp;
183 else
184 break;
185 }
186}
187
188void show_stack(struct task_struct *task, unsigned long *fp)
189{
190 /* Saved link reg is one word above FP */
191 do_show_stack(task, fp, 0);
192}
193
194void dump_stack(void)
195{
196 unsigned long *fp;
197 asm("%0 = r30" : "=r" (fp));
198 show_stack(current, fp);
199}
200EXPORT_SYMBOL(dump_stack);
201
202int die(const char *str, struct pt_regs *regs, long err)
203{
204 static struct {
205 spinlock_t lock;
206 int counter;
207 } die = {
208 .lock = __SPIN_LOCK_UNLOCKED(die.lock),
209 .counter = 0
210 };
211
212 console_verbose();
213 oops_enter();
214
215 spin_lock_irq(&die.lock);
216 bust_spinlocks(1);
217 printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
218
219 if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
220 NOTIFY_STOP)
221 return 1;
222
223 print_modules();
224 show_regs(regs);
225 do_show_stack(current, &regs->r30, pt_elr(regs));
226
227 bust_spinlocks(0);
228 add_taint(TAINT_DIE);
229
230 spin_unlock_irq(&die.lock);
231
232 if (in_interrupt())
233 panic("Fatal exception in interrupt");
234
235 if (panic_on_oops)
236 panic("Fatal exception");
237
238 oops_exit();
239 do_exit(err);
240 return 0;
241}
242
243int die_if_kernel(char *str, struct pt_regs *regs, long err)
244{
245 if (!user_mode(regs))
246 return die(str, regs, err);
247 else
248 return 0;
249}
250
251/*
252 * It's not clear that misaligned fetches are ever recoverable.
253 */
254static void misaligned_instruction(struct pt_regs *regs)
255{
256 die_if_kernel("Misaligned Instruction", regs, 0);
257 force_sig(SIGBUS, current);
258}
259
260/*
261 * Misaligned loads and stores, on the other hand, can be
262 * emulated, and probably should be, some day. But for now
263 * they will be considered fatal.
264 */
265static void misaligned_data_load(struct pt_regs *regs)
266{
267 die_if_kernel("Misaligned Data Load", regs, 0);
268 force_sig(SIGBUS, current);
269}
270
271static void misaligned_data_store(struct pt_regs *regs)
272{
273 die_if_kernel("Misaligned Data Store", regs, 0);
274 force_sig(SIGBUS, current);
275}
276
277static void illegal_instruction(struct pt_regs *regs)
278{
279 die_if_kernel("Illegal Instruction", regs, 0);
280 force_sig(SIGILL, current);
281}
282
283/*
284 * Precise bus errors may be recoverable with a retry,
285 * but for now, treat them as irrecoverable.
286 */
287static void precise_bus_error(struct pt_regs *regs)
288{
289 die_if_kernel("Precise Bus Error", regs, 0);
290 force_sig(SIGBUS, current);
291}
292
293/*
294 * If anything is to be done here other than panic,
295 * it will probably be complex and migrate to another
296 * source module. For now, just die.
297 */
298static void cache_error(struct pt_regs *regs)
299{
300 die("Cache Error", regs, 0);
301}
302
303/*
304 * General exception handler
305 */
306void do_genex(struct pt_regs *regs)
307{
308 /*
309 * Decode Cause and Dispatch
310 */
311 switch (pt_cause(regs)) {
312 case HVM_GE_C_XPROT:
313 case HVM_GE_C_XUSER:
314 execute_protection_fault(regs);
315 break;
316 case HVM_GE_C_RPROT:
317 case HVM_GE_C_RUSER:
318 read_protection_fault(regs);
319 break;
320 case HVM_GE_C_WPROT:
321 case HVM_GE_C_WUSER:
322 write_protection_fault(regs);
323 break;
324 case HVM_GE_C_XMAL:
325 misaligned_instruction(regs);
326 break;
327 case HVM_GE_C_RMAL:
328 misaligned_data_load(regs);
329 break;
330 case HVM_GE_C_WMAL:
331 misaligned_data_store(regs);
332 break;
333 case HVM_GE_C_INVI:
334 case HVM_GE_C_PRIVI:
335 illegal_instruction(regs);
336 break;
337 case HVM_GE_C_BUS:
338 precise_bus_error(regs);
339 break;
340 case HVM_GE_C_CACHE:
341 cache_error(regs);
342 break;
343 default:
344 /* Halt and catch fire */
345 panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
346 break;
347 }
348}
349
350/* Indirect system call dispatch */
351long sys_syscall(void)
352{
353 printk(KERN_ERR "sys_syscall invoked!\n");
354 return -ENOSYS;
355}
356
357void do_trap0(struct pt_regs *regs)
358{
359 unsigned long syscallret = 0;
360 syscall_fn syscall;
361
362 switch (pt_cause(regs)) {
363 case TRAP_SYSCALL:
364 /* System call is trap0 #1 */
365
366 /* allow strace to catch syscall args */
367 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
368 tracehook_report_syscall_entry(regs)))
369 return; /* return -ENOSYS somewhere? */
370
371 /* Interrupts should be re-enabled for syscall processing */
372 __vmsetie(VM_INT_ENABLE);
373
374 /*
375 * System call number is in r6, arguments in r0..r5.
376 * Fortunately, no Linux syscall has more than 6 arguments,
377 * and Hexagon ABI passes first 6 arguments in registers.
378 * 64-bit arguments are passed in odd/even register pairs.
379 * Fortunately, we have no system calls that take more
380 * than three arguments with more than one 64-bit value.
381 * Should that change, we'd need to redesign to copy
382 * between user and kernel stacks.
383 */
384 regs->syscall_nr = regs->r06;
385
386 /*
387 * GPR R0 carries the first parameter, and is also used
388 * to report the return value. We need a backup of
389 * the user's value in case we need to do a late restart
390 * of the system call.
391 */
392 regs->restart_r0 = regs->r00;
393
394 if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
395 regs->r00 = -1;
396 } else {
397 syscall = (syscall_fn)
398 (sys_call_table[regs->syscall_nr]);
399 syscallret = syscall(regs->r00, regs->r01,
400 regs->r02, regs->r03,
401 regs->r04, regs->r05);
402 }
403
404 /*
405 * If it was a sigreturn system call, don't overwrite
406 * r0 value in stack frame with return value.
407 *
408 * __NR_sigreturn doesn't seem to exist in new unistd.h
409 */
410
411 if (regs->syscall_nr != __NR_rt_sigreturn)
412 regs->r00 = syscallret;
413
414 /* allow strace to get the syscall return state */
415 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
416 tracehook_report_syscall_exit(regs, 0);
417
418 break;
419 case TRAP_DEBUG:
420 /* Trap0 0xdb is debug breakpoint */
421 if (user_mode(regs)) {
422 struct siginfo info;
423
424 info.si_signo = SIGTRAP;
425 info.si_errno = 0;
426 /*
427 * Some architectures add some per-thread state
428 * to distinguish between breakpoint traps and
429 * trace traps. We may want to do that, and
430 * set the si_code value appropriately, or we
431 * may want to use a different trap0 flavor.
432 */
433 info.si_code = TRAP_BRKPT;
434 info.si_addr = (void __user *) pt_elr(regs);
435 send_sig_info(SIGTRAP, &info, current);
436 } else {
437#ifdef CONFIG_KGDB
438 kgdb_handle_exception(pt_cause(regs), SIGTRAP,
439 TRAP_BRKPT, regs);
440#endif
441 }
442 break;
443 }
444 /* Ignore other trap0 codes for now, especially 0 (Angel calls) */
445}
446
447/*
448 * Machine check exception handler
449 */
450void do_machcheck(struct pt_regs *regs)
451{
452 /* Halt and catch fire */
453 __vmstop();
454}
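/*
 * Illustrative userspace sketch, not part of this patch: issuing a raw
 * system call the way do_trap0() above decodes it -- number in r6,
 * arguments in r0..r5, trap0(#1) to enter the kernel (the same sequence
 * the signal trampoline in trampoline.S uses). The helper raw_getpid()
 * is hypothetical and the register constraints are simplified; a real
 * wrapper would also mark every register the ABI lets the kernel clobber.
 */
#include <asm/unistd.h>

static long raw_getpid(void)
{
	register long nr  asm("r6") = __NR_getpid;
	register long ret asm("r0");

	__asm__ __volatile__("trap0(#1);"
			     : "=r" (ret)
			     : "r" (nr)
			     : "memory");
	return ret;
}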
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
deleted file mode 100644
index 0bf5a87e4d0..00000000000
--- a/arch/hexagon/kernel/vdso.c
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * vDSO implementation for Hexagon
3 *
4 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/err.h>
22#include <linux/mm.h>
23#include <linux/vmalloc.h>
24#include <linux/binfmts.h>
25
26#include <asm/vdso.h>
27
28static struct page *vdso_page;
29
30/* Create a vDSO page holding the signal trampoline.
31 * We want this for a non-executable stack.
32 */
33static int __init vdso_init(void)
34{
35 struct hexagon_vdso *vdso;
36
37 vdso_page = alloc_page(GFP_KERNEL);
38 if (!vdso_page)
39 panic("Cannot allocate vdso");
40
41 vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
42 if (!vdso)
43 panic("Cannot map vdso");
44 clear_page(vdso);
45
46 /* Install the signal trampoline; currently looks like this:
47 * r6 = #__NR_rt_sigreturn;
48 * trap0(#1);
49 */
50 vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
51 vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];
52
53 vunmap(vdso);
54
55 return 0;
56}
57arch_initcall(vdso_init);
58
59/*
60 * Called from binfmt_elf. Create a VMA for the vDSO page.
61 */
62int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
63{
64 int ret;
65 unsigned long vdso_base;
66 struct mm_struct *mm = current->mm;
67
68 down_write(&mm->mmap_sem);
69
70 /* Try to get it loaded right near ld.so/glibc. */
71 vdso_base = STACK_TOP;
72
73 vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
74 if (IS_ERR_VALUE(vdso_base)) {
75 ret = vdso_base;
76 goto up_fail;
77 }
78
79 /* MAYWRITE to allow gdb to COW and set breakpoints. */
80 ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
81 VM_READ|VM_EXEC|
82 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
83 &vdso_page);
84
85 if (ret)
86 goto up_fail;
87
88 mm->context.vdso = (void *)vdso_base;
89
90up_fail:
91 up_write(&mm->mmap_sem);
92 return ret;
93}
94
95const char *arch_vma_name(struct vm_area_struct *vma)
96{
97 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
98 return "[vdso]";
99 return NULL;
100}
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
deleted file mode 100644
index 425e50c694f..00000000000
--- a/arch/hexagon/kernel/vm_entry.S
+++ /dev/null
@@ -1,273 +0,0 @@
1/*
2 * Event entry/exit for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <asm/asm-offsets.h> /* assembly-safer versions of C defines */
22#include <asm/mem-layout.h> /* sigh, except for page_offset */
23#include <asm/hexagon_vm.h>
24#include <asm/thread_info.h>
25
26/*
27 * Entry into guest-mode Linux under Hexagon Virtual Machine.
28 * Stack pointer points to event record - build pt_regs on top of it,
29 * set up a plausible C stack frame, and dispatch to the C handler.
30 * On return, do vmrte virtual instruction with SP where we started.
31 *
32 * VM Spec 0.5 uses a trap to fetch HVM record now.
33 */
34
35/*
36 * Save full register state, while setting up thread_info struct
37 * pointer derived from kernel stack pointer in THREADINFO_REG
38 * register, putting prior thread_info.regs pointer in a callee-save
39 * register (R24, which had better not ever be assigned to THREADINFO_REG),
40 * and updating thread_info.regs to point to current stack frame,
41 * so as to support nested events in kernel mode.
42 *
43 * As this is common code, we set the pt_regs system call number
44 * to -1 for all events. It will be replaced with the system call
45 * number in the case where we decode a system call (trap0(#1)).
46 */
47
48#define save_pt_regs()\
49 memd(R0 + #_PT_R3130) = R31:30; \
50 { memw(R0 + #_PT_R2928) = R28; \
51 R31 = memw(R0 + #_PT_ER_VMPSP); }\
52 { memw(R0 + #(_PT_R2928 + 4)) = R31; \
53 R31 = ugp; } \
54 { memd(R0 + #_PT_R2726) = R27:26; \
55 R30 = gp ; } \
56 memd(R0 + #_PT_R2524) = R25:24; \
57 memd(R0 + #_PT_R2322) = R23:22; \
58 memd(R0 + #_PT_R2120) = R21:20; \
59 memd(R0 + #_PT_R1918) = R19:18; \
60 memd(R0 + #_PT_R1716) = R17:16; \
61 memd(R0 + #_PT_R1514) = R15:14; \
62 memd(R0 + #_PT_R1312) = R13:12; \
63 { memd(R0 + #_PT_R1110) = R11:10; \
64 R15 = lc0; } \
65 { memd(R0 + #_PT_R0908) = R9:8; \
66 R14 = sa0; } \
67 { memd(R0 + #_PT_R0706) = R7:6; \
68 R13 = lc1; } \
69 { memd(R0 + #_PT_R0504) = R5:4; \
70 R12 = sa1; } \
71 { memd(R0 + #_PT_UGPGP) = R31:30; \
72 R11 = m1; \
73 R2.H = #HI(_THREAD_SIZE); } \
74 { memd(R0 + #_PT_LC0SA0) = R15:14; \
75 R10 = m0; \
76 R2.L = #LO(_THREAD_SIZE); } \
77 { memd(R0 + #_PT_LC1SA1) = R13:12; \
78 R15 = p3:0; \
79 R2 = neg(R2); } \
80 { memd(R0 + #_PT_M1M0) = R11:10; \
81 R14 = usr; \
82 R2 = and(R0,R2); } \
83 { memd(R0 + #_PT_PREDSUSR) = R15:14; \
84 THREADINFO_REG = R2; } \
85 { r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
86 memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
87 R2 = #-1; } \
88 { memw(R0 + #_PT_SYSCALL_NR) = R2; \
89 R30 = #0; }
90
91/*
92 * Restore registers and thread_info.regs state. THREADINFO_REG
93 * is assumed to still be sane, and R24 to have been correctly
94 * preserved. Don't restore R29 (SP) until later.
95 */
96
97#define restore_pt_regs() \
98 { memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
99 R15:14 = memd(R0 + #_PT_PREDSUSR); } \
100 { R11:10 = memd(R0 + #_PT_M1M0); \
101 p3:0 = R15; } \
102 { R13:12 = memd(R0 + #_PT_LC1SA1); \
103 usr = R14; } \
104 { R15:14 = memd(R0 + #_PT_LC0SA0); \
105 m1 = R11; } \
106 { R3:2 = memd(R0 + #_PT_R0302); \
107 m0 = R10; } \
108 { R5:4 = memd(R0 + #_PT_R0504); \
109 lc1 = R13; } \
110 { R7:6 = memd(R0 + #_PT_R0706); \
111 sa1 = R12; } \
112 { R9:8 = memd(R0 + #_PT_R0908); \
113 lc0 = R15; } \
114 { R11:10 = memd(R0 + #_PT_R1110); \
115 sa0 = R14; } \
116 { R13:12 = memd(R0 + #_PT_R1312); \
117 R15:14 = memd(R0 + #_PT_R1514); } \
118 { R17:16 = memd(R0 + #_PT_R1716); \
119 R19:18 = memd(R0 + #_PT_R1918); } \
120 { R21:20 = memd(R0 + #_PT_R2120); \
121 R23:22 = memd(R0 + #_PT_R2322); } \
122 { R25:24 = memd(R0 + #_PT_R2524); \
123 R27:26 = memd(R0 + #_PT_R2726); } \
124 R31:30 = memd(R0 + #_PT_UGPGP); \
125 { R28 = memw(R0 + #_PT_R2928); \
126 ugp = R31; } \
127 { R31:30 = memd(R0 + #_PT_R3130); \
128 gp = R30; }
129
130 /*
131 * Clears off enough space for the rest of pt_regs; evrec is a part
132 * of pt_regs in HVM mode. Save R0/R1, set handler's address in R1.
133 * R0 is the address of pt_regs and is the parameter to save_pt_regs.
134 */
135
136/*
137 * Since the HVM isn't automagically pushing the EVREC onto the stack anymore,
138 * we'll subtract the entire size out and then fill it in ourselves.
139 * Need to save off R0, R1, R2, R3 immediately.
140 */
141
142#define vm_event_entry(CHandler) \
143 { \
144 R29 = add(R29, #-(_PT_REGS_SIZE)); \
145 memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
146 } \
147 { \
148 memd(R29 +#_PT_R0302) = R3:2; \
149 } \
150 trap1(#HVM_TRAP1_VMGETREGS); \
151 { \
152 memd(R29 + #_PT_ER_VMEL) = R1:0; \
153 R0 = R29; \
154 R1.L = #LO(CHandler); \
155 } \
156 { \
157 memd(R29 + #_PT_ER_VMPSP) = R3:2; \
158 R1.H = #HI(CHandler); \
159 jump event_dispatch; \
160 }
161
162.text
163 /*
164 * Do bulk save/restore in one place.
165 * Adds a jump to dispatch latency, but
166 * saves hundreds of bytes.
167 */
168
169event_dispatch:
170 save_pt_regs()
171 callr r1
172
173 /*
174 * If we were in kernel mode, we don't need to check scheduler
175 * or signals if CONFIG_PREEMPT is not set. If set, then it has
176 * to jump to a need_resched kind of block.
177 * BTW, CONFIG_PREEMPT is not supported yet.
178 */
179
180#ifdef CONFIG_PREEMPT
181 R0 = #VM_INT_DISABLE
182 trap1(#HVM_TRAP1_VMSETIE)
183#endif
184
185 /* "Nested control path" -- if the previous mode was kernel */
186 R0 = memw(R29 + #_PT_ER_VMEST);
187 P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
188 if !P0 jump restore_all;
189 /*
190 * Returning from system call, normally coming back from user mode
191 */
192return_from_syscall:
193 /* Disable interrupts while checking TIF */
194 R0 = #VM_INT_DISABLE
195 trap1(#HVM_TRAP1_VMSETIE)
196
197 /*
198 * Coming back from the C-world, our thread info pointer
199 * should be in the designated register (usually R19)
200 */
201 R1.L = #LO(_TIF_ALLWORK_MASK)
202 {
203 R1.H = #HI(_TIF_ALLWORK_MASK);
204 R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
205 }
206
207 /*
208 * Compare against the "return to userspace" _TIF_ALLWORK_MASK
209 */
210 R1 = and(R1,R0);
211 { P0 = cmp.eq(R1,#0); if (!P0.new) jump:t work_pending;}
212 jump restore_all; /* we're outta here! */
213
214work_pending:
215 {
216 P0 = tstbit(R1, #TIF_NEED_RESCHED);
217 if (!P0.new) jump:nt work_notifysig;
218 }
219 call schedule
220 jump return_from_syscall; /* check for more work */
221
222work_notifysig:
223 /* Signal delivery / notify-resume work; the part that's kind of fuzzy. */
224 R1 = and(R0, #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME));
225 P0 = cmp.eq(R1, #0);
226 if P0 jump restore_all
227 R1 = R0; /* unsigned long thread_info_flags */
228 R0 = R29; /* regs should still be at top of stack */
229 call do_notify_resume
230
231restore_all:
232 /* Disable interrupts, if they weren't already, before reg restore. */
233 R0 = #VM_INT_DISABLE
234 trap1(#HVM_TRAP1_VMSETIE)
235
236 /* do the setregs here for VM 0.5 */
237 /* R29 here should already be pointing at pt_regs */
238 R1:0 = memd(R29 + #_PT_ER_VMEL);
239 R3:2 = memd(R29 + #_PT_ER_VMPSP);
240 trap1(#HVM_TRAP1_VMSETREGS);
241
242 R0 = R29
243 restore_pt_regs()
244 R1:0 = memd(R29 + #_PT_R0100);
245 R29 = add(R29, #_PT_REGS_SIZE);
246 trap1(#HVM_TRAP1_VMRTE)
247 /* Notreached */
248
249 .globl _K_enter_genex
250_K_enter_genex:
251 vm_event_entry(do_genex)
252
253 .globl _K_enter_interrupt
254_K_enter_interrupt:
255 vm_event_entry(arch_do_IRQ)
256
257 .globl _K_enter_trap0
258_K_enter_trap0:
259 vm_event_entry(do_trap0)
260
261 .globl _K_enter_machcheck
262_K_enter_machcheck:
263 vm_event_entry(do_machcheck)
264
265
266 .globl ret_from_fork
267ret_from_fork:
268 call schedule_tail
269 P0 = cmp.eq(R24, #0);
270 if P0 jump return_from_syscall
271 R0 = R25;
272 callr R24
273 jump return_from_syscall
diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c
deleted file mode 100644
index 9b5a4a295a6..00000000000
--- a/arch/hexagon/kernel/vm_events.c
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * Mostly IRQ support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/kernel.h>
22#include <asm/registers.h>
23#include <linux/irq.h>
24#include <linux/hardirq.h>
25
26/*
27 * show_regs - print pt_regs structure
28 * @regs: pointer to pt_regs
29 *
30 * To-do: add all the accessor definitions to registers.h
31 *
32 * Will make this routine a lot easier to write.
33 */
34void show_regs(struct pt_regs *regs)
35{
36 printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n",
37 regs->restart_r0, regs->syscall_nr);
38 printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds);
39 printk(KERN_EMERG "lc0: \t0x%08lx sa0: 0x%08lx m0: 0x%08lx\n",
40 regs->lc0, regs->sa0, regs->m0);
41 printk(KERN_EMERG "lc1: \t0x%08lx sa1: 0x%08lx m1: 0x%08lx\n",
42 regs->lc1, regs->sa1, regs->m1);
43 printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n",
44 regs->gp, regs->ugp, regs->usr);
45 printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00,
46 regs->r01,
47 regs->r02,
48 regs->r03);
49 printk(KERN_EMERG "r4: \t0x%08lx %08lx %08lx %08lx\n", regs->r04,
50 regs->r05,
51 regs->r06,
52 regs->r07);
53 printk(KERN_EMERG "r8: \t0x%08lx %08lx %08lx %08lx\n", regs->r08,
54 regs->r09,
55 regs->r10,
56 regs->r11);
57 printk(KERN_EMERG "r12: \t0x%08lx %08lx %08lx %08lx\n", regs->r12,
58 regs->r13,
59 regs->r14,
60 regs->r15);
61 printk(KERN_EMERG "r16: \t0x%08lx %08lx %08lx %08lx\n", regs->r16,
62 regs->r17,
63 regs->r18,
64 regs->r19);
65 printk(KERN_EMERG "r20: \t0x%08lx %08lx %08lx %08lx\n", regs->r20,
66 regs->r21,
67 regs->r22,
68 regs->r23);
69 printk(KERN_EMERG "r24: \t0x%08lx %08lx %08lx %08lx\n", regs->r24,
70 regs->r25,
71 regs->r26,
72 regs->r27);
73 printk(KERN_EMERG "r28: \t0x%08lx %08lx %08lx %08lx\n", regs->r28,
74 regs->r29,
75 regs->r30,
76 regs->r31);
77
78 printk(KERN_EMERG "elr: \t0x%08lx cause: 0x%08lx user_mode: %d\n",
79 pt_elr(regs), pt_cause(regs), user_mode(regs));
80 printk(KERN_EMERG "psp: \t0x%08lx badva: 0x%08lx int_enabled: %d\n",
81 pt_psp(regs), pt_badva(regs), ints_enabled(regs));
82}
83
84void dummy_handler(struct pt_regs *regs)
85{
86 unsigned int elr = pt_elr(regs);
87 printk(KERN_ERR "Unimplemented handler; ELR=0x%08x\n", elr);
88}
89
90
91void arch_do_IRQ(struct pt_regs *regs)
92{
93 int irq = pt_cause(regs);
94 struct pt_regs *old_regs = set_irq_regs(regs);
95
96 irq_enter();
97 generic_handle_irq(irq);
98 irq_exit();
99 set_irq_regs(old_regs);
100}
diff --git a/arch/hexagon/kernel/vm_init_segtable.S b/arch/hexagon/kernel/vm_init_segtable.S
deleted file mode 100644
index 80967f2192b..00000000000
--- a/arch/hexagon/kernel/vm_init_segtable.S
+++ /dev/null
@@ -1,442 +0,0 @@
1/*
2 * Initial page table for Linux kernel under Hexagon VM,
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21/*
22 * These tables are pre-computed and linked into kernel.
23 */
24
25#include <asm/vm_mmu.h>
26/* #include <asm/iomap.h> */
27
28/*
29 * Start with mapping PA=0 to both VA=0x0 and VA=0xc0000000 as 16MB large pages.
30 * No user mode access, RWX, write-back cache. The entry needs
31 * to be replicated for all 4 virtual segments mapping to the page.
32 */
33
34/* "Big Kernel Page" */
35#define BKP(pa) (((pa) & __HVM_PTE_PGMASK_4MB) \
36 | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
37 | __HEXAGON_C_WB_L2 << 6 \
38 | __HVM_PDE_S_16MB)
39
40/* No cache version */
41
42#define BKPG_IO(pa) (((pa) & __HVM_PTE_PGMASK_16MB) \
43 | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
44 | __HVM_PDE_S_16MB | __HEXAGON_C_DEV << 6 )
45
46#define FOURK_IO(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
47 | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
48 | __HEXAGON_C_DEV << 6 )
49
50#define L2_PTR(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
51 | __HVM_PDE_S_4KB )
52
53#define X __HVM_PDE_S_INVALID
54
55 .p2align 12
56 .globl swapper_pg_dir
57 .globl _K_init_segtable
58swapper_pg_dir:
59/* VA 0x00000000 */
60 .word X,X,X,X
61 .word X,X,X,X
62 .word X,X,X,X
63 .word X,X,X,X
64 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
65 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
66 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
67 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
68 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
69 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
70 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
71 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
72/* VA 0x40000000 */
73 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
74 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
75 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
76 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
77 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
78 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
79 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
80 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
81/* VA 0x80000000 */
82 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
83 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
84 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
85 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
86 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
87/*0xa8*/.word X,X,X,X
88#ifdef CONFIG_COMET_EARLY_UART_DEBUG
89UART_PTE_ENTRY:
90/*0xa9*/.word BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000)
91#else
92/*0xa9*/.word X,X,X,X
93#endif
94/*0xaa*/.word X,X,X,X
95/*0xab*/.word X,X,X,X
96/*0xac*/.word X,X,X,X
97/*0xad*/.word X,X,X,X
98/*0xae*/.word X,X,X,X
99/*0xaf*/.word X,X,X,X
100/*0xb0*/.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
101 .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
102_K_init_segtable:
103/* VA 0xC0000000 */
104 .word BKP(0x00000000), BKP(0x00400000), BKP(0x00800000), BKP(0x00c00000)
105 .word BKP(0x01000000), BKP(0x01400000), BKP(0x01800000), BKP(0x01c00000)
106 .word BKP(0x02000000), BKP(0x02400000), BKP(0x02800000), BKP(0x02c00000)
107 .word BKP(0x03000000), BKP(0x03400000), BKP(0x03800000), BKP(0x03c00000)
108 .word BKP(0x04000000), BKP(0x04400000), BKP(0x04800000), BKP(0x04c00000)
109 .word BKP(0x05000000), BKP(0x05400000), BKP(0x05800000), BKP(0x05c00000)
110 .word BKP(0x06000000), BKP(0x06400000), BKP(0x06800000), BKP(0x06c00000)
111 .word BKP(0x07000000), BKP(0x07400000), BKP(0x07800000), BKP(0x07c00000)
112
113 .word BKP(0x08000000), BKP(0x08400000), BKP(0x08800000), BKP(0x08c00000)
114 .word BKP(0x09000000), BKP(0x09400000), BKP(0x09800000), BKP(0x09c00000)
115 .word BKP(0x0a000000), BKP(0x0a400000), BKP(0x0a800000), BKP(0x0ac00000)
116 .word BKP(0x0b000000), BKP(0x0b400000), BKP(0x0b800000), BKP(0x0bc00000)
117 .word BKP(0x0c000000), BKP(0x0c400000), BKP(0x0c800000), BKP(0x0cc00000)
118 .word BKP(0x0d000000), BKP(0x0d400000), BKP(0x0d800000), BKP(0x0dc00000)
119 .word BKP(0x0e000000), BKP(0x0e400000), BKP(0x0e800000), BKP(0x0ec00000)
120 .word BKP(0x0f000000), BKP(0x0f400000), BKP(0x0f800000), BKP(0x0fc00000)
121
122 .word BKP(0x10000000), BKP(0x10400000), BKP(0x10800000), BKP(0x10c00000)
123 .word BKP(0x11000000), BKP(0x11400000), BKP(0x11800000), BKP(0x11c00000)
124 .word BKP(0x12000000), BKP(0x12400000), BKP(0x12800000), BKP(0x12c00000)
125 .word BKP(0x13000000), BKP(0x13400000), BKP(0x13800000), BKP(0x13c00000)
126 .word BKP(0x14000000), BKP(0x14400000), BKP(0x14800000), BKP(0x14c00000)
127 .word BKP(0x15000000), BKP(0x15400000), BKP(0x15800000), BKP(0x15c00000)
128 .word BKP(0x16000000), BKP(0x16400000), BKP(0x16800000), BKP(0x16c00000)
129 .word BKP(0x17000000), BKP(0x17400000), BKP(0x17800000), BKP(0x17c00000)
130
131 .word BKP(0x18000000), BKP(0x18400000), BKP(0x18800000), BKP(0x18c00000)
132 .word BKP(0x19000000), BKP(0x19400000), BKP(0x19800000), BKP(0x19c00000)
133 .word BKP(0x1a000000), BKP(0x1a400000), BKP(0x1a800000), BKP(0x1ac00000)
134 .word BKP(0x1b000000), BKP(0x1b400000), BKP(0x1b800000), BKP(0x1bc00000)
135 .word BKP(0x1c000000), BKP(0x1c400000), BKP(0x1c800000), BKP(0x1cc00000)
136 .word BKP(0x1d000000), BKP(0x1d400000), BKP(0x1d800000), BKP(0x1dc00000)
137 .word BKP(0x1e000000), BKP(0x1e400000), BKP(0x1e800000), BKP(0x1ec00000)
138 .word BKP(0x1f000000), BKP(0x1f400000), BKP(0x1f800000), BKP(0x1fc00000)
139
140 .word BKP(0x20000000), BKP(0x20400000), BKP(0x20800000), BKP(0x20c00000)
141 .word BKP(0x21000000), BKP(0x21400000), BKP(0x21800000), BKP(0x21c00000)
142 .word BKP(0x22000000), BKP(0x22400000), BKP(0x22800000), BKP(0x22c00000)
143 .word BKP(0x23000000), BKP(0x23400000), BKP(0x23800000), BKP(0x23c00000)
144 .word BKP(0x24000000), BKP(0x24400000), BKP(0x24800000), BKP(0x24c00000)
145 .word BKP(0x25000000), BKP(0x25400000), BKP(0x25800000), BKP(0x25c00000)
146 .word BKP(0x26000000), BKP(0x26400000), BKP(0x26800000), BKP(0x26c00000)
147 .word BKP(0x27000000), BKP(0x27400000), BKP(0x27800000), BKP(0x27c00000)
148
149 .word BKP(0x28000000), BKP(0x28400000), BKP(0x28800000), BKP(0x28c00000)
150 .word BKP(0x29000000), BKP(0x29400000), BKP(0x29800000), BKP(0x29c00000)
151 .word BKP(0x2a000000), BKP(0x2a400000), BKP(0x2a800000), BKP(0x2ac00000)
152 .word BKP(0x2b000000), BKP(0x2b400000), BKP(0x2b800000), BKP(0x2bc00000)
153 .word BKP(0x2c000000), BKP(0x2c400000), BKP(0x2c800000), BKP(0x2cc00000)
154 .word BKP(0x2d000000), BKP(0x2d400000), BKP(0x2d800000), BKP(0x2dc00000)
155 .word BKP(0x2e000000), BKP(0x2e400000), BKP(0x2e800000), BKP(0x2ec00000)
156 .word BKP(0x2f000000), BKP(0x2f400000), BKP(0x2f800000), BKP(0x2fc00000)
157
158 .word BKP(0x30000000), BKP(0x30400000), BKP(0x30800000), BKP(0x30c00000)
159 .word BKP(0x31000000), BKP(0x31400000), BKP(0x31800000), BKP(0x31c00000)
160 .word BKP(0x32000000), BKP(0x32400000), BKP(0x32800000), BKP(0x32c00000)
161 .word BKP(0x33000000), BKP(0x33400000), BKP(0x33800000), BKP(0x33c00000)
162 .word BKP(0x34000000), BKP(0x34400000), BKP(0x34800000), BKP(0x34c00000)
163 .word BKP(0x35000000), BKP(0x35400000), BKP(0x35800000), BKP(0x35c00000)
164 .word BKP(0x36000000), BKP(0x36400000), BKP(0x36800000), BKP(0x36c00000)
165 .word BKP(0x37000000), BKP(0x37400000), BKP(0x37800000), BKP(0x37c00000)
166
167 .word BKP(0x38000000), BKP(0x38400000), BKP(0x38800000), BKP(0x38c00000)
168 .word BKP(0x39000000), BKP(0x39400000), BKP(0x39800000), BKP(0x39c00000)
169 .word BKP(0x3a000000), BKP(0x3a400000), BKP(0x3a800000), BKP(0x3ac00000)
170 .word BKP(0x3b000000), BKP(0x3b400000), BKP(0x3b800000), BKP(0x3bc00000)
171 .word BKP(0x3c000000), BKP(0x3c400000), BKP(0x3c800000), BKP(0x3cc00000)
172 .word BKP(0x3d000000), BKP(0x3d400000), BKP(0x3d800000), BKP(0x3dc00000)
173_K_io_map:
174 .word X,X,X,X /* 0x3e000000 - device IO early remap */
175 .word X,X,X,X /* 0x3f000000 - hypervisor space*/
176
177#if 0
178/*
179 * This is in here as an example for devices which need to be mapped really
180 * early.
181 */
182 .p2align 12
183 .globl _K_io_kmap
184 .globl _K_init_devicetable
185_K_init_devicetable: /* Should be 4MB worth of entries */
186 .word FOURK_IO(MSM_GPIO1_PHYS),FOURK_IO(MSM_GPIO2_PHYS),FOURK_IO(MSM_SIRC_PHYS),X
187 .word FOURK_IO(TLMM_GPIO1_PHYS),X,X,X
188 .word X,X,X,X
189 .word X,X,X,X
190 .word X,X,X,X
191 .word X,X,X,X
192 .word X,X,X,X
193 .word X,X,X,X
194 .word X,X,X,X
195 .word X,X,X,X
196 .word X,X,X,X
197 .word X,X,X,X
198 .word X,X,X,X
199 .word X,X,X,X
200 .word X,X,X,X
201 .word X,X,X,X
202 .word X,X,X,X
203 .word X,X,X,X
204 .word X,X,X,X
205 .word X,X,X,X
206 .word X,X,X,X
207 .word X,X,X,X
208 .word X,X,X,X
209 .word X,X,X,X
210 .word X,X,X,X
211 .word X,X,X,X
212 .word X,X,X,X
213 .word X,X,X,X
214 .word X,X,X,X
215 .word X,X,X,X
216 .word X,X,X,X
217 .word X,X,X,X
218 .word X,X,X,X
219 .word X,X,X,X
220 .word X,X,X,X
221 .word X,X,X,X
222 .word X,X,X,X
223 .word X,X,X,X
224 .word X,X,X,X
225 .word X,X,X,X
226 .word X,X,X,X
227 .word X,X,X,X
228 .word X,X,X,X
229 .word X,X,X,X
230 .word X,X,X,X
231 .word X,X,X,X
232 .word X,X,X,X
233 .word X,X,X,X
234 .word X,X,X,X
235 .word X,X,X,X
236 .word X,X,X,X
237 .word X,X,X,X
238 .word X,X,X,X
239 .word X,X,X,X
240 .word X,X,X,X
241 .word X,X,X,X
242 .word X,X,X,X
243 .word X,X,X,X
244 .word X,X,X,X
245 .word X,X,X,X
246 .word X,X,X,X
247 .word X,X,X,X
248 .word X,X,X,X
249 .word X,X,X,X
250 .word X,X,X,X
251 .word X,X,X,X
252 .word X,X,X,X
253 .word X,X,X,X
254 .word X,X,X,X
255 .word X,X,X,X
256 .word X,X,X,X
257 .word X,X,X,X
258 .word X,X,X,X
259 .word X,X,X,X
260 .word X,X,X,X
261 .word X,X,X,X
262 .word X,X,X,X
263 .word X,X,X,X
264 .word X,X,X,X
265 .word X,X,X,X
266 .word X,X,X,X
267 .word X,X,X,X
268 .word X,X,X,X
269 .word X,X,X,X
270 .word X,X,X,X
271 .word X,X,X,X
272 .word X,X,X,X
273 .word X,X,X,X
274 .word X,X,X,X
275 .word X,X,X,X
276 .word X,X,X,X
277 .word X,X,X,X
278 .word X,X,X,X
279 .word X,X,X,X
280 .word X,X,X,X
281 .word X,X,X,X
282 .word X,X,X,X
283 .word X,X,X,X
284 .word X,X,X,X
285 .word X,X,X,X
286 .word X,X,X,X
287 .word X,X,X,X
288 .word X,X,X,X
289 .word X,X,X,X
290 .word X,X,X,X
291 .word X,X,X,X
292 .word X,X,X,X
293 .word X,X,X,X
294 .word X,X,X,X
295 .word X,X,X,X
296 .word X,X,X,X
297 .word X,X,X,X
298 .word X,X,X,X
299 .word X,X,X,X
300 .word X,X,X,X
301 .word X,X,X,X
302 .word X,X,X,X
303 .word X,X,X,X
304 .word X,X,X,X
305 .word X,X,X,X
306 .word X,X,X,X
307 .word X,X,X,X
308 .word X,X,X,X
309 .word X,X,X,X
310 .word X,X,X,X
311 .word X,X,X,X
312 .word X,X,X,X
313 .word X,X,X,X
314 .word X,X,X,X
315 .word X,X,X,X
316 .word X,X,X,X
317 .word X,X,X,X
318 .word X,X,X,X
319 .word X,X,X,X
320 .word X,X,X,X
321 .word X,X,X,X
322 .word X,X,X,X
323 .word X,X,X,X
324 .word X,X,X,X
325 .word X,X,X,X
326 .word X,X,X,X
327 .word X,X,X,X
328 .word X,X,X,X
329 .word X,X,X,X
330 .word X,X,X,X
331 .word X,X,X,X
332 .word X,X,X,X
333 .word X,X,X,X
334 .word X,X,X,X
335 .word X,X,X,X
336 .word X,X,X,X
337 .word X,X,X,X
338 .word X,X,X,X
339 .word X,X,X,X
340 .word X,X,X,X
341 .word X,X,X,X
342 .word X,X,X,X
343 .word X,X,X,X
344 .word X,X,X,X
345 .word X,X,X,X
346 .word X,X,X,X
347 .word X,X,X,X
348 .word X,X,X,X
349 .word X,X,X,X
350 .word X,X,X,X
351 .word X,X,X,X
352 .word X,X,X,X
353 .word X,X,X,X
354 .word X,X,X,X
355 .word X,X,X,X
356 .word X,X,X,X
357 .word X,X,X,X
358 .word X,X,X,X
359 .word X,X,X,X
360 .word X,X,X,X
361 .word X,X,X,X
362 .word X,X,X,X
363 .word X,X,X,X
364 .word X,X,X,X
365 .word X,X,X,X
366 .word X,X,X,X
367 .word X,X,X,X
368 .word X,X,X,X
369 .word X,X,X,X
370 .word X,X,X,X
371 .word X,X,X,X
372 .word X,X,X,X
373 .word X,X,X,X
374 .word X,X,X,X
375 .word X,X,X,X
376 .word X,X,X,X
377 .word X,X,X,X
378 .word X,X,X,X
379 .word X,X,X,X
380 .word X,X,X,X
381 .word X,X,X,X
382 .word X,X,X,X
383 .word X,X,X,X
384 .word X,X,X,X
385 .word X,X,X,X
386 .word X,X,X,X
387 .word X,X,X,X
388 .word X,X,X,X
389 .word X,X,X,X
390 .word X,X,X,X
391 .word X,X,X,X
392 .word X,X,X,X
393 .word X,X,X,X
394 .word X,X,X,X
395 .word X,X,X,X
396 .word X,X,X,X
397 .word X,X,X,X
398 .word X,X,X,X
399 .word X,X,X,X
400 .word X,X,X,X
401 .word X,X,X,X
402 .word X,X,X,X
403 .word X,X,X,X
404 .word X,X,X,X
405 .word X,X,X,X
406 .word X,X,X,X
407 .word X,X,X,X
408 .word X,X,X,X
409 .word X,X,X,X
410 .word X,X,X,X
411 .word X,X,X,X
412 .word X,X,X,X
413 .word X,X,X,X
414 .word X,X,X,X
415 .word X,X,X,X
416 .word X,X,X,X
417 .word X,X,X,X
418 .word X,X,X,X
419 .word X,X,X,X
420 .word X,X,X,X
421 .word X,X,X,X
422 .word X,X,X,X
423 .word X,X,X,X
424 .word X,X,X,X
425 .word X,X,X,X
426 .word X,X,X,X
427 .word X,X,X,X
428 .word X,X,X,X
429 .word X,X,X,X
430 .word X,X,X,X
431 .word X,X,X,X
432 .word X,X,X,X
433 .word X,X,X,X
434 .word X,X,X,X
435 .word X,X,X,X
436 .word X,X,X,X
437 .word X,X,X,X
438 .word X,X,X,X
439 .word X,X,X,X
440 .word X,X,X,X
441 .word X,X,X,X
442#endif
diff --git a/arch/hexagon/kernel/vm_ops.S b/arch/hexagon/kernel/vm_ops.S
deleted file mode 100644
index 9fb77b3f6cf..00000000000
--- a/arch/hexagon/kernel/vm_ops.S
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Hexagon VM instruction support
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/linkage.h>
22#include <asm/hexagon_vm.h>
23
24/*
25 * C wrappers for virtual machine "instructions". These
26 * could be, and perhaps some day will be, handled as in-line
27 * macros, but for tracing/debugging it's handy to have
28 * a single point of invocation for each of them.
29 * Conveniently, they take parameters and return values
30 * consistent with the ABI calling convention.
31 */
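/*
 * Illustrative usage sketch, added for clarity (not part of the original
 * file).  Because the wrappers follow the normal calling convention, the
 * C side can treat them as plain functions.  The exact prototypes are
 * assumed to live in <asm/hexagon_vm.h> and may differ in detail:
 *
 *	long old_ie;
 *
 *	old_ie = __vmsetie(VM_INT_DISABLE);	assumed: long __vmsetie(long)
 *	...work with VM interrupts disabled...
 *	__vmsetie(old_ie);			restore the previous state
 */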
32
33ENTRY(__vmrte)
34 trap1(#HVM_TRAP1_VMRTE);
35 jumpr R31;
36
37ENTRY(__vmsetvec)
38 trap1(#HVM_TRAP1_VMSETVEC);
39 jumpr R31;
40
41ENTRY(__vmsetie)
42 trap1(#HVM_TRAP1_VMSETIE);
43 jumpr R31;
44
45ENTRY(__vmgetie)
46 trap1(#HVM_TRAP1_VMGETIE);
47 jumpr R31;
48
49ENTRY(__vmintop)
50 trap1(#HVM_TRAP1_VMINTOP);
51 jumpr R31;
52
53ENTRY(__vmclrmap)
54 trap1(#HVM_TRAP1_VMCLRMAP);
55 jumpr R31;
56
57ENTRY(__vmnewmap)
58 r1 = #VM_NEWMAP_TYPE_PGTABLES;
59 trap1(#HVM_TRAP1_VMNEWMAP);
60 jumpr R31;
61
62ENTRY(__vmcache)
63 trap1(#HVM_TRAP1_VMCACHE);
64 jumpr R31;
65
66ENTRY(__vmgettime)
67 trap1(#HVM_TRAP1_VMGETTIME);
68 jumpr R31;
69
70ENTRY(__vmsettime)
71 trap1(#HVM_TRAP1_VMSETTIME);
72 jumpr R31;
73
74ENTRY(__vmwait)
75 trap1(#HVM_TRAP1_VMWAIT);
76 jumpr R31;
77
78ENTRY(__vmyield)
79 trap1(#HVM_TRAP1_VMYIELD);
80 jumpr R31;
81
82ENTRY(__vmstart)
83 trap1(#HVM_TRAP1_VMSTART);
84 jumpr R31;
85
86ENTRY(__vmstop)
87 trap1(#HVM_TRAP1_VMSTOP);
88 jumpr R31;
89
90ENTRY(__vmvpid)
91 trap1(#HVM_TRAP1_VMVPID);
92 jumpr R31;
93
94/* Probably not actually going to use these; see vm_entry.S */
95
96ENTRY(__vmsetregs)
97 trap1(#HVM_TRAP1_VMSETREGS);
98 jumpr R31;
99
100ENTRY(__vmgetregs)
101 trap1(#HVM_TRAP1_VMGETREGS);
102 jumpr R31;
diff --git a/arch/hexagon/kernel/vm_switch.S b/arch/hexagon/kernel/vm_switch.S
deleted file mode 100644
index 62c6df91b3b..00000000000
--- a/arch/hexagon/kernel/vm_switch.S
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * Context switch support for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <asm/asm-offsets.h>
22
23.text
24
25/*
26 * The register used as a fast-path thread information pointer
27 * is determined as a kernel configuration option. If it happens
28 * to be a callee-save register, we're going to be saving and
29 * restoring it twice here.
30 *
31 * This code anticipates a revised ABI where R20-23 are added
32 * to the set of callee-save registers, but this should be
33 * backward compatible to legacy tools.
34 */
35
36
37/*
38 * void switch_to(struct task_struct *prev,
39 * struct task_struct *next, struct task_struct *last);
40 */
41 .p2align 2
42 .globl __switch_to
43 .type __switch_to, @function
44
45/*
46 * When we exit the wormhole, we need to store the previous task
47 * in the new R0's pointer. Technically it should be R2, but they should
48 * be the same; seems like a legacy thing. In short, don't butcher
49 * R0, let it go back out unmolested.
50 */
51
52__switch_to:
53 /*
54 * Push callee-saves onto "prev" stack.
55 * Here, we're sneaky because the LR and FP
56 * storage of the thread_stack structure
57 * is automagically allocated by allocframe,
58 * so we pass struct size less 8.
59 */
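	/*
	 * Frame-layout sketch, added for clarity.  Field names and exact
	 * ordering are assumptions; the real offsets (_SWITCH_R1716 ..
	 * _SWITCH_FP, _SWITCH_STACK_SIZE) come from asm-offsets:
	 *
	 *	struct switch_stack_sketch {
	 *		unsigned long long r1716;
	 *		unsigned long long r1918;
	 *		unsigned long long r2120;
	 *		unsigned long long r2322;
	 *		unsigned long long r2524;
	 *		unsigned long long r2726;
	 *		unsigned long fp;
	 *		unsigned long lr;
	 *	};
	 *
	 * allocframe writes the FP/LR pair itself, which is why only
	 * _SWITCH_STACK_SIZE - 8 bytes are allocated explicitly below.
	 */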
60 allocframe(#(_SWITCH_STACK_SIZE - 8));
61 memd(R29+#(_SWITCH_R2726))=R27:26;
62 memd(R29+#(_SWITCH_R2524))=R25:24;
63 memd(R29+#(_SWITCH_R2322))=R23:22;
64 memd(R29+#(_SWITCH_R2120))=R21:20;
65 memd(R29+#(_SWITCH_R1918))=R19:18;
66 memd(R29+#(_SWITCH_R1716))=R17:16;
67 /* Stash thread_info pointer in task_struct */
68 memw(R0+#_TASK_THREAD_INFO) = THREADINFO_REG;
69 memw(R0 +#(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP)) = R29;
70 /* Switch to "next" stack and restore callee saves from there */
71 R29 = memw(R1 + #(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP));
72 {
73 R27:26 = memd(R29+#(_SWITCH_R2726));
74 R25:24 = memd(R29+#(_SWITCH_R2524));
75 }
76 {
77 R23:22 = memd(R29+#(_SWITCH_R2322));
78 R21:20 = memd(R29+#(_SWITCH_R2120));
79 }
80 {
81 R19:18 = memd(R29+#(_SWITCH_R1918));
82 R17:16 = memd(R29+#(_SWITCH_R1716));
83 }
84 {
85 /* THREADINFO_REG is currently one of the callee-saved regs
86 * above, and so be sure to re-load it last.
87 */
88 THREADINFO_REG = memw(R1 + #_TASK_THREAD_INFO);
89 R31:30 = memd(R29+#_SWITCH_FP);
90 }
91 {
92 R29 = add(R29,#_SWITCH_STACK_SIZE);
93 jumpr R31;
94 }
95 .size __switch_to, .-__switch_to
diff --git a/arch/hexagon/kernel/vm_vectors.S b/arch/hexagon/kernel/vm_vectors.S
deleted file mode 100644
index 620f42cc582..00000000000
--- a/arch/hexagon/kernel/vm_vectors.S
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Event jump tables
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <asm/hexagon_vm.h>
22
23.text
24
25/* This is registered early on to allow angel */
26.global _K_provisional_vec
27_K_provisional_vec:
28 jump 1f;
29 jump 1f;
30 jump 1f;
31 jump 1f;
32 jump 1f;
33 trap1(#HVM_TRAP1_VMRTE)
34 jump 1f;
35 jump 1f;
36
37
38.global _K_VM_event_vector
39_K_VM_event_vector:
401:
41 jump 1b; /* Reset */
42 jump _K_enter_machcheck;
43 jump _K_enter_genex;
44 jump 1b; /* 3 Rsvd */
45 jump 1b; /* 4 Rsvd */
46 jump _K_enter_trap0;
47 jump 1b; /* 6 Rsvd */
48 jump _K_enter_interrupt;
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
deleted file mode 100644
index 14e793f6abb..00000000000
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Linker script for Hexagon kernel
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#define LOAD_OFFSET PAGE_OFFSET
22
23#include <asm-generic/vmlinux.lds.h>
24#include <asm/asm-offsets.h> /* Most of the kernel defines are here */
25#include <asm/mem-layout.h> /* except for page_offset */
26#include <asm/cache.h> /* and now we're pulling cache line size */
27OUTPUT_ARCH(hexagon)
28ENTRY(stext)
29
30jiffies = jiffies_64;
31
32/*
33See asm-generic/vmlinux.lds.h for expansion of some of these macros.
34See asm-generic/sections.h for seemingly required labels.
35*/
36
37#define PAGE_SIZE _PAGE_SIZE
38
39/* This LOAD_OFFSET is temporary for debugging on the simulator; it may change
40 for hypervisor pseudo-physical memory. */
41
42
43SECTIONS
44{
45 . = PAGE_OFFSET + LOAD_ADDRESS;
46
47 __init_begin = .;
48 HEAD_TEXT_SECTION
49 INIT_TEXT_SECTION(PAGE_SIZE)
50 PERCPU_SECTION(L1_CACHE_BYTES)
51 __init_end = .;
52
53 . = ALIGN(_PAGE_SIZE);
54 _stext = .;
55 .text : AT(ADDR(.text) - LOAD_OFFSET) {
56 _text = .;
57 TEXT_TEXT
58 SCHED_TEXT
59 LOCK_TEXT
60 KPROBES_TEXT
61 *(.fixup)
62 }
63 _etext = .;
64
65 INIT_DATA_SECTION(PAGE_SIZE)
66
67 _sdata = .;
68 RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE)
69 RO_DATA_SECTION(PAGE_SIZE)
70 _edata = .;
71
72 EXCEPTION_TABLE(16)
73 NOTES
74
75 BSS_SECTION(_PAGE_SIZE, _PAGE_SIZE, _PAGE_SIZE)
76
77 _end = .;
78
79 /DISCARD/ : {
80 EXIT_TEXT
81 EXIT_DATA
82 EXIT_CALL
83 }
84
85 STABS_DEBUG
86 DWARF_DEBUG
87
88}
diff --git a/arch/hexagon/lib/Makefile b/arch/hexagon/lib/Makefile
deleted file mode 100644
index 874655e8567..00000000000
--- a/arch/hexagon/lib/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
1#
2# Makefile for hexagon-specific library files.
3#
4obj-y = checksum.o io.o memcpy.o memset.o
diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
deleted file mode 100644
index 8169f78a46a..00000000000
--- a/arch/hexagon/lib/checksum.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * Checksum functions for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21/* This was derived from arch/alpha/lib/checksum.c */
22
23
24#include <linux/module.h>
25#include <linux/string.h>
26
27#include <asm/byteorder.h>
28#include <net/checksum.h>
29#include <linux/uaccess.h>
30#include <asm/intrinsics.h>
31
32
33/* Vector value operations */
34#define SIGN(x, y) ((0x8000ULL*x)<<y)
35#define CARRY(x, y) ((0x0002ULL*x)<<y)
36#define SELECT(x, y) ((0x0001ULL*x)<<y)
37
38#define VR_NEGATE(a, b, c, d) (SIGN(a, 48) + SIGN(b, 32) + SIGN(c, 16) \
39 + SIGN(d, 0))
40#define VR_CARRY(a, b, c, d) (CARRY(a, 48) + CARRY(b, 32) + CARRY(c, 16) \
41 + CARRY(d, 0))
42#define VR_SELECT(a, b, c, d) (SELECT(a, 48) + SELECT(b, 32) + SELECT(c, 16) \
43 + SELECT(d, 0))
44
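/*
 * Worked values for the vector constants above (added for clarity):
 *
 *	VR_SELECT(1, 1, 1, 1) = 0x0001000100010001   (a one in each 16-bit lane)
 *	VR_NEGATE(1, 1, 1, 1) = 0x8000800080008000   (the sign bit of each lane)
 *	VR_SELECT(0, 0, 1, 1) = 0x0000000000010001
 *	VR_CARRY(0, 0, 1, 0)  = 0x0000000000020000
 *
 * XORing a value with VR_NEGATE(...) flips the sign bit of each selected
 * 16-bit lane, so the signed vrmpyh multiply-accumulate sums the lanes as
 * (lane - 0x8000); the constant offset is added back afterwards, e.g.
 * VR_CARRY(0, 0, 1, 0) = 0x20000 compensates for a four-lane sum.
 */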
45
46/* optimized HEXAGON V3 intrinsic version */
47static inline unsigned short from64to16(u64 x)
48{
49 u64 sum;
50
51 sum = HEXAGON_P_vrmpyh_PP(x^VR_NEGATE(1, 1, 1, 1),
52 VR_SELECT(1, 1, 1, 1));
53 sum += VR_CARRY(0, 0, 1, 0);
54 sum = HEXAGON_P_vrmpyh_PP(sum, VR_SELECT(0, 0, 1, 1));
55
56 return 0xFFFF & sum;
57}
58
59/*
60 * computes the checksum of the TCP/UDP pseudo-header
61 * returns a 16-bit checksum, already complemented.
62 */
63__sum16 csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
64 unsigned short len, unsigned short proto,
65 __wsum sum)
66{
67 return (__force __sum16)~from64to16(
68 (__force u64)saddr + (__force u64)daddr +
69 (__force u64)sum + ((len + proto) << 8));
70}
71
72__wsum csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
73 unsigned short len, unsigned short proto,
74 __wsum sum)
75{
76 u64 result;
77
78 result = (__force u64)saddr + (__force u64)daddr +
79 (__force u64)sum + ((len + proto) << 8);
80
81 /* Fold down to 32-bits so we don't lose in the typedef-less
82 network stack. */
83 /* 64 to 33 */
84 result = (result & 0xffffffffUL) + (result >> 32);
85 /* 33 to 32 */
86 result = (result & 0xffffffffUL) + (result >> 32);
87 return (__force __wsum)result;
88}
89EXPORT_SYMBOL(csum_tcpudp_nofold);
90
91/*
92 * Do a 64-bit checksum on an arbitrary memory area..
93 *
94 * This isn't a great routine, but it's not _horrible_ either. The
95 * inner loop could be unrolled a bit further, and there are better
96 * ways to do the carry, but this is reasonable.
97 */
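/*
 * Reference sketch, added for clarity; this is not the code that was built.
 * For a 2-byte-aligned buffer of even length, the optimized routine below
 * computes the same value as the classic ones'-complement sum:
 *
 *	static unsigned int do_csum_ref(const void *p, int len)
 *	{
 *		const unsigned short *w = p;
 *		unsigned long sum = 0;
 *
 *		while (len > 1) {
 *			sum += *w++;
 *			len -= 2;
 *		}
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return sum;
 *	}
 *
 * The intrinsics below do the same summation eight bytes at a time, with
 * extra pre/post handling for unaligned starts and odd-length tails.
 */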
98
99/* optimized HEXAGON intrinsic version, with over-read fixed */
100unsigned int do_csum(const void *voidptr, int len)
101{
102 u64 sum0, sum1, x0, x1, *ptr8_o, *ptr8_e, *ptr8;
103 int i, start, mid, end, mask;
104 const char *ptr = voidptr;
105 unsigned short *ptr2;
106 unsigned int *ptr4;
107
108 if (len <= 0)
109 return 0;
110
111 start = 0xF & (16-(((int) ptr) & 0xF)) ;
112 mask = 0x7fffffffUL >> HEXAGON_R_cl0_R(len);
113 start = start & mask ;
114
115 mid = len - start;
116 end = mid & 0xF;
117 mid = mid>>4;
118 sum0 = mid << 18;
119 sum1 = 0;
120
121 if (start & 1)
122 sum0 += (u64) (ptr[0] << 8);
123 ptr2 = (unsigned short *) &ptr[start & 1];
124 if (start & 2)
125 sum1 += (u64) ptr2[0];
126 ptr4 = (unsigned int *) &ptr[start & 3];
127 if (start & 4) {
128 sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
129 VR_NEGATE(0, 0, 1, 1)^((u64)ptr4[0]),
130 VR_SELECT(0, 0, 1, 1));
131 sum0 += VR_SELECT(0, 0, 1, 0);
132 }
133 ptr8 = (u64 *) &ptr[start & 7];
134 if (start & 8) {
135 sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
136 VR_NEGATE(1, 1, 1, 1)^(ptr8[0]),
137 VR_SELECT(1, 1, 1, 1));
138 sum1 += VR_CARRY(0, 0, 1, 0);
139 }
140 ptr8_o = (u64 *) (ptr + start);
141 ptr8_e = (u64 *) (ptr + start + 8);
142
143 if (mid) {
144 x0 = *ptr8_e; ptr8_e += 2;
145 x1 = *ptr8_o; ptr8_o += 2;
146 if (mid > 1)
147 for (i = 0; i < mid-1; i++) {
148 sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
149 x0^VR_NEGATE(1, 1, 1, 1),
150 VR_SELECT(1, 1, 1, 1));
151 sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
152 x1^VR_NEGATE(1, 1, 1, 1),
153 VR_SELECT(1, 1, 1, 1));
154 x0 = *ptr8_e; ptr8_e += 2;
155 x1 = *ptr8_o; ptr8_o += 2;
156 }
157 sum0 = HEXAGON_P_vrmpyhacc_PP(sum0, x0^VR_NEGATE(1, 1, 1, 1),
158 VR_SELECT(1, 1, 1, 1));
159 sum1 = HEXAGON_P_vrmpyhacc_PP(sum1, x1^VR_NEGATE(1, 1, 1, 1),
160 VR_SELECT(1, 1, 1, 1));
161 }
162
163 ptr4 = (unsigned int *) &ptr[start + (mid * 16) + (end & 8)];
164 if (end & 4) {
165 sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
166 VR_NEGATE(0, 0, 1, 1)^((u64)ptr4[0]),
167 VR_SELECT(0, 0, 1, 1));
168 sum1 += VR_SELECT(0, 0, 1, 0);
169 }
170 ptr2 = (unsigned short *) &ptr[start + (mid * 16) + (end & 12)];
171 if (end & 2)
172 sum0 += (u64) ptr2[0];
173
174 if (end & 1)
175 sum1 += (u64) ptr[start + (mid * 16) + (end & 14)];
176
177 ptr8 = (u64 *) &ptr[start + (mid * 16)];
178 if (end & 8) {
179 sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
180 VR_NEGATE(1, 1, 1, 1)^(ptr8[0]),
181 VR_SELECT(1, 1, 1, 1));
182 sum0 += VR_CARRY(0, 0, 1, 0);
183 }
184 sum0 = HEXAGON_P_vrmpyh_PP((sum0+sum1)^VR_NEGATE(0, 0, 0, 1),
185 VR_SELECT(0, 0, 1, 1));
186 sum0 += VR_NEGATE(0, 0, 0, 1);
187 sum0 = HEXAGON_P_vrmpyh_PP(sum0, VR_SELECT(0, 0, 1, 1));
188
189 if (start & 1)
190 sum0 = (sum0 << 8) | (0xFF & (sum0 >> 8));
191
192 return 0xFFFF & sum0;
193}
194
195/*
196 * copy from ds while checksumming, otherwise like csum_partial
197 */
198__wsum
199csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
200{
201 memcpy(dst, src, len);
202 return csum_partial(dst, len, sum);
203}
diff --git a/arch/hexagon/lib/io.c b/arch/hexagon/lib/io.c
deleted file mode 100644
index 885c9626d5e..00000000000
--- a/arch/hexagon/lib/io.c
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * I/O access functions for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <asm/io.h>
22
23/* These are all FIFO routines! */
24
25/*
26 * __raw_readsw - read words a short at a time
27 * @addr: source address
28 * @data: data address
29 * @len: number of shorts to read
30 */
31void __raw_readsw(const void __iomem *addr, void *data, int len)
32{
33 const volatile short int *src = (short int *) addr;
34 short int *dst = (short int *) data;
35
36 if ((u32)data & 0x1)
37 panic("unaligned pointer to readsw");
38
39 while (len-- > 0)
40 *dst++ = *src;
41
42}
43
44/*
45 * __raw_writesw - write words a short at a time
46 * @addr: destination address
47 * @data: source data address
48 * @len: number of shorts to write
49 */
50void __raw_writesw(void __iomem *addr, const void *data, int len)
51{
52 const short int *src = (short int *)data;
53 volatile short int *dst = (short int *)addr;
54
55 if ((u32)data & 0x1)
56 panic("unaligned pointer to writesw");
57
58 while (len-- > 0)
59 *dst = *src++;
60
61
62}
63
64/* Pretty sure len is pre-adjusted for the length of the access already */
65void __raw_readsl(const void __iomem *addr, void *data, int len)
66{
67 const volatile long *src = (long *) addr;
68 long *dst = (long *) data;
69
70 if ((u32)data & 0x3)
71 panic("unaligned pointer to readsl");
72
73 while (len-- > 0)
74 *dst++ = *src;
75
76
77}
78
79void __raw_writesl(void __iomem *addr, const void *data, int len)
80{
81 const long *src = (long *)data;
82 volatile long *dst = (long *)addr;
83
84 if ((u32)data & 0x3)
85 panic("unaligned pointer to writesl");
86
87 while (len-- > 0)
88 *dst = *src++;
89
90
91}
diff --git a/arch/hexagon/lib/memcpy.S b/arch/hexagon/lib/memcpy.S
deleted file mode 100644
index 81c561c4b4d..00000000000
--- a/arch/hexagon/lib/memcpy.S
+++ /dev/null
@@ -1,543 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 */
19
20/*
21 * Description
22 *
23 * library function for memcpy where length bytes are copied from
24 * ptr_in to ptr_out. ptr_out is returned unchanged.
25 * Allows any combination of alignment on input and output pointers
26 * and length from 0 to 2^32-1
27 *
28 * Restrictions
29 * The arrays should not overlap; the program will produce undefined output
30 * if they do.
31 * For blocks less than 16 bytes a byte by byte copy is performed. For
32 * 8byte alignments, and length multiples, a dword copy is performed up to
33 * 96bytes
34 * History
35 *
36 * DJH 5/15/09 Initial version 1.0
37 * DJH 6/ 1/09 Version 1.1 modified ABI to include R16-R19
38 * DJH 7/12/09 Version 1.2 optimized codesize down to 760 was 840
39 * DJH 10/14/09 Version 1.3 added special loop for aligned case, was
40 * overreading bloated codesize back up to 892
41 * DJH 4/20/10 Version 1.4 fixed Ldword_loop_epilog loop to prevent loads
42 * occurring if only 1 left outstanding, fixes bug
43 * # 3888, corrected for all alignments. Peeled off
44 * 1 32byte chunk from kernel loop and extended 8byte
45 * loop at end to solve all combinations and prevent
46 * over read. Fixed Ldword_loop_prolog to prevent
47 * overread for blocks less than 48bytes. Reduced
48 * codesize to 752 bytes
49 * DJH 4/21/10 version 1.5 1.4 fix broke code for input block ends not
50 * aligned to dword boundaries, underwriting by 1
51 * byte, added detection for this and fixed. A
52 * little bloat.
53 * DJH 4/23/10 version 1.6 corrected stack error, R20 was not being restored
54 * always, fixed the error of R20 being modified
55 * before it was being saved
56 * Natural c model
57 * ===============
58 * void * memcpy(char * ptr_out, char * ptr_in, int length) {
59 * int i;
60 * if(length) for(i=0; i < length; i++) { ptr_out[i] = ptr_in[i]; }
61 * return(ptr_out);
62 * }
63 *
64 * Optimized memcpy function
65 * =========================
66 * void * memcpy(char * ptr_out, char * ptr_in, int len) {
67 * int i, prolog, kernel, epilog, mask;
68 * u8 offset;
69 * s64 data0, dataF8, data70;
70 *
71 * s64 * ptr8_in;
72 * s64 * ptr8_out;
73 * s32 * ptr4;
74 * s16 * ptr2;
75 *
76 * offset = ((int) ptr_in) & 7;
77 * ptr8_in = (s64 *) &ptr_in[-offset]; //read in the aligned pointers
78 *
79 * data70 = *ptr8_in++;
80 * dataF8 = *ptr8_in++;
81 *
82 * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
83 *
84 * prolog = 32 - ((int) ptr_out);
85 * mask = 0x7fffffff >> HEXAGON_R_cl0_R(len);
86 * prolog = prolog & mask;
87 * kernel = len - prolog;
88 * epilog = kernel & 0x1F;
89 * kernel = kernel>>5;
90 *
91 * if (prolog & 1) { ptr_out[0] = (u8) data0; data0 >>= 8; ptr_out += 1;}
92 * ptr2 = (s16 *) &ptr_out[0];
93 * if (prolog & 2) { ptr2[0] = (u16) data0; data0 >>= 16; ptr_out += 2;}
94 * ptr4 = (s32 *) &ptr_out[0];
95 * if (prolog & 4) { ptr4[0] = (u32) data0; data0 >>= 32; ptr_out += 4;}
96 *
97 * offset = offset + (prolog & 7);
98 * if (offset >= 8) {
99 * data70 = dataF8;
100 * dataF8 = *ptr8_in++;
101 * }
102 * offset = offset & 0x7;
103 *
104 * prolog = prolog >> 3;
105 * if (prolog) for (i=0; i < prolog; i++) {
106 * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
107 * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
108 * data70 = dataF8;
109 * dataF8 = *ptr8_in++;
110 * }
111 * if(kernel) { kernel -= 1; epilog += 32; }
112 * if(kernel) for(i=0; i < kernel; i++) {
113 * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
114 * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
115 * data70 = *ptr8_in++;
116 *
117 * data0 = HEXAGON_P_valignb_PPp(data70, dataF8, offset);
118 * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
119 * dataF8 = *ptr8_in++;
120 *
121 * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
122 * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
123 * data70 = *ptr8_in++;
124 *
125 * data0 = HEXAGON_P_valignb_PPp(data70, dataF8, offset);
126 * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
127 * dataF8 = *ptr8_in++;
128 * }
129 * epilogdws = epilog >> 3;
130 * if (epilogdws) for (i=0; i < epilogdws; i++) {
131 * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
132 * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
133 * data70 = dataF8;
134 * dataF8 = *ptr8_in++;
135 * }
136 * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
137 *
138 * ptr4 = (s32 *) &ptr_out[0];
139 * if (epilog & 4) { ptr4[0] = (u32) data0; data0 >>= 32; ptr_out += 4;}
140 * ptr2 = (s16 *) &ptr_out[0];
141 * if (epilog & 2) { ptr2[0] = (u16) data0; data0 >>= 16; ptr_out += 2;}
142 * if (epilog & 1) { *ptr_out++ = (u8) data0; }
143 *
144 * return(ptr_out - len);
145 * }
146 *
147 * Codesize : 784 bytes
148 */
149
150
151#define ptr_out R0 /* destination pointer */
152#define ptr_in R1 /* source pointer */
153#define len R2 /* length of copy in bytes */
154
155#define data70 R13:12 /* lo 8 bytes of non-aligned transfer */
156#define dataF8 R11:10 /* hi 8 bytes of non-aligned transfer */
157#define ldata0 R7:6 /* even 8 bytes chunks */
158#define ldata1 R25:24 /* odd 8 bytes chunks */
159#define data1 R7 /* lower 8 bytes of ldata1 */
160#define data0 R6 /* lower 8 bytes of ldata0 */
161
162#define ifbyte p0 /* if transfer has bytes in epilog/prolog */
163#define ifhword p0 /* if transfer has shorts in epilog/prolog */
164#define ifword p0 /* if transfer has words in epilog/prolog */
165#define noprolog p0 /* no prolog, xfer starts at 32byte */
166#define nokernel p1 /* no 32byte multiple block in the transfer */
167#define noepilog p0 /* no epilog, xfer ends on 32byte boundary */
168#define align p2 /* alignment of input rel to 8byte boundary */
169#define kernel1 p0 /* kernel count == 1 */
170
171#define dalign R25 /* rel alignment of input to output data */
172#define star3 R16 /* number bytes in prolog - dwords */
173#define rest R8 /* length - prolog bytes */
174#define back R7 /* nr bytes > dword boundary in src block */
175#define epilog R3 /* bytes in epilog */
176#define inc R15:14 /* inc kernel by -1 and prefetch ptr by 32 */
177#define kernel R4 /* number of 32byte chunks in kernel */
178#define ptr_in_p_128 R5 /* pointer for prefetch of input data */
179#define mask R8 /* mask used to determine prolog size */
180#define shift R8 /* used to work a shifter to extract bytes */
181#define shift2 R5 /* in epilog to work a shifter to extract bytes */
182#define prolog R15 /* bytes in prolog */
183#define epilogdws R15 /* number dwords in epilog */
184#define shiftb R14 /* used to extract bytes */
185#define offset R9 /* same as align in reg */
186#define ptr_out_p_32 R17 /* pointer to output dczero */
187#define align888 R14 /* if simple dword loop can be used */
188#define len8 R9 /* number of dwords in length */
189#define over R20 /* nr of bytes > last inp buf dword boundary */
190
191#define ptr_in_p_128kernel R5:4 /* packed fetch pointer & kernel cnt */
192
193 .section .text
194 .p2align 4
195 .global memcpy
196 .type memcpy, @function
197memcpy:
198{
199 p2 = cmp.eq(len, #0); /* =0 */
200 align888 = or(ptr_in, ptr_out); /* %8 < 97 */
201 p0 = cmp.gtu(len, #23); /* %1, <24 */
202 p1 = cmp.eq(ptr_in, ptr_out); /* attempt to overwrite self */
203}
204{
205 p1 = or(p2, p1);
206 p3 = cmp.gtu(len, #95); /* %8 < 97 */
207 align888 = or(align888, len); /* %8 < 97 */
208 len8 = lsr(len, #3); /* %8 < 97 */
209}
210{
211 dcfetch(ptr_in); /* zero/ptrin=ptrout causes fetch */
212 p2 = bitsclr(align888, #7); /* %8 < 97 */
213 if(p1) jumpr r31; /* =0 */
214}
215{
216 p2 = and(p2,!p3); /* %8 < 97 */
217 if (p2.new) len = add(len, #-8); /* %8 < 97 */
218 if (p2.new) jump:NT .Ldwordaligned; /* %8 < 97 */
219}
220{
221 if(!p0) jump .Lbytes23orless; /* %1, <24 */
222 mask.l = #LO(0x7fffffff);
223 /* all bytes before line multiples of data */
224 prolog = sub(#0, ptr_out);
225}
226{
227 /* save r31 on stack, decrement sp by 16 */
228 allocframe(#24);
229 mask.h = #HI(0x7fffffff);
230 ptr_in_p_128 = add(ptr_in, #32);
231 back = cl0(len);
232}
233{
234 memd(sp+#0) = R17:16; /* save r16,r17 on stack */
235 r31.l = #LO(.Lmemcpy_return); /* set up final return pointer */
236 prolog &= lsr(mask, back);
237 offset = and(ptr_in, #7);
238}
239{
240 memd(sp+#8) = R25:24; /* save r25,r24 on stack */
241 dalign = sub(ptr_out, ptr_in);
242 r31.h = #HI(.Lmemcpy_return); /* set up final return pointer */
243}
244{
245 /* see if the input buffer end is aligned */
246 over = add(len, ptr_in);
247 back = add(len, offset);
248 memd(sp+#16) = R21:20; /* save r20,r21 on stack */
249}
250{
251 noprolog = bitsclr(prolog, #7);
252 prolog = and(prolog, #31);
253 dcfetch(ptr_in_p_128);
254 ptr_in_p_128 = add(ptr_in_p_128, #32);
255}
256{
257 kernel = sub(len, prolog);
258 shift = asl(prolog, #3);
259 star3 = and(prolog, #7);
260 ptr_in = and(ptr_in, #-8);
261}
262{
263 prolog = lsr(prolog, #3);
264 epilog = and(kernel, #31);
265 ptr_out_p_32 = add(ptr_out, prolog);
266 over = and(over, #7);
267}
268{
269 p3 = cmp.gtu(back, #8);
270 kernel = lsr(kernel, #5);
271 dcfetch(ptr_in_p_128);
272 ptr_in_p_128 = add(ptr_in_p_128, #32);
273}
274{
275 p1 = cmp.eq(prolog, #0);
276 if(!p1.new) prolog = add(prolog, #1);
277 dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
278 ptr_in_p_128 = add(ptr_in_p_128, #32);
279}
280{
281 nokernel = cmp.eq(kernel,#0);
282 dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
283 ptr_in_p_128 = add(ptr_in_p_128, #32);
284 shiftb = and(shift, #8);
285}
286{
287 dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
288 ptr_in_p_128 = add(ptr_in_p_128, #32);
289 if(nokernel) jump .Lskip64;
290 p2 = cmp.eq(kernel, #1); /* skip ovr if kernel == 0 */
291}
292{
293 dczeroa(ptr_out_p_32);
294 /* don't advance pointer */
295 if(!p2) ptr_out_p_32 = add(ptr_out_p_32, #32);
296}
297{
298 dalign = and(dalign, #31);
299 dczeroa(ptr_out_p_32);
300}
301.Lskip64:
302{
303 data70 = memd(ptr_in++#16);
304 if(p3) dataF8 = memd(ptr_in+#8);
305 if(noprolog) jump .Lnoprolog32;
306 align = offset;
307}
308/* upto initial 7 bytes */
309{
310 ldata0 = valignb(dataF8, data70, align);
311 ifbyte = tstbit(shift,#3);
312 offset = add(offset, star3);
313}
314{
315 if(ifbyte) memb(ptr_out++#1) = data0;
316 ldata0 = lsr(ldata0, shiftb);
317 shiftb = and(shift, #16);
318 ifhword = tstbit(shift,#4);
319}
320{
321 if(ifhword) memh(ptr_out++#2) = data0;
322 ldata0 = lsr(ldata0, shiftb);
323 ifword = tstbit(shift,#5);
324 p2 = cmp.gtu(offset, #7);
325}
326{
327 if(ifword) memw(ptr_out++#4) = data0;
328 if(p2) data70 = dataF8;
329 if(p2) dataF8 = memd(ptr_in++#8); /* another 8 bytes */
330 align = offset;
331}
332.Lnoprolog32:
333{
334 p3 = sp1loop0(.Ldword_loop_prolog, prolog)
335 rest = sub(len, star3); /* what's left after the loop */
336 p0 = cmp.gt(over, #0);
337}
338 if(p0) rest = add(rest, #16);
339.Ldword_loop_prolog:
340{
341 if(p3) memd(ptr_out++#8) = ldata0;
342 ldata0 = valignb(dataF8, data70, align);
343 p0 = cmp.gt(rest, #16);
344}
345{
346 data70 = dataF8;
347 if(p0) dataF8 = memd(ptr_in++#8);
348 rest = add(rest, #-8);
349}:endloop0
350.Lkernel:
351{
352 /* kernel is at least 32bytes */
353 p3 = cmp.gtu(kernel, #0);
354 /* last itn. remove edge effects */
355 if(p3.new) kernel = add(kernel, #-1);
356 /* dealt with in last dword loop */
357 if(p3.new) epilog = add(epilog, #32);
358}
359{
360 nokernel = cmp.eq(kernel, #0); /* after adjustment, recheck */
361 if(nokernel.new) jump:NT .Lepilog; /* likely not taken */
362 inc = combine(#32, #-1);
363 p3 = cmp.gtu(dalign, #24);
364}
365{
366 if(p3) jump .Lodd_alignment;
367}
368{
369 loop0(.Loword_loop_25to31, kernel);
370 kernel1 = cmp.gtu(kernel, #1);
371 rest = kernel;
372}
373 .falign
374.Loword_loop_25to31:
375{
376 dcfetch(ptr_in_p_128); /* prefetch 4 lines ahead */
377 if(kernel1) ptr_out_p_32 = add(ptr_out_p_32, #32);
378}
379{
380 dczeroa(ptr_out_p_32); /* reserve the next 32bytes in cache */
381 p3 = cmp.eq(kernel, rest);
382}
383{
384 /* kernel -= 1 */
385 ptr_in_p_128kernel = vaddw(ptr_in_p_128kernel, inc);
386 /* kill write on first iteration */
387 if(!p3) memd(ptr_out++#8) = ldata1;
388 ldata1 = valignb(dataF8, data70, align);
389 data70 = memd(ptr_in++#8);
390}
391{
392 memd(ptr_out++#8) = ldata0;
393 ldata0 = valignb(data70, dataF8, align);
394 dataF8 = memd(ptr_in++#8);
395}
396{
397 memd(ptr_out++#8) = ldata1;
398 ldata1 = valignb(dataF8, data70, align);
399 data70 = memd(ptr_in++#8);
400}
401{
402 memd(ptr_out++#8) = ldata0;
403 ldata0 = valignb(data70, dataF8, align);
404 dataF8 = memd(ptr_in++#8);
405 kernel1 = cmp.gtu(kernel, #1);
406}:endloop0
407{
408 memd(ptr_out++#8) = ldata1;
409 jump .Lepilog;
410}
411.Lodd_alignment:
412{
413 loop0(.Loword_loop_00to24, kernel);
414 kernel1 = cmp.gtu(kernel, #1);
415 rest = add(kernel, #-1);
416}
417 .falign
418.Loword_loop_00to24:
419{
420 dcfetch(ptr_in_p_128); /* prefetch 4 lines ahead */
421 ptr_in_p_128kernel = vaddw(ptr_in_p_128kernel, inc);
422 if(kernel1) ptr_out_p_32 = add(ptr_out_p_32, #32);
423}
424{
425 dczeroa(ptr_out_p_32); /* reserve the next 32bytes in cache */
426}
427{
428 memd(ptr_out++#8) = ldata0;
429 ldata0 = valignb(dataF8, data70, align);
430 data70 = memd(ptr_in++#8);
431}
432{
433 memd(ptr_out++#8) = ldata0;
434 ldata0 = valignb(data70, dataF8, align);
435 dataF8 = memd(ptr_in++#8);
436}
437{
438 memd(ptr_out++#8) = ldata0;
439 ldata0 = valignb(dataF8, data70, align);
440 data70 = memd(ptr_in++#8);
441}
442{
443 memd(ptr_out++#8) = ldata0;
444 ldata0 = valignb(data70, dataF8, align);
445 dataF8 = memd(ptr_in++#8);
446 kernel1 = cmp.gtu(kernel, #1);
447}:endloop0
448.Lepilog:
449{
450 noepilog = cmp.eq(epilog,#0);
451 epilogdws = lsr(epilog, #3);
452 kernel = and(epilog, #7);
453}
454{
455 if(noepilog) jumpr r31;
456 if(noepilog) ptr_out = sub(ptr_out, len);
457 p3 = cmp.eq(epilogdws, #0);
458 shift2 = asl(epilog, #3);
459}
460{
461 shiftb = and(shift2, #32);
462 ifword = tstbit(epilog,#2);
463 if(p3) jump .Lepilog60;
464 if(!p3) epilog = add(epilog, #-16);
465}
466{
467 loop0(.Ldword_loop_epilog, epilogdws);
468 /* stop criterion is the low bits; if 0, use 8 */
469 p3 = cmp.eq(kernel, #0);
470 if(p3.new) kernel= #8;
471 p1 = cmp.gt(over, #0);
472}
473 /* if not aligned to end of buffer execute 1 more iteration */
474 if(p1) kernel= #0;
475.Ldword_loop_epilog:
476{
477 memd(ptr_out++#8) = ldata0;
478 ldata0 = valignb(dataF8, data70, align);
479 p3 = cmp.gt(epilog, kernel);
480}
481{
482 data70 = dataF8;
483 if(p3) dataF8 = memd(ptr_in++#8);
484 epilog = add(epilog, #-8);
485}:endloop0
486/* copy last 7 bytes */
487.Lepilog60:
488{
489 if(ifword) memw(ptr_out++#4) = data0;
490 ldata0 = lsr(ldata0, shiftb);
491 ifhword = tstbit(epilog,#1);
492 shiftb = and(shift2, #16);
493}
494{
495 if(ifhword) memh(ptr_out++#2) = data0;
496 ldata0 = lsr(ldata0, shiftb);
497 ifbyte = tstbit(epilog,#0);
498 if(ifbyte.new) len = add(len, #-1);
499}
500{
501 if(ifbyte) memb(ptr_out) = data0;
502 ptr_out = sub(ptr_out, len); /* return dest pointer */
503 jumpr r31;
504}
505/* do byte copy for small n */
506.Lbytes23orless:
507{
508 p3 = sp1loop0(.Lbyte_copy, len);
509 len = add(len, #-1);
510}
511.Lbyte_copy:
512{
513 data0 = memb(ptr_in++#1);
514 if(p3) memb(ptr_out++#1) = data0;
515}:endloop0
516{
517 memb(ptr_out) = data0;
518 ptr_out = sub(ptr_out, len);
519 jumpr r31;
520}
521/* do dword copies for aligned in, out and length */
522.Ldwordaligned:
523{
524 p3 = sp1loop0(.Ldword_copy, len8);
525}
526.Ldword_copy:
527{
528 if(p3) memd(ptr_out++#8) = ldata0;
529 ldata0 = memd(ptr_in++#8);
530}:endloop0
531{
532 memd(ptr_out) = ldata0;
533 ptr_out = sub(ptr_out, len);
534 jumpr r31; /* return to function caller */
535}
536.Lmemcpy_return:
537 r21:20 = memd(sp+#16); /* restore r20+r21 */
538{
539 r25:24 = memd(sp+#8); /* restore r24+r25 */
540 r17:16 = memd(sp+#0); /* restore r16+r17 */
541}
542 deallocframe; /* restore r31 and increment stack by 16 */
543 jumpr r31
diff --git a/arch/hexagon/lib/memset.S b/arch/hexagon/lib/memset.S
deleted file mode 100644
index 9341889ea3f..00000000000
--- a/arch/hexagon/lib/memset.S
+++ /dev/null
@@ -1,315 +0,0 @@
1/*
2 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19
20/* HEXAGON assembly optimized memset */
21/* Replaces the standard library function memset */
22
23
24 .macro HEXAGON_OPT_FUNC_BEGIN name
25 .text
26 .p2align 4
27 .globl \name
28 .type \name, @function
29\name:
30 .endm
31
32 .macro HEXAGON_OPT_FUNC_FINISH name
33 .size \name, . - \name
34 .endm
35
36/* FUNCTION: memset (v2 version) */
37#if __HEXAGON_ARCH__ < 3
38HEXAGON_OPT_FUNC_BEGIN memset
39 {
40 r6 = #8
41 r7 = extractu(r0, #3 , #0)
42 p0 = cmp.eq(r2, #0)
43 p1 = cmp.gtu(r2, #7)
44 }
45 {
46 r4 = vsplatb(r1)
47 r8 = r0 /* leave r0 intact for return val */
48 r9 = sub(r6, r7) /* bytes until double alignment */
49 if p0 jumpr r31 /* count == 0, so return */
50 }
51 {
52 r3 = #0
53 r7 = #0
54 p0 = tstbit(r9, #0)
55 if p1 jump 2f /* skip byte loop */
56 }
57
58/* less than 8 bytes to set, so just set a byte at a time and return */
59
60 loop0(1f, r2) /* byte loop */
61 .falign
621: /* byte loop */
63 {
64 memb(r8++#1) = r4
65 }:endloop0
66 jumpr r31
67 .falign
682: /* skip byte loop */
69 {
70 r6 = #1
71 p0 = tstbit(r9, #1)
72 p1 = cmp.eq(r2, #1)
73 if !p0 jump 3f /* skip initial byte store */
74 }
75 {
76 memb(r8++#1) = r4
77 r3:2 = sub(r3:2, r7:6)
78 if p1 jumpr r31
79 }
80 .falign
813: /* skip initial byte store */
82 {
83 r6 = #2
84 p0 = tstbit(r9, #2)
85 p1 = cmp.eq(r2, #2)
86 if !p0 jump 4f /* skip initial half store */
87 }
88 {
89 memh(r8++#2) = r4
90 r3:2 = sub(r3:2, r7:6)
91 if p1 jumpr r31
92 }
93 .falign
944: /* skip initial half store */
95 {
96 r6 = #4
97 p0 = cmp.gtu(r2, #7)
98 p1 = cmp.eq(r2, #4)
99 if !p0 jump 5f /* skip initial word store */
100 }
101 {
102 memw(r8++#4) = r4
103 r3:2 = sub(r3:2, r7:6)
104 p0 = cmp.gtu(r2, #11)
105 if p1 jumpr r31
106 }
107 .falign
1085: /* skip initial word store */
109 {
110 r10 = lsr(r2, #3)
111 p1 = cmp.eq(r3, #1)
112 if !p0 jump 7f /* skip double loop */
113 }
114 {
115 r5 = r4
116 r6 = #8
117 loop0(6f, r10) /* double loop */
118 }
119
120/* set bytes a double word at a time */
121
122 .falign
1236: /* double loop */
124 {
125 memd(r8++#8) = r5:4
126 r3:2 = sub(r3:2, r7:6)
127 p1 = cmp.eq(r2, #8)
128 }:endloop0
129 .falign
1307: /* skip double loop */
131 {
132 p0 = tstbit(r2, #2)
133 if p1 jumpr r31
134 }
135 {
136 r6 = #4
137 p0 = tstbit(r2, #1)
138 p1 = cmp.eq(r2, #4)
139 if !p0 jump 8f /* skip final word store */
140 }
141 {
142 memw(r8++#4) = r4
143 r3:2 = sub(r3:2, r7:6)
144 if p1 jumpr r31
145 }
146 .falign
1478: /* skip final word store */
148 {
149 p1 = cmp.eq(r2, #2)
150 if !p0 jump 9f /* skip final half store */
151 }
152 {
153 memh(r8++#2) = r4
154 if p1 jumpr r31
155 }
156 .falign
1579: /* skip final half store */
158 {
159 memb(r8++#1) = r4
160 jumpr r31
161 }
162HEXAGON_OPT_FUNC_FINISH memset
163#endif
164
165
166/* FUNCTION: memset (v3 and higher version) */
167#if __HEXAGON_ARCH__ >= 3
168HEXAGON_OPT_FUNC_BEGIN memset
169 {
170 r7=vsplatb(r1)
171 r6 = r0
172 if (r2==#0) jump:nt .L1
173 }
174 {
175 r5:4=combine(r7,r7)
176 p0 = cmp.gtu(r2,#8)
177 if (p0.new) jump:nt .L3
178 }
179 {
180 r3 = r0
181 loop0(.L47,r2)
182 }
183 .falign
184.L47:
185 {
186 memb(r3++#1) = r1
187 }:endloop0 /* start=.L47 */
188 jumpr r31
189.L3:
190 {
191 p0 = tstbit(r0,#0)
192 if (!p0.new) jump:nt .L8
193 p1 = cmp.eq(r2, #1)
194 }
195 {
196 r6 = add(r0, #1)
197 r2 = add(r2,#-1)
198 memb(r0) = r1
199 if (p1) jump .L1
200 }
201.L8:
202 {
203 p0 = tstbit(r6,#1)
204 if (!p0.new) jump:nt .L10
205 }
206 {
207 r2 = add(r2,#-2)
208 memh(r6++#2) = r7
209 p0 = cmp.eq(r2, #2)
210 if (p0.new) jump:nt .L1
211 }
212.L10:
213 {
214 p0 = tstbit(r6,#2)
215 if (!p0.new) jump:nt .L12
216 }
217 {
218 r2 = add(r2,#-4)
219 memw(r6++#4) = r7
220 p0 = cmp.eq(r2, #4)
221 if (p0.new) jump:nt .L1
222 }
223.L12:
224 {
225 p0 = cmp.gtu(r2,#127)
226 if (!p0.new) jump:nt .L14
227 }
228 r3 = and(r6,#31)
229 if (r3==#0) jump:nt .L17
230 {
231 memd(r6++#8) = r5:4
232 r2 = add(r2,#-8)
233 }
234 r3 = and(r6,#31)
235 if (r3==#0) jump:nt .L17
236 {
237 memd(r6++#8) = r5:4
238 r2 = add(r2,#-8)
239 }
240 r3 = and(r6,#31)
241 if (r3==#0) jump:nt .L17
242 {
243 memd(r6++#8) = r5:4
244 r2 = add(r2,#-8)
245 }
246.L17:
247 {
248 r3 = lsr(r2,#5)
249 if (r1!=#0) jump:nt .L18
250 }
251 {
252 r8 = r3
253 r3 = r6
254 loop0(.L46,r3)
255 }
256 .falign
257.L46:
258 {
259 dczeroa(r6)
260 r6 = add(r6,#32)
261 r2 = add(r2,#-32)
262 }:endloop0 /* start=.L46 */
263.L14:
264 {
265 p0 = cmp.gtu(r2,#7)
266 if (!p0.new) jump:nt .L28
267 r8 = lsr(r2,#3)
268 }
269 loop0(.L44,r8)
270 .falign
271.L44:
272 {
273 memd(r6++#8) = r5:4
274 r2 = add(r2,#-8)
275 }:endloop0 /* start=.L44 */
276.L28:
277 {
278 p0 = tstbit(r2,#2)
279 if (!p0.new) jump:nt .L33
280 }
281 {
282 r2 = add(r2,#-4)
283 memw(r6++#4) = r7
284 }
285.L33:
286 {
287 p0 = tstbit(r2,#1)
288 if (!p0.new) jump:nt .L35
289 }
290 {
291 r2 = add(r2,#-2)
292 memh(r6++#2) = r7
293 }
294.L35:
295 p0 = cmp.eq(r2,#1)
296 if (p0) memb(r6) = r1
297.L1:
298 jumpr r31
299.L18:
300 loop0(.L45,r3)
301 .falign
302.L45:
303 dczeroa(r6)
304 {
305 memd(r6++#8) = r5:4
306 r2 = add(r2,#-32)
307 }
308 memd(r6++#8) = r5:4
309 memd(r6++#8) = r5:4
310 {
311 memd(r6++#8) = r5:4
312 }:endloop0 /* start=.L45 */
313 jump .L14
314HEXAGON_OPT_FUNC_FINISH memset
315#endif
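The v3 memset above relies on "r7 = vsplatb(r1)" and "r5:4 = combine(r7,r7)" to replicate the fill byte across 32 and 64 bits before the memw/memd and dczeroa stores. A hedged C illustration of that setup (plain C, not the kernel routine):

#include <stdint.h>

/* Replicate a fill byte into every lane of a 32-bit word, as vsplatb does. */
static uint32_t splat_byte32(uint8_t c)
{
	return (uint32_t)c * 0x01010101u;
}

/* Pair two splatted words into 64 bits, as "r5:4 = combine(r7,r7)" does,
 * so a single 8-byte store writes the pattern in one go. */
static uint64_t splat_byte64(uint8_t c)
{
	uint64_t w = splat_byte32(c);
	return (w << 32) | w;
}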
diff --git a/arch/hexagon/mm/Makefile b/arch/hexagon/mm/Makefile
deleted file mode 100644
index 1a0be4d576e..00000000000
--- a/arch/hexagon/mm/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1#
2# Makefile for Hexagon memory management subsystem
3#
4
5obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
6obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c
deleted file mode 100644
index fe14ccf2856..00000000000
--- a/arch/hexagon/mm/cache.c
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * Cache management functions for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/mm.h>
22#include <asm/cacheflush.h>
23#include <asm/hexagon_vm.h>
24
25#define spanlines(start, end) \
26 (((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)
27
28void flush_dcache_range(unsigned long start, unsigned long end)
29{
30 unsigned long lines = spanlines(start, end-1);
31 unsigned long i, flags;
32
33 start &= ~(LINESIZE - 1);
34
35 local_irq_save(flags);
36
37 for (i = 0; i < lines; i++) {
38 __asm__ __volatile__ (
39 " dccleaninva(%0); "
40 :
41 : "r" (start)
42 );
43 start += LINESIZE;
44 }
45 local_irq_restore(flags);
46}
47
48void flush_icache_range(unsigned long start, unsigned long end)
49{
50 unsigned long lines = spanlines(start, end-1);
51 unsigned long i, flags;
52
53 start &= ~(LINESIZE - 1);
54
55 local_irq_save(flags);
56
57 for (i = 0; i < lines; i++) {
58 __asm__ __volatile__ (
59 " dccleana(%0); "
60 " icinva(%0); "
61 :
62 : "r" (start)
63 );
64 start += LINESIZE;
65 }
66 __asm__ __volatile__ (
67 "isync"
68 );
69 local_irq_restore(flags);
70}
71
72void hexagon_clean_dcache_range(unsigned long start, unsigned long end)
73{
74 unsigned long lines = spanlines(start, end-1);
75 unsigned long i, flags;
76
77 start &= ~(LINESIZE - 1);
78
79 local_irq_save(flags);
80
81 for (i = 0; i < lines; i++) {
82 __asm__ __volatile__ (
83 " dccleana(%0); "
84 :
85 : "r" (start)
86 );
87 start += LINESIZE;
88 }
89 local_irq_restore(flags);
90}
91
92void hexagon_inv_dcache_range(unsigned long start, unsigned long end)
93{
94 unsigned long lines = spanlines(start, end-1);
95 unsigned long i, flags;
96
97 start &= ~(LINESIZE - 1);
98
99 local_irq_save(flags);
100
101 for (i = 0; i < lines; i++) {
102 __asm__ __volatile__ (
103 " dcinva(%0); "
104 :
105 : "r" (start)
106 );
107 start += LINESIZE;
108 }
109 local_irq_restore(flags);
110}
111
112
113
114
115/*
 116 * This is just really brutal and shouldn't be used anyway,
117 * especially on V2. Left here just in case.
118 */
119void flush_cache_all_hexagon(void)
120{
121 unsigned long flags;
122 local_irq_save(flags);
123 __vmcache_ickill();
124 __vmcache_dckill();
125 __vmcache_l2kill();
126 local_irq_restore(flags);
127 mb();
128}
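The deleted cache.c above derives its loop counts from the spanlines() macro: round start down to a line boundary, measure to end (the callers pass end-1 so the range is inclusive), and count whole lines plus one. A small, hedged C example; the LINESIZE/LINEBITS values are illustrative, not taken from the Hexagon headers.

#include <stdio.h>

#define LINESIZE 32u			/* illustrative cache line size */
#define LINEBITS 5u			/* log2(LINESIZE) */
#define spanlines(start, end) \
	((((end) - ((start) & ~(LINESIZE - 1))) >> LINEBITS) + 1)

int main(void)
{
	unsigned long start = 0x1004, end = 0x1043;

	/* 0x1004 rounds down to 0x1000; the 0x43 bytes to 0x1043 touch
	 * lines 0x1000, 0x1020 and 0x1040, so the result is 3 */
	printf("lines spanned: %lu\n", spanlines(start, end));
	return 0;
}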
diff --git a/arch/hexagon/mm/copy_from_user.S b/arch/hexagon/mm/copy_from_user.S
deleted file mode 100644
index 7fc94f3e664..00000000000
--- a/arch/hexagon/mm/copy_from_user.S
+++ /dev/null
@@ -1,114 +0,0 @@
1/*
2 * User memory copy functions for kernel
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21/*
22 * The right way to do this involves valignb
23 * The easy way to do this is only speed up src/dest similar alignment.
24 */
25
26/*
27 * Copy to/from user are the same, except that for packets with a load and
28 * a store, I don't know how to tell which kind of exception we got.
29 * Therefore, we duplicate the function, and handle faulting addresses
30 * differently for each function
31 */
32
33/*
34 * copy from user: loads can fault
35 */
36#define src_sav r13
37#define dst_sav r12
38#define src_dst_sav r13:12
39#define d_dbuf r15:14
40#define w_dbuf r15
41
42#define dst r0
43#define src r1
44#define bytes r2
45#define loopcount r5
46
47#define FUNCNAME __copy_from_user_hexagon
48#include "copy_user_template.S"
49
50 /* LOAD FAULTS from COPY_FROM_USER */
51
52 /* Alignment loop. r2 has been updated. Return it. */
53 .falign
541009:
552009:
564009:
57 {
58 r0 = r2
59 jumpr r31
60 }
61 /* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
62 /* X - (A - B) == X + B - A */
63 .falign
648089:
65 {
66 memd(dst) = d_dbuf
67 r2 += sub(src_sav,src)
68 }
69 {
70 r0 = r2
71 jumpr r31
72 }
73 .falign
744089:
75 {
76 memw(dst) = w_dbuf
77 r2 += sub(src_sav,src)
78 }
79 {
80 r0 = r2
81 jumpr r31
82 }
83 .falign
842089:
85 {
86 memh(dst) = w_dbuf
87 r2 += sub(src_sav,src)
88 }
89 {
90 r0 = r2
91 jumpr r31
92 }
93 .falign
941089:
95 {
96 memb(dst) = w_dbuf
97 r2 += sub(src_sav,src)
98 }
99 {
100 r0 = r2
101 jumpr r31
102 }
103
104 /* COPY FROM USER: only loads can fail */
105
106 .section __ex_table,"a"
107 .long 1000b,1009b
108 .long 2000b,2009b
109 .long 4000b,4009b
110 .long 8080b,8089b
111 .long 4080b,4089b
112 .long 2080b,2089b
113 .long 1080b,1089b
114 .previous
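The fixup handlers above lean on the identity noted in the comment, X - (A - B) == X + B - A: the bytes not copied equal the running count minus how far the source pointer advanced past its saved value, which the single packet "r2 += sub(src_sav,src)" computes. A hedged C restatement with the register semantics simplified:

#include <stddef.h>
#include <stdint.h>

/* Bytes left uncopied when a load faults: the running count minus how far
 * the source pointer advanced past its saved copy. The assembly expresses
 * the same value as count + (src_sav - src) in one packet. */
static size_t bytes_not_copied(size_t count, const uint8_t *src_sav,
			       const uint8_t *src)
{
	return count - (size_t)(src - src_sav);
}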
diff --git a/arch/hexagon/mm/copy_to_user.S b/arch/hexagon/mm/copy_to_user.S
deleted file mode 100644
index 0cfbcc09d1d..00000000000
--- a/arch/hexagon/mm/copy_to_user.S
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * User memory copying routines for the Hexagon Kernel
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21/* The right way to do this involves valignb
22 * The easy way to do this is only speed up src/dest similar alignment.
23 */
24
25/*
26 * Copy to/from user are the same, except that for packets with a load and
27 * a store, I don't know how to tell which kind of exception we got.
28 * Therefore, we duplicate the function, and handle faulting addresses
29 * differently for each function
30 */
31
32/*
33 * copy to user: stores can fault
34 */
35#define src_sav r13
36#define dst_sav r12
37#define src_dst_sav r13:12
38#define d_dbuf r15:14
39#define w_dbuf r15
40
41#define dst r0
42#define src r1
43#define bytes r2
44#define loopcount r5
45
46#define FUNCNAME __copy_to_user_hexagon
47#include "copy_user_template.S"
48
49 /* STORE FAULTS from COPY_TO_USER */
50 .falign
511109:
522109:
534109:
54 /* Alignment loop. r2 has been updated. Return it. */
55 {
56 r0 = r2
57 jumpr r31
58 }
59 /* Normal copy loops. Use dst-dst_sav to compute distance */
60 /* dst holds best write, no need to unwind any loops */
61 /* X - (A - B) == X + B - A */
62 .falign
638189:
648199:
654189:
664199:
672189:
682199:
691189:
701199:
71 {
72 r2 += sub(dst_sav,dst)
73 }
74 {
75 r0 = r2
76 jumpr r31
77 }
78
79 /* COPY TO USER: only stores can fail */
80 .section __ex_table,"a"
81 .long 1100b,1109b
82 .long 2100b,2109b
83 .long 4100b,4109b
84 .long 8180b,8189b
85 .long 8190b,8199b
86 .long 4180b,4189b
87 .long 4190b,4199b
88 .long 2180b,2189b
89 .long 2190b,2199b
90 .long 1180b,1189b
91 .long 1190b,1199b
92 .previous
diff --git a/arch/hexagon/mm/copy_user_template.S b/arch/hexagon/mm/copy_user_template.S
deleted file mode 100644
index 254d8cc766b..00000000000
--- a/arch/hexagon/mm/copy_user_template.S
+++ /dev/null
@@ -1,185 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19/* Numerology:
20 * WXYZ
21 * W: width in bytes
22 * X: Load=0, Store=1
23 * Y: Location 0=preamble,8=loop,9=epilog
24 * Z: Location=0,handler=9
25 */
26 .text
27 .global FUNCNAME
28 .type FUNCNAME, @function
29 .p2align 5
30FUNCNAME:
31 {
32 p0 = cmp.gtu(bytes,#0)
33 if (!p0.new) jump:nt .Ldone
34 r3 = or(dst,src)
35 r4 = xor(dst,src)
36 }
37 {
38 p1 = cmp.gtu(bytes,#15)
39 p0 = bitsclr(r3,#7)
40 if (!p0.new) jump:nt .Loop_not_aligned_8
41 src_dst_sav = combine(src,dst)
42 }
43
44 {
45 loopcount = lsr(bytes,#3)
46 if (!p1) jump .Lsmall
47 }
48 p3=sp1loop0(.Loop8,loopcount)
49.Loop8:
508080:
518180:
52 {
53 if (p3) memd(dst++#8) = d_dbuf
54 d_dbuf = memd(src++#8)
55 }:endloop0
568190:
57 {
58 memd(dst++#8) = d_dbuf
59 bytes -= asl(loopcount,#3)
60 jump .Lsmall
61 }
62
63.Loop_not_aligned_8:
64 {
65 p0 = bitsclr(r4,#7)
66 if (p0.new) jump:nt .Lalign
67 }
68 {
69 p0 = bitsclr(r3,#3)
70 if (!p0.new) jump:nt .Loop_not_aligned_4
71 p1 = cmp.gtu(bytes,#7)
72 }
73
74 {
75 if (!p1) jump .Lsmall
76 loopcount = lsr(bytes,#2)
77 }
78 p3=sp1loop0(.Loop4,loopcount)
79.Loop4:
804080:
814180:
82 {
83 if (p3) memw(dst++#4) = w_dbuf
84 w_dbuf = memw(src++#4)
85 }:endloop0
864190:
87 {
88 memw(dst++#4) = w_dbuf
89 bytes -= asl(loopcount,#2)
90 jump .Lsmall
91 }
92
93.Loop_not_aligned_4:
94 {
95 p0 = bitsclr(r3,#1)
96 if (!p0.new) jump:nt .Loop_not_aligned
97 p1 = cmp.gtu(bytes,#3)
98 }
99
100 {
101 if (!p1) jump .Lsmall
102 loopcount = lsr(bytes,#1)
103 }
104 p3=sp1loop0(.Loop2,loopcount)
105.Loop2:
1062080:
1072180:
108 {
109 if (p3) memh(dst++#2) = w_dbuf
110 w_dbuf = memuh(src++#2)
111 }:endloop0
1122190:
113 {
114 memh(dst++#2) = w_dbuf
115 bytes -= asl(loopcount,#1)
116 jump .Lsmall
117 }
118
119.Loop_not_aligned: /* Works for as small as one byte */
120 p3=sp1loop0(.Loop1,bytes)
121.Loop1:
1221080:
1231180:
124 {
125 if (p3) memb(dst++#1) = w_dbuf
126 w_dbuf = memub(src++#1)
127 }:endloop0
128 /* Done */
1291190:
130 {
131 memb(dst) = w_dbuf
132 jumpr r31
133 r0 = #0
134 }
135
136.Lsmall:
137 {
138 p0 = cmp.gtu(bytes,#0)
139 if (p0.new) jump:nt .Loop_not_aligned
140 }
141.Ldone:
142 {
143 r0 = #0
144 jumpr r31
145 }
146 .falign
147.Lalign:
1481000:
149 {
150 if (p0.new) w_dbuf = memub(src)
151 p0 = tstbit(src,#0)
152 if (!p1) jump .Lsmall
153 }
1541100:
155 {
156 if (p0) memb(dst++#1) = w_dbuf
157 if (p0) bytes = add(bytes,#-1)
158 if (p0) src = add(src,#1)
159 }
1602000:
161 {
162 if (p0.new) w_dbuf = memuh(src)
163 p0 = tstbit(src,#1)
164 if (!p1) jump .Lsmall
165 }
1662100:
167 {
168 if (p0) memh(dst++#2) = w_dbuf
169 if (p0) bytes = add(bytes,#-2)
170 if (p0) src = add(src,#2)
171 }
1724000:
173 {
174 if (p0.new) w_dbuf = memw(src)
175 p0 = tstbit(src,#2)
176 if (!p1) jump .Lsmall
177 }
1784100:
179 {
180 if (p0) memw(dst++#4) = w_dbuf
181 if (p0) bytes = add(bytes,#-4)
182 if (p0) src = add(src,#4)
183 jump FUNCNAME
184 }
185 .size FUNCNAME,.-FUNCNAME
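The WXYZ label numerology documented at the top of the deleted template is what ties the raw numeric labels (1080:, 8190:, ...) in the two copy routines to their exception-table fixups. A throwaway C sketch of how such a label decodes, purely for illustration:

#include <stdio.h>

/* Decode a WXYZ label from copy_user_template.S:
 * W = access width in bytes, X = 0 load / 1 store,
 * Y = 0 preamble / 8 loop / 9 epilog, Z = 0 code / 9 fault handler. */
static void decode_label(int wxyz)
{
	printf("label %d: width %d, %s, %s, %s\n", wxyz,
	       wxyz / 1000,
	       (wxyz / 100) % 10 ? "store" : "load",
	       ((wxyz / 10) % 10 == 9) ? "epilog" :
	       ((wxyz / 10) % 10 == 8) ? "loop" : "preamble",
	       (wxyz % 10 == 9) ? "fault handler" : "code");
}

int main(void)
{
	decode_label(8180);	/* 8-byte store in the main copy loop */
	decode_label(4089);	/* fixup for a fault in the word-load loop */
	return 0;
}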
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
deleted file mode 100644
index 69ffcfd2879..00000000000
--- a/arch/hexagon/mm/init.c
+++ /dev/null
@@ -1,276 +0,0 @@
1/*
2 * Memory subsystem initialization for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/mm.h>
23#include <linux/bootmem.h>
24#include <asm/atomic.h>
25#include <linux/highmem.h>
26#include <asm/tlb.h>
27#include <asm/sections.h>
28#include <asm/vm_mmu.h>
29
30/*
31 * Define a startpg just past the end of the kernel image and a lastpg
32 * that corresponds to the end of real or simulated platform memory.
33 */
34#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET))
35
36unsigned long bootmem_lastpg; /* Should be set by platform code */
37
38/* Set as variable to limit PMD copies */
39int max_kernel_seg = 0x303;
40
41/* think this should be (page_size-1) the way it's used...*/
42unsigned long zero_page_mask;
43
44/* indicate pfn's of high memory */
45unsigned long highstart_pfn, highend_pfn;
46
47/* struct mmu_gather defined in asm-generic.h; */
48DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
49
50/* Default cache attribute for newly created page tables */
51unsigned long _dflt_cache_att = CACHEDEF;
52
53/*
54 * The current "generation" of kernel map, which should not roll
55 * over until Hell freezes over. Actual bound in years needs to be
56 * calculated to confirm.
57 */
58DEFINE_SPINLOCK(kmap_gen_lock);
59
60/* checkpatch says don't init this to 0. */
61unsigned long long kmap_generation;
62
63/*
64 * mem_init - initializes memory
65 *
66 * Frees up bootmem
67 * Fixes up more stuff for HIGHMEM
68 * Calculates and displays memory available/used
69 */
70void __init mem_init(void)
71{
72 /* No idea where this is actually declared. Seems to evade LXR. */
73 totalram_pages += free_all_bootmem();
74 num_physpages = bootmem_lastpg; /* seriously, what? */
75
76 printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages);
77
78 /*
79 * To-Do: someone somewhere should wipe out the bootmem map
80 * after we're done?
81 */
82
83 /*
84 * This can be moved to some more virtual-memory-specific
85 * initialization hook at some point. Set the init_mm
86 * descriptors "context" value to point to the initial
87 * kernel segment table's physical address.
88 */
89 init_mm.context.ptbase = __pa(init_mm.pgd);
90}
91
92/*
93 * free_initmem - frees memory used by stuff declared with __init
94 *
95 * Todo: free pages between __init_begin and __init_end; possibly
96 * some devtree related stuff as well.
97 */
98void __init_refok free_initmem(void)
99{
100}
101
102/*
103 * free_initrd_mem - frees... initrd memory.
104 * @start - start of init memory
105 * @end - end of init memory
106 *
107 * Apparently has to be passed the address of the initrd memory.
108 *
109 * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
110 */
111void free_initrd_mem(unsigned long start, unsigned long end)
112{
113}
114
115void sync_icache_dcache(pte_t pte)
116{
117 unsigned long addr;
118 struct page *page;
119
120 page = pte_page(pte);
121 addr = (unsigned long) page_address(page);
122
123 __vmcache_idsync(addr, PAGE_SIZE);
124}
125
126/*
127 * In order to set up page allocator "nodes",
128 * somebody has to call free_area_init() for UMA.
129 *
130 * In this mode, we only have one pg_data_t
131 * structure: contig_mem_data.
132 */
133void __init paging_init(void)
134{
135 unsigned long zones_sizes[MAX_NR_ZONES] = {0, };
136
137 /*
138 * This is not particularly well documented anywhere, but
139 * give ZONE_NORMAL all the memory, including the big holes
140 * left by the kernel+bootmem_map which are already left as reserved
141 * in the bootmem_map; free_area_init should see those bits and
142 * adjust accordingly.
143 */
144
145 zones_sizes[ZONE_NORMAL] = max_low_pfn;
146
147 free_area_init(zones_sizes); /* sets up the zonelists and mem_map */
148
149 /*
150 * Start of high memory area. Will probably need something more
151 * fancy if we... get more fancy.
152 */
153 high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
154}
155
156#ifndef DMA_RESERVE
157#define DMA_RESERVE (4)
158#endif
159
160#define DMA_CHUNKSIZE (1<<22)
161#define DMA_RESERVED_BYTES (DMA_RESERVE * DMA_CHUNKSIZE)
162
163/*
164 * Pick out the memory size. We look for mem=size,
165 * where size is "size[KkMm]"
166 */
167static int __init early_mem(char *p)
168{
169 unsigned long size;
170 char *endp;
171
172 size = memparse(p, &endp);
173
174 bootmem_lastpg = PFN_DOWN(size);
175
176 return 0;
177}
178early_param("mem", early_mem);
179
180size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22);
181
182void __init setup_arch_memory(void)
183{
184 int bootmap_size;
185 /* XXX Todo: this probably should be cleaned up */
186 u32 *segtable = (u32 *) &swapper_pg_dir[0];
187 u32 *segtable_end;
188
189 /*
190 * Set up boot memory allocator
191 *
192 * The Gorman book also talks about these functions.
193 * This needs to change for highmem setups.
194 */
195
196 /* Memory size needs to be a multiple of 16M */
197 bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
198 ~((BIG_KERNEL_PAGE_SIZE) - 1));
199
200 /*
201 * Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
202 * memory allocation
203 */
204 bootmap_size = init_bootmem(bootmem_startpg, bootmem_lastpg -
205 PFN_DOWN(DMA_RESERVED_BYTES));
206
207 printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
208 printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
209 printk(KERN_INFO "bootmap_size: %d\n", bootmap_size);
210 printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);
211
212 /*
213 * The default VM page tables (will be) populated with
214 * VA=PA+PAGE_OFFSET mapping. We go in and invalidate entries
215 * higher than what we have memory for.
216 */
217
218 /* this is pointer arithmetic; each entry covers 4MB */
219 segtable = segtable + (PAGE_OFFSET >> 22);
220
221 /* this actually only goes to the end of the first gig */
222 segtable_end = segtable + (1<<(30-22));
223
224 /* Move forward to the start of empty pages */
225 segtable += bootmem_lastpg >> (22-PAGE_SHIFT);
226
227 {
228 int i;
229
230 for (i = 1 ; i <= DMA_RESERVE ; i++)
231 segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
232 | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
233 | __HEXAGON_C_UNC << 6
234 | __HVM_PDE_S_4MB);
235 }
236
237 printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
238 segtable_end);
239 while (segtable < (segtable_end-8))
240 *(segtable++) = __HVM_PDE_S_INVALID;
241 /* stop the pointer at the device I/O 4MB page */
242
243 printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
244 segtable);
245
246#if 0
247 /* Other half of the early device table from vm_init_segtable. */
248 printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
249 (unsigned long) _K_init_devicetable-PAGE_OFFSET);
250 *segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) |
251 __HVM_PDE_S_4KB;
252 printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
253#endif
254
255 /*
256 * Free all the memory that wasn't taken up by the bootmap, the DMA
257 * reserve, or kernel itself.
258 */
259 free_bootmem(PFN_PHYS(bootmem_startpg)+bootmap_size,
260 PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
261 DMA_RESERVED_BYTES);
262
263 /*
264 * The bootmem allocator seemingly just lives to feed memory
265 * to the paging system
266 */
267 printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
268 paging_init(); /* See Gorman Book, 2.3 */
269
270 /*
271 * At this point, the page allocator is kind of initialized, but
272 * apparently no pages are available (just like with the bootmem
273 * allocator), and need to be freed themselves via mem_init(),
274 * which is called by start_kernel() later on in the process
275 */
276}
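A hedged arithmetic sketch of the segment-table indexing used in setup_arch_memory() above: with 4 MB mappings, the entry for a virtual address is VA >> 22, so the table pointer is advanced by (PAGE_OFFSET >> 22) and then by the number of mapped 4 MB chunks, bootmem_lastpg >> (22 - PAGE_SHIFT). The constants below are illustrative, not the real Hexagon layout.

#include <stdio.h>

#define PAGE_SHIFT   14				/* illustrative 16 KB pages */
#define PAGE_OFFSET  0xc0000000UL		/* illustrative kernel base */

int main(void)
{
	unsigned long bootmem_lastpg = 0x4000;	/* 0x4000 pages * 16 KB = 256 MB */
	unsigned long first_entry = PAGE_OFFSET >> 22;
	unsigned long mapped_entries = bootmem_lastpg >> (22 - PAGE_SHIFT);

	printf("first kernel entry: %lu\n", first_entry);	/* 768 */
	printf("first empty entry:  %lu\n", first_entry + mapped_entries);
	return 0;
}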
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
deleted file mode 100644
index 5905fd5f97f..00000000000
--- a/arch/hexagon/mm/ioremap.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * I/O remap functions for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#include <linux/io.h>
22#include <linux/vmalloc.h>
23
24void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
25{
26 unsigned long last_addr, addr;
27 unsigned long offset = phys_addr & ~PAGE_MASK;
28 struct vm_struct *area;
29
30 pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE
31 |(__HEXAGON_C_DEV << 6));
32
33 last_addr = phys_addr + size - 1;
34
35 /* Wrapping not allowed */
36 if (!size || (last_addr < phys_addr))
37 return NULL;
38
39 /* Rounds up to next page size, including whole-page offset */
40 size = PAGE_ALIGN(offset + size);
41
42 area = get_vm_area(size, VM_IOREMAP);
43 addr = (unsigned long)area->addr;
44
45 if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
46 vunmap((void *)addr);
47 return NULL;
48 }
49
50 return (void __iomem *) (offset + addr);
51}
52
53void __iounmap(const volatile void __iomem *addr)
54{
55 vunmap((void *) ((unsigned long) addr & PAGE_MASK));
56}
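As a hedged usage sketch of the deleted ioremap_nocache()/__iounmap() pair, roughly how a platform driver might have used it; the physical address, size and register offset below are made up for illustration.

#include <linux/errno.h>
#include <linux/io.h>

static int example_map_device(void)
{
	void __iomem *regs;

	/* hypothetical device register block at 0x90000000, 4 KB long */
	regs = ioremap_nocache(0x90000000, 0x1000);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical enable register */

	__iounmap(regs);		/* the unmap helper defined above */
	return 0;
}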
diff --git a/arch/hexagon/mm/pgalloc.c b/arch/hexagon/mm/pgalloc.c
deleted file mode 100644
index 19760a4611d..00000000000
--- a/arch/hexagon/mm/pgalloc.c
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19#include <linux/init.h>
20
21void __init pgtable_cache_init(void)
22{
23}
diff --git a/arch/hexagon/mm/strnlen_user.S b/arch/hexagon/mm/strnlen_user.S
deleted file mode 100644
index 0eecb7a768f..00000000000
--- a/arch/hexagon/mm/strnlen_user.S
+++ /dev/null
@@ -1,139 +0,0 @@
1/*
2 * User string length functions for kernel
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21#define isrc r0
22#define max r1 /* Do not change! */
23
24#define end r2
25#define tmp1 r3
26
27#define obo r6 /* off-by-one */
28#define start r7
29#define mod8 r8
30#define dbuf r15:14
31#define dcmp r13:12
32
33/*
34 * The vector mask version of this turned out *really* badly.
35 * The hardware loop version also turned out *really* badly.
36 * Seems straight pointer arithmetic basically wins here.
37 */
38
39#define fname __strnlen_user
40
41 .text
42 .global fname
43 .type fname, @function
44 .p2align 5 /* why? */
45fname:
46 {
47 mod8 = and(isrc,#7);
48 end = add(isrc,max);
49 start = isrc;
50 }
51 {
52 P0 = cmp.eq(mod8,#0);
53 mod8 = and(end,#7);
54 dcmp = #0;
55 if (P0.new) jump:t dw_loop; /* fire up the oven */
56 }
57
58alignment_loop:
59fail_1: {
60 tmp1 = memb(start++#1);
61 }
62 {
63 P0 = cmp.eq(tmp1,#0);
64 if (P0.new) jump:nt exit_found;
65 P1 = cmp.gtu(end,start);
66 mod8 = and(start,#7);
67 }
68 {
69 if (!P1) jump exit_error; /* hit the end */
70 P0 = cmp.eq(mod8,#0);
71 }
72 {
73 if (!P0) jump alignment_loop;
74 }
75
76
77
78dw_loop:
79fail_2: {
80 dbuf = memd(start);
81 obo = add(start,#1);
82 }
83 {
84 P0 = vcmpb.eq(dbuf,dcmp);
85 }
86 {
87 tmp1 = P0;
88 P0 = cmp.gtu(end,start);
89 }
90 {
91 tmp1 = ct0(tmp1);
92 mod8 = and(end,#7);
93 if (!P0) jump end_check;
94 }
95 {
96 P0 = cmp.eq(tmp1,#32);
97 if (!P0.new) jump:nt exit_found;
98 if (!P0.new) start = add(obo,tmp1);
99 }
100 {
101 start = add(start,#8);
102 jump dw_loop;
103 } /* might be nice to combine these jumps... */
104
105
106end_check:
107 {
108 P0 = cmp.gt(tmp1,mod8);
109 if (P0.new) jump:nt exit_error; /* neverfound! */
110 start = add(obo,tmp1);
111 }
112
113exit_found:
114 {
115 R0 = sub(start,isrc);
116 jumpr R31;
117 }
118
119exit_error:
120 {
121 R0 = add(max,#1);
122 jumpr R31;
123 }
124
125 /* Uh, what does the "fixup" return here? */
126 .falign
127fix_1:
128 {
129 R0 = #0;
130 jumpr R31;
131 }
132
133 .size fname,.-fname
134
135
136.section __ex_table,"a"
137.long fail_1,fix_1
138.long fail_2,fix_1
139.previous
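The dw_loop in the deleted __strnlen_user scans a doubleword at a time, using vcmpb.eq against zero and ct0 on the resulting predicate to locate the first NUL. A hedged C sketch of the same scanning idea, in plain user-space C with the classic zero-byte bit trick; little-endian byte order is assumed, as on Hexagon, and the kernel routine's off-by-one return convention and fault handling are deliberately left out.

#include <stdint.h>
#include <string.h>

static size_t strnlen_swar(const char *s, size_t max)
{
	size_t n = 0;

	/* byte loop until the pointer is 8-byte aligned (alignment_loop) */
	while (n < max && ((uintptr_t)(s + n) & 7)) {
		if (s[n] == '\0')
			return n;
		n++;
	}
	/* scan aligned doublewords (dw_loop) */
	while (max - n >= 8) {
		uint64_t v, zero;

		memcpy(&v, s + n, 8);
		/* nonzero iff v contains a zero byte; the lowest set bit
		 * sits in the first such byte (little-endian) */
		zero = (v - 0x0101010101010101ULL) & ~v &
		       0x8080808080808080ULL;
		if (zero)
			return n + ((size_t)__builtin_ctzll(zero) >> 3);
		n += 8;
	}
	/* leftover tail */
	while (n < max && s[n] != '\0')
		n++;
	return n;
}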
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
deleted file mode 100644
index 34127261c2b..00000000000
--- a/arch/hexagon/mm/uaccess.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
18
19/*
20 * Support for user memory access from kernel. This will
21 * probably be inlined for performance at some point, but
22 * for ease of debug, and to a lesser degree for code size,
23 * we implement here as subroutines.
24 */
25#include <linux/types.h>
26#include <asm/uaccess.h>
27#include <asm/pgtable.h>
28
29/*
30 * For clear_user(), exploit previously defined copy_to_user function
31 * and the fact that we've got a handy zero page defined in kernel/head.S
32 *
33 * dczero here would be even faster.
34 */
35__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
36{
37 long uncleared;
38
39 while (count > PAGE_SIZE) {
40 uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
41 PAGE_SIZE);
42 if (uncleared)
43 return count - (PAGE_SIZE - uncleared);
44 count -= PAGE_SIZE;
45 dest += PAGE_SIZE;
46 }
47 if (count)
48 count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
49
50 return count;
51}
52
53unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
54{
55 if (!access_ok(VERIFY_WRITE, dest, count))
56 return count;
57 else
58 return __clear_user_hexagon(dest, count);
59}
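A hedged usage sketch for clear_user_hexagon() above, following the usual kernel convention that a nonzero return means that many bytes could not be cleared; the caller shown here is invented, and the prototype is assumed to come from asm/uaccess.h.

#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_zero_user_buf(void __user *buf, unsigned long len)
{
	/* clear_user_hexagon() returns the number of bytes NOT cleared */
	if (clear_user_hexagon(buf, len))
		return -EFAULT;
	return 0;
}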
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
deleted file mode 100644
index 308ef0ce648..00000000000
--- a/arch/hexagon/mm/vm_fault.c
+++ /dev/null
@@ -1,200 +0,0 @@
1/*
2 * Memory fault handling for Hexagon
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21/*
22 * Page fault handling for the Hexagon Virtual Machine.
23 * Can also be called by a native port emulating the HVM
 24 * exceptions.
25 */
26
27#include <asm/pgtable.h>
28#include <asm/traps.h>
29#include <asm/uaccess.h>
30#include <linux/mm.h>
31#include <linux/signal.h>
32#include <linux/module.h>
33#include <linux/hardirq.h>
34
35/*
36 * Decode of hardware exception sends us to one of several
37 * entry points. At each, we generate canonical arguments
38 * for handling by the abstract memory management code.
39 */
40#define FLT_IFETCH -1
41#define FLT_LOAD 0
42#define FLT_STORE 1
43
44
45/*
46 * Canonical page fault handler
47 */
48void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
49{
50 struct vm_area_struct *vma;
51 struct mm_struct *mm = current->mm;
52 siginfo_t info;
53 int si_code = SEGV_MAPERR;
54 int fault;
55 const struct exception_table_entry *fixup;
56 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
57 (cause > 0 ? FAULT_FLAG_WRITE : 0);
58
59 /*
60 * If we're in an interrupt or have no user context,
61 * then must not take the fault.
62 */
63 if (unlikely(in_interrupt() || !mm))
64 goto no_context;
65
66 local_irq_enable();
67
68retry:
69 down_read(&mm->mmap_sem);
70 vma = find_vma(mm, address);
71 if (!vma)
72 goto bad_area;
73
74 if (vma->vm_start <= address)
75 goto good_area;
76
77 if (!(vma->vm_flags & VM_GROWSDOWN))
78 goto bad_area;
79
80 if (expand_stack(vma, address))
81 goto bad_area;
82
83good_area:
84 /* Address space is OK. Now check access rights. */
85 si_code = SEGV_ACCERR;
86
87 switch (cause) {
88 case FLT_IFETCH:
89 if (!(vma->vm_flags & VM_EXEC))
90 goto bad_area;
91 break;
92 case FLT_LOAD:
93 if (!(vma->vm_flags & VM_READ))
94 goto bad_area;
95 break;
96 case FLT_STORE:
97 if (!(vma->vm_flags & VM_WRITE))
98 goto bad_area;
99 break;
100 }
101
102 fault = handle_mm_fault(mm, vma, address, flags);
103
104 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
105 return;
106
107 /* The most common case -- we are done. */
108 if (likely(!(fault & VM_FAULT_ERROR))) {
109 if (flags & FAULT_FLAG_ALLOW_RETRY) {
110 if (fault & VM_FAULT_MAJOR)
111 current->maj_flt++;
112 else
113 current->min_flt++;
114 if (fault & VM_FAULT_RETRY) {
115 flags &= ~FAULT_FLAG_ALLOW_RETRY;
116 flags |= FAULT_FLAG_TRIED;
117 goto retry;
118 }
119 }
120
121 up_read(&mm->mmap_sem);
122 return;
123 }
124
125 up_read(&mm->mmap_sem);
126
127 /* Handle copyin/out exception cases */
128 if (!user_mode(regs))
129 goto no_context;
130
131 if (fault & VM_FAULT_OOM) {
132 pagefault_out_of_memory();
133 return;
134 }
135
136 /* User-mode address is in the memory map, but we are
137 * unable to fix up the page fault.
138 */
139 if (fault & VM_FAULT_SIGBUS) {
140 info.si_signo = SIGBUS;
141 info.si_code = BUS_ADRERR;
142 }
143 /* Address is not in the memory map */
144 else {
145 info.si_signo = SIGSEGV;
146 info.si_code = SEGV_ACCERR;
147 }
148 info.si_errno = 0;
149 info.si_addr = (void __user *)address;
150 force_sig_info(info.si_code, &info, current);
151 return;
152
153bad_area:
154 up_read(&mm->mmap_sem);
155
156 if (user_mode(regs)) {
157 info.si_signo = SIGSEGV;
158 info.si_errno = 0;
159 info.si_code = si_code;
160 info.si_addr = (void *)address;
161 force_sig_info(SIGSEGV, &info, current);
162 return;
163 }
164 /* Kernel-mode fault falls through */
165
166no_context:
167 fixup = search_exception_tables(pt_elr(regs));
168 if (fixup) {
169 pt_set_elr(regs, fixup->fixup);
170 return;
171 }
172
173 /* Things are looking very, very bad now */
174 bust_spinlocks(1);
175 printk(KERN_EMERG "Unable to handle kernel paging request at "
176 "virtual address 0x%08lx, regs %p\n", address, regs);
177 die("Bad Kernel VA", regs, SIGKILL);
178}
179
180
181void read_protection_fault(struct pt_regs *regs)
182{
183 unsigned long badvadr = pt_badva(regs);
184
185 do_page_fault(badvadr, FLT_LOAD, regs);
186}
187
188void write_protection_fault(struct pt_regs *regs)
189{
190 unsigned long badvadr = pt_badva(regs);
191
192 do_page_fault(badvadr, FLT_STORE, regs);
193}
194
195void execute_protection_fault(struct pt_regs *regs)
196{
197 unsigned long badvadr = pt_badva(regs);
198
199 do_page_fault(badvadr, FLT_IFETCH, regs);
200}
diff --git a/arch/hexagon/mm/vm_tlb.c b/arch/hexagon/mm/vm_tlb.c
deleted file mode 100644
index 9647d00cb76..00000000000
--- a/arch/hexagon/mm/vm_tlb.c
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * Hexagon Virtual Machine TLB functions
3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20
21/*
22 * The Hexagon Virtual Machine conceals the real workings of
23 * the TLB, but there are one or two functions that need to
24 * be instantiated for it, differently from a native build.
25 */
26#include <linux/mm.h>
27#include <asm/page.h>
28#include <asm/hexagon_vm.h>
29
30/*
31 * Initial VM implementation has only one map active at a time, with
32 * TLB purgings on changes. So either we're nuking the current map,
33 * or it's a no-op. This operation is messy on true SMPs where other
34 * processors must be induced to flush the copies in their local TLBs,
35 * but Hexagon thread-based virtual processors share the same MMU.
36 */
37void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
38 unsigned long end)
39{
40 struct mm_struct *mm = vma->vm_mm;
41
42 if (mm->context.ptbase == current->active_mm->context.ptbase)
43 __vmclrmap((void *)start, end - start);
44}
45
46/*
47 * Flush a page from the kernel virtual map - used by highmem
48 */
49void flush_tlb_one(unsigned long vaddr)
50{
51 __vmclrmap((void *)vaddr, PAGE_SIZE);
52}
53
54/*
55 * Flush all TLBs across all CPUs, virtual or real.
56 * A single Hexagon core has 6 thread contexts but
57 * only one TLB.
58 */
59void tlb_flush_all(void)
60{
 61 /* should probably use that fixaddr end or whatever label */
62 __vmclrmap(0, 0xffff0000);
63}
64
65/*
66 * Flush TLB entries associated with a given mm_struct mapping.
67 */
68void flush_tlb_mm(struct mm_struct *mm)
69{
70 /* Current Virtual Machine has only one map active at a time */
71 if (current->active_mm->context.ptbase == mm->context.ptbase)
72 tlb_flush_all();
73}
74
75/*
76 * Flush TLB state associated with a page of a vma.
77 */
78void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
79{
80 struct mm_struct *mm = vma->vm_mm;
81
82 if (mm->context.ptbase == current->active_mm->context.ptbase)
83 __vmclrmap((void *)vaddr, PAGE_SIZE);
84}
85
86/*
87 * Flush TLB entries associated with a kernel address range.
88 * Like flush range, but without the check on the vma->vm_mm.
89 */
90void flush_tlb_kernel_range(unsigned long start, unsigned long end)
91{
92 __vmclrmap((void *)start, end - start);
93}